1 /*******************************************************************************
2 * This file contains tcm implementation using v4 configfs fabric infrastructure
3 * for QLogic target mode HBAs
 * (c) Copyright 2010-2011 RisingTide Systems LLC.
7 * Licensed to the Linux Foundation under the General Public License (GPL)
10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
12 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
13 * the TCM_FC / Open-FCoE.org fabric module.
15 * Copyright (c) 2010 Cisco Systems, Inc
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 ****************************************************************************/
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <generated/utsrelease.h>
32 #include <linux/utsname.h>
33 #include <linux/init.h>
34 #include <linux/list.h>
35 #include <linux/slab.h>
36 #include <linux/kthread.h>
37 #include <linux/types.h>
38 #include <linux/string.h>
39 #include <linux/configfs.h>
40 #include <linux/ctype.h>
41 #include <linux/string.h>
42 #include <linux/ctype.h>
43 #include <asm/unaligned.h>
44 #include <scsi/scsi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_device.h>
47 #include <scsi/scsi_cmnd.h>
48 #include <target/target_core_base.h>
49 #include <target/target_core_fabric.h>
50 #include <target/target_core_fabric_configfs.h>
51 #include <target/target_core_configfs.h>
52 #include <target/configfs_macros.h>
55 #include "qla_target.h"
56 #include "tcm_qla2xxx.h"
58 struct workqueue_struct *tcm_qla2xxx_free_wq;
59 struct workqueue_struct *tcm_qla2xxx_cmd_wq;
61 static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
66 static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
73 * If strict, we require lower-case hex and colon separators to be sure
74 * the name is the same as what would be generated by ft_format_wwn()
75 * so the name and wwn are mapped one-to-one.
77 static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
87 for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
89 if (c == '\n' && cp[1] == '\0')
91 if (strict && pos++ == 2 && byte++ < 7) {
100 if (strict && byte != 8)
107 else if (isxdigit(c) && (islower(c) || !strict))
108 nibble = tolower(c) - 'a' + 10;
111 *wwn = (*wwn << 4) | nibble;
115 pr_debug("err %u len %zu pos %u byte %u\n",
116 err, cp - name, pos, byte);
120 static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
124 put_unaligned_be64(wwn, b);
125 return snprintf(buf, len,
126 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
127 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
130 static char *tcm_qla2xxx_get_fabric_name(void)
136 * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
138 static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
140 unsigned int i, j, value;
143 memset(wwn, 0, sizeof(wwn));
145 /* Validate and store the new name */
146 for (i = 0, j = 0; i < 16; i++) {
147 value = hex_to_bin(*ns++);
149 j = (j << 4) | value;
159 *nm = wwn_to_u64(wwn);
164 * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
165 * store_fc_host_vport_create()
167 static int tcm_qla2xxx_npiv_parse_wwn(
173 unsigned int cnt = count;
179 /* count may include a LF at end of string */
180 if (name[cnt-1] == '\n')
183 /* validate we have enough characters for WWPN */
184 if ((cnt != (16+1+16)) || (name[16] != ':'))
187 rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
191 rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
198 static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
203 put_unaligned_be64(wwpn, b);
204 put_unaligned_be64(wwnn, b2);
205 return snprintf(buf, len,
206 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
207 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
208 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
209 b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
212 static char *tcm_qla2xxx_npiv_get_fabric_name(void)
214 return "qla2xxx_npiv";
217 static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
219 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
220 struct tcm_qla2xxx_tpg, se_tpg);
221 struct tcm_qla2xxx_lport *lport = tpg->lport;
224 switch (lport->lport_proto_id) {
225 case SCSI_PROTOCOL_FCP:
227 proto_id = fc_get_fabric_proto_ident(se_tpg);
234 static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
236 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
237 struct tcm_qla2xxx_tpg, se_tpg);
238 struct tcm_qla2xxx_lport *lport = tpg->lport;
240 return &lport->lport_name[0];
243 static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
245 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
246 struct tcm_qla2xxx_tpg, se_tpg);
247 struct tcm_qla2xxx_lport *lport = tpg->lport;
249 return &lport->lport_npiv_name[0];
252 static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
254 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
255 struct tcm_qla2xxx_tpg, se_tpg);
256 return tpg->lport_tpgt;
259 static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
264 static u32 tcm_qla2xxx_get_pr_transport_id(
265 struct se_portal_group *se_tpg,
266 struct se_node_acl *se_nacl,
267 struct t10_pr_registration *pr_reg,
271 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
272 struct tcm_qla2xxx_tpg, se_tpg);
273 struct tcm_qla2xxx_lport *lport = tpg->lport;
276 switch (lport->lport_proto_id) {
277 case SCSI_PROTOCOL_FCP:
279 ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
287 static u32 tcm_qla2xxx_get_pr_transport_id_len(
288 struct se_portal_group *se_tpg,
289 struct se_node_acl *se_nacl,
290 struct t10_pr_registration *pr_reg,
293 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
294 struct tcm_qla2xxx_tpg, se_tpg);
295 struct tcm_qla2xxx_lport *lport = tpg->lport;
298 switch (lport->lport_proto_id) {
299 case SCSI_PROTOCOL_FCP:
301 ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
309 static char *tcm_qla2xxx_parse_pr_out_transport_id(
310 struct se_portal_group *se_tpg,
313 char **port_nexus_ptr)
315 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
316 struct tcm_qla2xxx_tpg, se_tpg);
317 struct tcm_qla2xxx_lport *lport = tpg->lport;
320 switch (lport->lport_proto_id) {
321 case SCSI_PROTOCOL_FCP:
323 tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
331 static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
333 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
334 struct tcm_qla2xxx_tpg, se_tpg);
336 return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
339 static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
341 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
342 struct tcm_qla2xxx_tpg, se_tpg);
344 return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
347 static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
349 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
350 struct tcm_qla2xxx_tpg, se_tpg);
352 return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
355 static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
357 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
358 struct tcm_qla2xxx_tpg, se_tpg);
360 return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
363 static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
364 struct se_portal_group *se_tpg)
366 struct tcm_qla2xxx_nacl *nacl;
368 nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
370 pr_err("Unable to alocate struct tcm_qla2xxx_nacl\n");
374 return &nacl->se_node_acl;
377 static void tcm_qla2xxx_release_fabric_acl(
378 struct se_portal_group *se_tpg,
379 struct se_node_acl *se_nacl)
381 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
382 struct tcm_qla2xxx_nacl, se_node_acl);
386 static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
388 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
389 struct tcm_qla2xxx_tpg, se_tpg);
391 return tpg->lport_tpgt;
394 static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
396 struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
397 struct qla_tgt_mgmt_cmd, free_work);
399 transport_generic_free_cmd(&mcmd->se_cmd, 0);
403 * Called from qla_target_template->free_mcmd(), and will call
404 * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
405 * release callback. qla_hw_data->hardware_lock is expected to be held
407 static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
409 INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
410 queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
413 static void tcm_qla2xxx_complete_free(struct work_struct *work)
415 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
417 transport_generic_free_cmd(&cmd->se_cmd, 0);
421 * Called from qla_target_template->free_cmd(), and will call
422 * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
423 * release callback. qla_hw_data->hardware_lock is expected to be held
425 static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
427 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
428 queue_work(tcm_qla2xxx_free_wq, &cmd->work);
432 * Called from struct target_core_fabric_ops->check_stop_free() context
434 static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
436 return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
439 /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
440 * fabric descriptor @se_cmd command to release
442 static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
444 struct qla_tgt_cmd *cmd;
446 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
447 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
448 struct qla_tgt_mgmt_cmd, se_cmd);
453 cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
457 static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
459 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
460 struct scsi_qla_host *vha;
466 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
467 sess->tearing_down = 1;
468 target_splice_sess_cmd_list(se_sess);
469 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
474 static void tcm_qla2xxx_close_session(struct se_session *se_sess)
476 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
477 struct scsi_qla_host *vha;
483 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
484 qlt_unreg_sess(sess);
485 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
488 static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
494 * The LIO target core uses DMA_TO_DEVICE to mean that data is going
495 * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
496 * that data is coming from the target (eg handling a READ). However,
497 * this is just the opposite of what we have to tell the DMA mapping
498 * layer -- eg when handling a READ, the HBA will have to DMA the data
499 * out of memory so it can send it to the initiator, which means we
500 * need to use DMA_TO_DEVICE when we map the data.
502 static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
504 if (se_cmd->se_cmd_flags & SCF_BIDI)
505 return DMA_BIDIRECTIONAL;
507 switch (se_cmd->data_direction) {
509 return DMA_FROM_DEVICE;
510 case DMA_FROM_DEVICE:
511 return DMA_TO_DEVICE;
518 static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
520 struct qla_tgt_cmd *cmd = container_of(se_cmd,
521 struct qla_tgt_cmd, se_cmd);
523 cmd->bufflen = se_cmd->data_length;
524 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
526 cmd->sg_cnt = se_cmd->t_data_nents;
527 cmd->sg = se_cmd->t_data_sg;
530 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
531 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
533 return qlt_rdy_to_xfer(cmd);
536 static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
540 * Check for WRITE_PENDING status to determine if we need to wait for
541 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
543 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
544 if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
545 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
546 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
547 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
551 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
556 static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
561 static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
563 struct qla_tgt_cmd *cmd = container_of(se_cmd,
564 struct qla_tgt_cmd, se_cmd);
569 static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
575 * Called from process context in qla_target.c:qlt_do_work() code
577 static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
578 unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
579 int data_dir, int bidi)
581 struct se_cmd *se_cmd = &cmd->se_cmd;
582 struct se_session *se_sess;
583 struct qla_tgt_sess *sess;
584 int flags = TARGET_SCF_ACK_KREF;
587 flags |= TARGET_SCF_BIDI_OP;
591 pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
595 se_sess = sess->se_sess;
597 pr_err("Unable to locate active struct se_session\n");
601 target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
602 cmd->unpacked_lun, data_length, fcp_task_attr,
607 static void tcm_qla2xxx_do_rsp(struct work_struct *work)
609 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
611 * Dispatch ->queue_status from workqueue process context
613 transport_generic_request_failure(&cmd->se_cmd);
617 * Called from qla_target.c:qlt_do_ctio_completion()
619 static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
621 struct se_cmd *se_cmd = &cmd->se_cmd;
624 * Ensure that the complete FCP WRITE payload has been received.
625 * Otherwise return an exception via CHECK_CONDITION status.
627 if (!cmd->write_data_transferred) {
629 * Check if se_cmd has already been aborted via LUN_RESET, and
630 * waiting upon completion in tcm_qla2xxx_write_pending_status()
632 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
633 if (se_cmd->transport_state & CMD_T_ABORTED) {
634 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
635 complete(&se_cmd->t_transport_stop_comp);
638 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
640 se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
641 INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp);
642 queue_work(tcm_qla2xxx_free_wq, &cmd->work);
646 * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE
647 * status to the backstore processing thread.
649 return transport_generic_handle_data(&cmd->se_cmd);
653 * Called from qla_target.c:qlt_issue_task_mgmt()
655 static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
656 uint8_t tmr_func, uint32_t tag)
658 struct qla_tgt_sess *sess = mcmd->sess;
659 struct se_cmd *se_cmd = &mcmd->se_cmd;
661 return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
662 tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
665 static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
667 struct qla_tgt_cmd *cmd = container_of(se_cmd,
668 struct qla_tgt_cmd, se_cmd);
670 cmd->bufflen = se_cmd->data_length;
671 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
672 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
674 cmd->sg_cnt = se_cmd->t_data_nents;
675 cmd->sg = se_cmd->t_data_sg;
679 * Now queue completed DATA_IN the qla2xxx LLD and response ring
681 return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
682 se_cmd->scsi_status);
685 static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
687 struct qla_tgt_cmd *cmd = container_of(se_cmd,
688 struct qla_tgt_cmd, se_cmd);
689 int xmit_type = QLA_TGT_XMIT_STATUS;
691 cmd->bufflen = se_cmd->data_length;
695 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
696 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
698 if (se_cmd->data_direction == DMA_FROM_DEVICE) {
700 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
701 * for qla_tgt_xmit_response LLD code
703 se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
704 se_cmd->residual_count = se_cmd->data_length;
709 * Now queue status response to qla2xxx LLD code and response ring
711 return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
714 static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
716 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
717 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
718 struct qla_tgt_mgmt_cmd, se_cmd);
720 pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
721 mcmd, se_tmr->function, se_tmr->response);
723 * Do translation between TCM TM response codes and
724 * QLA2xxx FC TM response codes.
726 switch (se_tmr->response) {
727 case TMR_FUNCTION_COMPLETE:
728 mcmd->fc_tm_rsp = FC_TM_SUCCESS;
730 case TMR_TASK_DOES_NOT_EXIST:
731 mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
733 case TMR_FUNCTION_REJECTED:
734 mcmd->fc_tm_rsp = FC_TM_REJECT;
736 case TMR_LUN_DOES_NOT_EXIST:
738 mcmd->fc_tm_rsp = FC_TM_FAILED;
742 * Queue the TM response to QLA2xxx LLD to build a
743 * CTIO response packet.
745 qlt_xmit_tm_rsp(mcmd);
750 static u16 tcm_qla2xxx_get_fabric_sense_len(void)
755 static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
761 /* Local pointer to allocated TCM configfs fabric module */
762 struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
763 struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
765 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
766 struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
768 * Expected to be called with struct qla_hw_data->hardware_lock held
770 static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
772 struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
773 struct se_portal_group *se_tpg = se_nacl->se_tpg;
774 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
775 struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
776 struct tcm_qla2xxx_lport, lport_wwn);
777 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
778 struct tcm_qla2xxx_nacl, se_node_acl);
781 pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
783 node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
784 WARN_ON(node && (node != se_nacl));
786 pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
787 se_nacl, nacl->nport_wwnn, nacl->nport_id);
789 * Now clear the se_nacl and session pointers from our HW lport lookup
790 * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
792 * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
793 * target_wait_for_sess_cmds() before the session waits for outstanding
794 * I/O to complete, to avoid a race between session shutdown execution
795 * and incoming ATIOs or TMRs picking up a stale se_node_act reference.
797 tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
800 static void tcm_qla2xxx_release_session(struct kref *kref)
802 struct se_session *se_sess = container_of(kref,
803 struct se_session, sess_kref);
805 qlt_unreg_sess(se_sess->fabric_sess_ptr);
808 static void tcm_qla2xxx_put_session(struct se_session *se_sess)
810 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
811 struct qla_hw_data *ha = sess->vha->hw;
814 spin_lock_irqsave(&ha->hardware_lock, flags);
815 kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
816 spin_unlock_irqrestore(&ha->hardware_lock, flags);
819 static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
821 tcm_qla2xxx_put_session(sess->se_sess);
824 static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
826 tcm_qla2xxx_shutdown_session(sess->se_sess);
829 static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
830 struct se_portal_group *se_tpg,
831 struct config_group *group,
834 struct se_node_acl *se_nacl, *se_nacl_new;
835 struct tcm_qla2xxx_nacl *nacl;
837 u32 qla2xxx_nexus_depth;
839 if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
840 return ERR_PTR(-EINVAL);
842 se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
844 return ERR_PTR(-ENOMEM);
845 /* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
846 qla2xxx_nexus_depth = 1;
849 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
850 * when converting a NodeACL from demo mode -> explict
852 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
853 name, qla2xxx_nexus_depth);
854 if (IS_ERR(se_nacl)) {
855 tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
859 * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
861 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
862 nacl->nport_wwnn = wwnn;
863 tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
868 static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
870 struct se_portal_group *se_tpg = se_acl->se_tpg;
871 struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
872 struct tcm_qla2xxx_nacl, se_node_acl);
874 core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
878 /* Start items for tcm_qla2xxx_tpg_attrib_cit */
880 #define DEF_QLA_TPG_ATTRIB(name) \
882 static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
883 struct se_portal_group *se_tpg, \
886 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
887 struct tcm_qla2xxx_tpg, se_tpg); \
889 return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \
892 static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \
893 struct se_portal_group *se_tpg, \
897 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
898 struct tcm_qla2xxx_tpg, se_tpg); \
902 ret = kstrtoul(page, 0, &val); \
904 pr_err("kstrtoul() failed with" \
905 " ret: %d\n", ret); \
908 ret = tcm_qla2xxx_set_attrib_##name(tpg, val); \
910 return (!ret) ? count : -EINVAL; \
913 #define DEF_QLA_TPG_ATTR_BOOL(_name) \
915 static int tcm_qla2xxx_set_attrib_##_name( \
916 struct tcm_qla2xxx_tpg *tpg, \
919 struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \
921 if ((val != 0) && (val != 1)) { \
922 pr_err("Illegal boolean value %lu\n", val); \
930 #define QLA_TPG_ATTR(_name, _mode) \
931 TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
934 * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
936 DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
937 DEF_QLA_TPG_ATTRIB(generate_node_acls);
938 QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
 * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
943 DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
944 DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
945 QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
948 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
950 DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
951 DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
952 QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
955 * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
957 DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
958 DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
959 QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
961 static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
962 &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
963 &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
964 &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
965 &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
969 /* End items for tcm_qla2xxx_tpg_attrib_cit */
971 static ssize_t tcm_qla2xxx_tpg_show_enable(
972 struct se_portal_group *se_tpg,
975 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
976 struct tcm_qla2xxx_tpg, se_tpg);
978 return snprintf(page, PAGE_SIZE, "%d\n",
979 atomic_read(&tpg->lport_tpg_enabled));
982 static ssize_t tcm_qla2xxx_tpg_store_enable(
983 struct se_portal_group *se_tpg,
987 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
988 struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
989 struct tcm_qla2xxx_lport, lport_wwn);
990 struct scsi_qla_host *vha = lport->qla_vha;
991 struct qla_hw_data *ha = vha->hw;
992 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
993 struct tcm_qla2xxx_tpg, se_tpg);
997 rc = kstrtoul(page, 0, &op);
999 pr_err("kstrtoul() returned %d\n", rc);
1002 if ((op != 1) && (op != 0)) {
1003 pr_err("Illegal value for tpg_enable: %lu\n", op);
1008 atomic_set(&tpg->lport_tpg_enabled, 1);
1009 qlt_enable_vha(vha);
1011 if (!ha->tgt.qla_tgt) {
1012 pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
1015 atomic_set(&tpg->lport_tpg_enabled, 0);
1016 qlt_stop_phase1(ha->tgt.qla_tgt);
1022 TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
1024 static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
1025 &tcm_qla2xxx_tpg_enable.attr,
1029 static struct se_portal_group *tcm_qla2xxx_make_tpg(
1031 struct config_group *group,
1034 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1035 struct tcm_qla2xxx_lport, lport_wwn);
1036 struct tcm_qla2xxx_tpg *tpg;
1040 if (strstr(name, "tpgt_") != name)
1041 return ERR_PTR(-EINVAL);
1042 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
1043 return ERR_PTR(-EINVAL);
1045 if (!lport->qla_npiv_vp && (tpgt != 1)) {
1046 pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
1047 return ERR_PTR(-ENOSYS);
1050 tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
1052 pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
1053 return ERR_PTR(-ENOMEM);
1056 tpg->lport_tpgt = tpgt;
1058 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
1061 QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
1062 QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
1063 QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
1065 ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
1066 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1072 * Setup local TPG=1 pointer for non NPIV mode.
1074 if (lport->qla_npiv_vp == NULL)
1077 return &tpg->se_tpg;
1080 static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
1082 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
1083 struct tcm_qla2xxx_tpg, se_tpg);
1084 struct tcm_qla2xxx_lport *lport = tpg->lport;
1085 struct scsi_qla_host *vha = lport->qla_vha;
1086 struct qla_hw_data *ha = vha->hw;
1088 * Call into qla2x_target.c LLD logic to shutdown the active
1089 * FC Nexuses and disable target mode operation for this qla_hw_data
1091 if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
1092 qlt_stop_phase1(ha->tgt.qla_tgt);
1094 core_tpg_deregister(se_tpg);
1096 * Clear local TPG=1 pointer for non NPIV mode.
1098 if (lport->qla_npiv_vp == NULL)
1099 lport->tpg_1 = NULL;
1104 static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
1106 struct config_group *group,
1109 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1110 struct tcm_qla2xxx_lport, lport_wwn);
1111 struct tcm_qla2xxx_tpg *tpg;
1115 if (strstr(name, "tpgt_") != name)
1116 return ERR_PTR(-EINVAL);
1117 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
1118 return ERR_PTR(-EINVAL);
1120 tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
1122 pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
1123 return ERR_PTR(-ENOMEM);
1126 tpg->lport_tpgt = tpgt;
1128 ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
1129 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1134 return &tpg->se_tpg;
1138 * Expected to be called with struct qla_hw_data->hardware_lock held
1140 static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
1141 scsi_qla_host_t *vha,
1142 const uint8_t *s_id)
1144 struct qla_hw_data *ha = vha->hw;
1145 struct tcm_qla2xxx_lport *lport;
1146 struct se_node_acl *se_nacl;
1147 struct tcm_qla2xxx_nacl *nacl;
1150 lport = ha->tgt.target_lport_ptr;
1152 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1157 key = (((unsigned long)s_id[0] << 16) |
1158 ((unsigned long)s_id[1] << 8) |
1159 (unsigned long)s_id[2]);
1160 pr_debug("find_sess_by_s_id: 0x%06x\n", key);
1162 se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
1164 pr_debug("Unable to locate s_id: 0x%06x\n", key);
1167 pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
1168 se_nacl, se_nacl->initiatorname);
1170 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1171 if (!nacl->qla_tgt_sess) {
1172 pr_err("Unable to locate struct qla_tgt_sess\n");
1176 return nacl->qla_tgt_sess;
1180 * Expected to be called with struct qla_hw_data->hardware_lock held
1182 static void tcm_qla2xxx_set_sess_by_s_id(
1183 struct tcm_qla2xxx_lport *lport,
1184 struct se_node_acl *new_se_nacl,
1185 struct tcm_qla2xxx_nacl *nacl,
1186 struct se_session *se_sess,
1187 struct qla_tgt_sess *qla_tgt_sess,
1194 key = (((unsigned long)s_id[0] << 16) |
1195 ((unsigned long)s_id[1] << 8) |
1196 (unsigned long)s_id[2]);
1197 pr_debug("set_sess_by_s_id: %06x\n", key);
1199 slot = btree_lookup32(&lport->lport_fcport_map, key);
1202 pr_debug("Setting up new fc_port entry to new_se_nacl\n");
1203 nacl->nport_id = key;
1204 rc = btree_insert32(&lport->lport_fcport_map, key,
1205 new_se_nacl, GFP_ATOMIC);
1207 printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
1210 pr_debug("Wiping nonexisting fc_port entry\n");
1213 qla_tgt_sess->se_sess = se_sess;
1214 nacl->qla_tgt_sess = qla_tgt_sess;
1218 if (nacl->qla_tgt_sess) {
1219 if (new_se_nacl == NULL) {
1220 pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
1221 btree_remove32(&lport->lport_fcport_map, key);
1222 nacl->qla_tgt_sess = NULL;
1225 pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
1226 btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
1227 qla_tgt_sess->se_sess = se_sess;
1228 nacl->qla_tgt_sess = qla_tgt_sess;
1232 if (new_se_nacl == NULL) {
1233 pr_debug("Clearing existing fc_port entry\n");
1234 btree_remove32(&lport->lport_fcport_map, key);
1238 pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
1239 btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
1240 qla_tgt_sess->se_sess = se_sess;
1241 nacl->qla_tgt_sess = qla_tgt_sess;
1243 pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
1244 nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
1248 * Expected to be called with struct qla_hw_data->hardware_lock held
1250 static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
1251 scsi_qla_host_t *vha,
1252 const uint16_t loop_id)
1254 struct qla_hw_data *ha = vha->hw;
1255 struct tcm_qla2xxx_lport *lport;
1256 struct se_node_acl *se_nacl;
1257 struct tcm_qla2xxx_nacl *nacl;
1258 struct tcm_qla2xxx_fc_loopid *fc_loopid;
1260 lport = ha->tgt.target_lport_ptr;
1262 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1267 pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
1269 fc_loopid = lport->lport_loopid_map + loop_id;
1270 se_nacl = fc_loopid->se_nacl;
1272 pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
1277 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1279 if (!nacl->qla_tgt_sess) {
1280 pr_err("Unable to locate struct qla_tgt_sess\n");
1284 return nacl->qla_tgt_sess;
/*
 * Install (or clear, when new_se_nacl == NULL) the se_nacl/session mapping
 * for a given FC loop_id in lport->lport_loopid_map.
 *
 * The branches visible below distinguish: an existing saved_nacl with an
 * active nacl->qla_tgt_sess, vs. a stale mapping with no active session —
 * in each case fc_loopid->se_nacl, qla_tgt_sess->se_sess and
 * nacl->qla_tgt_sess are brought in sync with the new nexus.
 *
 * NOTE(review): truncated extraction — the "uint16_t loop_id" parameter
 * line, several if/else conditions (e.g. the "if (!saved_nacl)" guard) and
 * closing braces are missing from this chunk; confirm branch structure
 * against the complete file.
 */
1288 * Expected to be called with struct qla_hw_data->hardware_lock held
1290 static void tcm_qla2xxx_set_sess_by_loop_id(
1291 struct tcm_qla2xxx_lport *lport,
1292 struct se_node_acl *new_se_nacl,
1293 struct tcm_qla2xxx_nacl *nacl,
1294 struct se_session *se_sess,
1295 struct qla_tgt_sess *qla_tgt_sess,
1298 struct se_node_acl *saved_nacl;
1299 struct tcm_qla2xxx_fc_loopid *fc_loopid;
1301 pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
1303 fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
1304 lport->lport_loopid_map)[loop_id];
1306 saved_nacl = fc_loopid->se_nacl;
/* No previous mapping for this loop_id: simply install the new one */
1308 pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
1309 fc_loopid->se_nacl = new_se_nacl;
1310 if (qla_tgt_sess->se_sess != se_sess)
1311 qla_tgt_sess->se_sess = se_sess;
1312 if (nacl->qla_tgt_sess != qla_tgt_sess)
1313 nacl->qla_tgt_sess = qla_tgt_sess;
/* Previous mapping exists and the nacl still owns an active session */
1317 if (nacl->qla_tgt_sess) {
1318 if (new_se_nacl == NULL) {
1319 pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
1320 fc_loopid->se_nacl = NULL;
1321 nacl->qla_tgt_sess = NULL;
1325 pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
1326 fc_loopid->se_nacl = new_se_nacl;
1327 if (qla_tgt_sess->se_sess != se_sess)
1328 qla_tgt_sess->se_sess = se_sess;
1329 if (nacl->qla_tgt_sess != qla_tgt_sess)
1330 nacl->qla_tgt_sess = qla_tgt_sess;
/* Stale mapping with no active nacl->qla_tgt_sess */
1334 if (new_se_nacl == NULL) {
1335 pr_debug("Clearing fc_loopid->se_nacl\n");
1336 fc_loopid->se_nacl = NULL;
1340 pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
1341 fc_loopid->se_nacl = new_se_nacl;
1342 if (qla_tgt_sess->se_sess != se_sess)
1343 qla_tgt_sess->se_sess = se_sess;
1344 if (nacl->qla_tgt_sess != qla_tgt_sess)
1345 nacl->qla_tgt_sess = qla_tgt_sess;
1347 pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
1348 nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
/*
 * Drop both HW lookup-table entries (S_ID btree and loop_id array) for a
 * session being torn down, by delegating to the two set_sess_by_* helpers
 * with new_se_nacl == NULL.
 *
 * be_sid[] is the 24-bit FC S_ID serialized big-endian
 * (domain, area, al_pa) as expected by the s_id-keyed lookup.
 */
1352 * Should always be called with qla_hw_data->hardware_lock held.
1354 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
1355 struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
1357 struct se_session *se_sess = sess->se_sess;
1358 unsigned char be_sid[3];
1360 be_sid[0] = sess->s_id.b.domain;
1361 be_sid[1] = sess->s_id.b.area;
1362 be_sid[2] = sess->s_id.b.al_pa;
1364 tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
1366 tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
1367 sess, sess->loop_id);
/*
 * qla_tgt_func_tmpl->free_session() callback: release the TCM se_session
 * bound to a qla_tgt_sess.
 *
 * Waits for all outstanding se_cmds on the session to complete
 * (target_wait_for_sess_cmds), then deregisters the session from configfs
 * and from the transport core.  Must run in process context — enforced by
 * the BUG_ON(in_interrupt()).
 *
 * NOTE(review): truncated extraction — the "if (!se_sess)" / "if (!lport)"
 * guard lines around the pr_err() calls and the closing braces are missing
 * here.
 */
1370 static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
1372 struct qla_tgt *tgt = sess->tgt;
1373 struct qla_hw_data *ha = tgt->ha;
1374 struct se_session *se_sess;
1375 struct se_node_acl *se_nacl;
1376 struct tcm_qla2xxx_lport *lport;
1377 struct tcm_qla2xxx_nacl *nacl;
1379 BUG_ON(in_interrupt());
1381 se_sess = sess->se_sess;
1383 pr_err("struct qla_tgt_sess->se_sess is NULL\n");
1387 se_nacl = se_sess->se_node_acl;
1388 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1390 lport = ha->tgt.target_lport_ptr;
1392 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
/* Block until all commands on this nexus have completed */
1396 target_wait_for_sess_cmds(se_sess, 0);
1398 transport_deregister_session_configfs(sess->se_sess);
1399 transport_deregister_session(sess->se_sess);
/*
 * qla_tgt_func_tmpl->check_initiator_node_acl() callback: locate or create
 * the se_node_acl for an FC initiator logging in, allocate a TCM
 * se_session, wire the new nexus into the lport's S_ID and loop_id lookup
 * tables under hardware_lock, and register the session with TCM.
 *
 * The WWPN is formatted into the colon-separated "xx:xx:..." string that
 * core_tpg_check_initiator_node_acl() matches against explicit configfs
 * NodeACLs (or demo mode).
 *
 * NOTE(review): truncated extraction — the "s_id"/"loop_id"/"qla_tgt_sess"
 * parameter lines, the "if (!lport)" / "tpg = lport->tpg_1" /
 * "if (!tpg)" guards and several returns are missing from this chunk.
 * Also, "lcoate" below is a typo inside a runtime string literal; fix it
 * in a behavior change, not a documentation pass.
 */
1406 static int tcm_qla2xxx_check_initiator_node_acl(
1407 scsi_qla_host_t *vha,
1408 unsigned char *fc_wwpn,
1413 struct qla_hw_data *ha = vha->hw;
1414 struct tcm_qla2xxx_lport *lport;
1415 struct tcm_qla2xxx_tpg *tpg;
1416 struct tcm_qla2xxx_nacl *nacl;
1417 struct se_portal_group *se_tpg;
1418 struct se_node_acl *se_nacl;
1419 struct se_session *se_sess;
1420 struct qla_tgt_sess *sess = qla_tgt_sess;
1421 unsigned char port_name[36];
1422 unsigned long flags;
1424 lport = ha->tgt.target_lport_ptr;
1426 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1431 * Locate the TPG=1 reference..
1435 pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
1438 se_tpg = &tpg->se_tpg;
1440 se_sess = transport_init_session();
1441 if (IS_ERR(se_sess)) {
1442 pr_err("Unable to initialize struct se_session\n");
1443 return PTR_ERR(se_sess);
1446 * Format the FCP Initiator port_name into colon seperated values to
1447 * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
1449 memset(&port_name, 0, 36);
1450 snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1451 fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
1452 fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
1454 * Locate our struct se_node_acl either from an explict NodeACL created
1455 * via ConfigFS, or via running in TPG demo mode.
1457 se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
/* ACL lookup failed: free the half-initialized session and bail */
1459 if (!se_sess->se_node_acl) {
1460 transport_free_session(se_sess);
1463 se_nacl = se_sess->se_node_acl;
1464 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1466 * And now setup the new se_nacl and session pointers into our HW lport
1467 * mappings for fabric S_ID and LOOP_ID.
/* Both lookup tables are updated atomically w.r.t. the HW lock */
1469 spin_lock_irqsave(&ha->hardware_lock, flags);
1470 tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
1471 qla_tgt_sess, s_id);
1472 tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
1473 qla_tgt_sess, loop_id);
1474 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1476 * Finally register the new FC Nexus with TCM
1478 __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
/*
 * qla_tgt_func_tmpl vtable handed to the qla2xxx LLD at lport registration
 * (see qlt_lport_register() in tcm_qla2xxx_make_lport): the LLD's target
 * mode I/O path calls back into tcm_qla2xxx through these hooks.
 *
 * NOTE(review): truncated extraction — the closing "};" of this
 * initializer is missing from this chunk.
 */
1484 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
1486 static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
1487 .handle_cmd = tcm_qla2xxx_handle_cmd,
1488 .handle_data = tcm_qla2xxx_handle_data,
1489 .handle_tmr = tcm_qla2xxx_handle_tmr,
1490 .free_cmd = tcm_qla2xxx_free_cmd,
1491 .free_mcmd = tcm_qla2xxx_free_mcmd,
1492 .free_session = tcm_qla2xxx_free_session,
1493 .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
1494 .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
1495 .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
1496 .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
1497 .put_sess = tcm_qla2xxx_put_sess,
1498 .shutdown_sess = tcm_qla2xxx_shutdown_sess,
/*
 * Allocate the per-lport session lookup structures: a 32-bit btree keyed
 * by FC S_ID (lport_fcport_map) and a flat vmalloc'd array of 65536
 * tcm_qla2xxx_fc_loopid entries indexed by loop_id (lport_loopid_map).
 *
 * On loopid_map allocation failure the btree is destroyed again before
 * returning (error return value itself is on a truncated line).
 *
 * NOTE(review): truncated extraction — the "if (rc < 0)" guard, the
 * -ENOMEM return and the final "return 0;" are missing from this chunk.
 */
1501 static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
1505 rc = btree_init32(&lport->lport_fcport_map);
1507 pr_err("Unable to initialize lport->lport_fcport_map btree\n");
/* One entry per possible 16-bit loop_id -> ~64K * entry size via vmalloc */
1511 lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
1513 if (!lport->lport_loopid_map) {
1514 pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
1515 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1516 btree_destroy32(&lport->lport_fcport_map);
1519 memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
1521 pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
1522 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
/*
 * Callback invoked by qlt_lport_register() once the LLD has matched our
 * WWPN to a scsi_qla_host: record the vha back-pointer on the lport.
 *
 * NOTE(review): truncated extraction — the "return 0;" and closing brace
 * are missing from this chunk.
 */
1526 static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
1528 struct qla_hw_data *ha = vha->hw;
1529 struct tcm_qla2xxx_lport *lport;
1531 * Setup local pointer to vha, NPIV VP pointer (if present) and
1532 * vha->tcm_lport pointer
1534 lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
1535 lport->qla_vha = vha;
/*
 * configfs fabric_make_wwn hook: create a tcm_qla2xxx lport for the WWPN
 * parsed from the configfs directory name.
 *
 * Flow: parse/format WWPN -> kzalloc lport -> init lookup tables
 * (tcm_qla2xxx_init_lport) -> register with the qla2xxx LLD
 * (qlt_lport_register, which calls tcm_qla2xxx_lport_register_cb).
 * The error path below unwinds loopid_map and the fcport btree before
 * freeing the lport and returning ERR_PTR(ret).
 *
 * NOTE(review): truncated extraction — the "if (ret != 0) goto out;"
 * guards, the "out:" label and kfree(lport) are missing from this chunk.
 */
1540 static struct se_wwn *tcm_qla2xxx_make_lport(
1541 struct target_fabric_configfs *tf,
1542 struct config_group *group,
1545 struct tcm_qla2xxx_lport *lport;
1549 if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
1550 return ERR_PTR(-EINVAL);
1552 lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
1554 pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
1555 return ERR_PTR(-ENOMEM);
1557 lport->lport_wwpn = wwpn;
1558 tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
1561 ret = tcm_qla2xxx_init_lport(lport);
1565 ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
1566 tcm_qla2xxx_lport_register_cb, lport);
1570 return &lport->lport_wwn;
/* Error unwind: release lookup structures allocated by init_lport */
1572 vfree(lport->lport_loopid_map);
1573 btree_destroy32(&lport->lport_fcport_map);
1576 return ERR_PTR(ret);
/*
 * configfs fabric_drop_wwn hook: tear down an lport.
 *
 * Completes qla_tgt shutdown (phase2, if phase1 already ran via
 * tcm_qla2xxx_drop_tpg), deregisters from the LLD, then frees the loop_id
 * map and drains/destroys the S_ID btree entry-by-entry.
 *
 * NOTE(review): truncated extraction — local declarations for the btree
 * iteration key and the final kfree(lport) are missing from this chunk.
 */
1579 static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
1581 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1582 struct tcm_qla2xxx_lport, lport_wwn);
1583 struct scsi_qla_host *vha = lport->qla_vha;
1584 struct qla_hw_data *ha = vha->hw;
1585 struct se_node_acl *node;
1589 * Call into qla2x_target.c LLD logic to complete the
1590 * shutdown of struct qla_tgt after the call to
1591 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
1593 if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
1594 qlt_stop_phase2(ha->tgt.qla_tgt);
1596 qlt_lport_deregister(vha);
1598 vfree(lport->lport_loopid_map);
/* _safe iteration: entries are removed while walking the btree */
1599 btree_for_each_safe32(&lport->lport_fcport_map, key, node)
1600 btree_remove32(&lport->lport_fcport_map, key);
1601 btree_destroy32(&lport->lport_fcport_map);
/*
 * configfs fabric_make_wwn hook for the NPIV fabric: parse the combined
 * "wwnn,wwpn" name, allocate an lport and format its NPIV name.
 *
 * The FIXME below marks that actual NPIV vport creation is not implemented
 * in this version; the function only fills in the lport bookkeeping.
 *
 * NOTE(review): truncated extraction — the "int ret;" declaration, the
 * error label and kfree(lport) on the ERR_PTR path are missing from this
 * chunk.
 */
1605 static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
1606 struct target_fabric_configfs *tf,
1607 struct config_group *group,
1610 struct tcm_qla2xxx_lport *lport;
1611 u64 npiv_wwpn, npiv_wwnn;
1614 if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
1615 &npiv_wwpn, &npiv_wwnn) < 0)
1616 return ERR_PTR(-EINVAL);
1618 lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
1620 pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
1621 return ERR_PTR(-ENOMEM);
1623 lport->lport_npiv_wwpn = npiv_wwpn;
1624 lport->lport_npiv_wwnn = npiv_wwnn;
1625 tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
1626 TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
1628 /* FIXME: tcm_qla2xxx_npiv_make_lport */
1633 return &lport->lport_wwn;
1636 return ERR_PTR(ret);
/*
 * configfs fabric_drop_wwn hook for the NPIV fabric: terminate the
 * fc_vport associated with this lport.
 *
 * NOTE(review): truncated extraction — the scsi_host_put()/kfree()
 * cleanup and closing brace are missing from this chunk; 'sh' is declared
 * here but its use is not visible.
 */
1639 static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
1641 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1642 struct tcm_qla2xxx_lport, lport_wwn);
1643 struct scsi_qla_host *vha = lport->qla_vha;
1644 struct Scsi_Host *sh = vha->host;
1646 * Notify libfc that we want to release the lport->npiv_vport
1648 fc_vport_terminate(lport->npiv_vport);
/*
 * Read-only configfs WWN attribute "version": reports the fabric module
 * version plus the running kernel's sysname/machine/UTS release.
 * TF_WWN_ATTR_RO() generates the tcm_qla2xxx_wwn_version attribute
 * wired into tcm_qla2xxx_wwn_attrs[] below.
 */
1655 static ssize_t tcm_qla2xxx_wwn_show_attr_version(
1656 struct target_fabric_configfs *tf,
1659 return sprintf(page,
1660 "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
1661 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
1662 utsname()->machine);
1665 TF_WWN_ATTR_RO(tcm_qla2xxx, version);
/* NULL-terminated attribute table for the wwn configfs group */
1667 static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
1668 &tcm_qla2xxx_wwn_version.attr,
/*
 * target_core_fabric_ops for the primary "qla2xxx" fabric: TCM core calls
 * back through these for TPG attributes, session lifecycle and the I/O
 * completion path (write_pending/queue_data_in/queue_status/...).
 * Demo-mode policy here is permissive compared to the NPIV variant below.
 *
 * NOTE(review): truncated extraction — the closing "};" is missing from
 * this chunk.
 */
1672 static struct target_core_fabric_ops tcm_qla2xxx_ops = {
1673 .get_fabric_name = tcm_qla2xxx_get_fabric_name,
1674 .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
1675 .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
1676 .tpg_get_tag = tcm_qla2xxx_get_tag,
1677 .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
1678 .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
1679 .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
1680 .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
1681 .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
1682 .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
1683 .tpg_check_demo_mode_write_protect =
1684 tcm_qla2xxx_check_demo_write_protect,
1685 .tpg_check_prod_mode_write_protect =
1686 tcm_qla2xxx_check_prod_write_protect,
1687 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
1688 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
1689 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
1690 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
1691 .new_cmd_map = NULL,
1692 .check_stop_free = tcm_qla2xxx_check_stop_free,
1693 .release_cmd = tcm_qla2xxx_release_cmd,
1694 .put_session = tcm_qla2xxx_put_session,
1695 .shutdown_session = tcm_qla2xxx_shutdown_session,
1696 .close_session = tcm_qla2xxx_close_session,
1697 .sess_get_index = tcm_qla2xxx_sess_get_index,
1698 .sess_get_initiator_sid = NULL,
1699 .write_pending = tcm_qla2xxx_write_pending,
1700 .write_pending_status = tcm_qla2xxx_write_pending_status,
1701 .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
1702 .get_task_tag = tcm_qla2xxx_get_task_tag,
1703 .get_cmd_state = tcm_qla2xxx_get_cmd_state,
1704 .queue_data_in = tcm_qla2xxx_queue_data_in,
1705 .queue_status = tcm_qla2xxx_queue_status,
1706 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
1707 .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
1708 .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
1710 * Setup function pointers for generic logic in
1711 * target_core_fabric_configfs.c
1713 .fabric_make_wwn = tcm_qla2xxx_make_lport,
1714 .fabric_drop_wwn = tcm_qla2xxx_drop_lport,
1715 .fabric_make_tpg = tcm_qla2xxx_make_tpg,
1716 .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
1717 .fabric_post_link = NULL,
1718 .fabric_pre_unlink = NULL,
1719 .fabric_make_np = NULL,
1720 .fabric_drop_np = NULL,
1721 .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
1722 .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
/*
 * target_core_fabric_ops for the "qla2xxx_npiv" fabric.  Differs from
 * tcm_qla2xxx_ops in: NPIV-specific fabric name/WWN getters, hard-coded
 * demo-mode policy (check_false/check_true constants instead of
 * TPG-attribute-driven checks), NPIV make/drop wwn hooks, and the absence
 * of the new_cmd_map/check_stop_free entries.
 *
 * NOTE(review): truncated extraction — the closing "};" is missing from
 * this chunk.
 */
1725 static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
1726 .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
1727 .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
1728 .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
1729 .tpg_get_tag = tcm_qla2xxx_get_tag,
1730 .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
1731 .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
1732 .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
1733 .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
1734 .tpg_check_demo_mode = tcm_qla2xxx_check_false,
1735 .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true,
1736 .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
1737 .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
1738 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
1739 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
1740 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
1741 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
1742 .release_cmd = tcm_qla2xxx_release_cmd,
1743 .put_session = tcm_qla2xxx_put_session,
1744 .shutdown_session = tcm_qla2xxx_shutdown_session,
1745 .close_session = tcm_qla2xxx_close_session,
1746 .sess_get_index = tcm_qla2xxx_sess_get_index,
1747 .sess_get_initiator_sid = NULL,
1748 .write_pending = tcm_qla2xxx_write_pending,
1749 .write_pending_status = tcm_qla2xxx_write_pending_status,
1750 .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
1751 .get_task_tag = tcm_qla2xxx_get_task_tag,
1752 .get_cmd_state = tcm_qla2xxx_get_cmd_state,
1753 .queue_data_in = tcm_qla2xxx_queue_data_in,
1754 .queue_status = tcm_qla2xxx_queue_status,
1755 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
1756 .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
1757 .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
1759 * Setup function pointers for generic logic in
1760 * target_core_fabric_configfs.c
1762 .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
1763 .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
1764 .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
1765 .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
1766 .fabric_post_link = NULL,
1767 .fabric_pre_unlink = NULL,
1768 .fabric_make_np = NULL,
1769 .fabric_drop_np = NULL,
1770 .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
1771 .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
/*
 * Module-init helper: register both fabrics ("qla2xxx" and "qla2xxx_npiv")
 * with TCM configfs and create the two driver workqueues.
 *
 * Unwind order on failure (visible at the bottom): destroy free_wq ->
 * deregister npiv_fabric -> deregister fabric, then return ret.
 *
 * NOTE(review): truncated extraction — "int ret;", several "if (ret < 0)
 * goto ..." guards, the error labels, the alloc_workqueue() flag
 * arguments for tcm_qla2xxx_free_wq, and "return 0;" are missing from
 * this chunk.
 */
1774 static int tcm_qla2xxx_register_configfs(void)
1776 struct target_fabric_configfs *fabric, *npiv_fabric;
1779 pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
1780 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
1781 utsname()->machine);
1783 * Register the top level struct config_item_type with TCM core
1785 fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
1786 if (IS_ERR(fabric)) {
1787 pr_err("target_fabric_configfs_init() failed\n");
1788 return PTR_ERR(fabric);
1791 * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
1793 fabric->tf_ops = tcm_qla2xxx_ops;
1795 * Setup default attribute lists for various fabric->tf_cit_tmpl
1797 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
1798 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
1799 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
1800 tcm_qla2xxx_tpg_attrib_attrs;
1801 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1802 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1803 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1804 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1805 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1806 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1808 * Register the fabric for use within TCM
1810 ret = target_fabric_configfs_register(fabric);
1812 pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
1816 * Setup our local pointer to *fabric
1818 tcm_qla2xxx_fabric_configfs = fabric;
1819 pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
1822 * Register the top level struct config_item_type for NPIV with TCM core
1824 npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
1825 if (IS_ERR(npiv_fabric)) {
1826 pr_err("target_fabric_configfs_init() failed\n");
1827 ret = PTR_ERR(npiv_fabric);
1831 * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
1833 npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
1835 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
/* NPIV fabric only exposes the wwn attrs; all TPG-level cits are NULL */
1837 TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
1838 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
1839 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
1840 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1841 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1842 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1843 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1844 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1845 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1847 * Register the npiv_fabric for use within TCM
1849 ret = target_fabric_configfs_register(npiv_fabric);
1851 pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
1855 * Setup our local pointer to *npiv_fabric
1857 tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
1858 pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
1860 tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
1862 if (!tcm_qla2xxx_free_wq) {
1864 goto out_fabric_npiv;
1867 tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
1868 if (!tcm_qla2xxx_cmd_wq) {
/* Error unwind: reverse order of the successful setup steps above */
1876 destroy_workqueue(tcm_qla2xxx_free_wq);
1878 target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
1880 target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
/*
 * Module-exit helper: mirror of tcm_qla2xxx_register_configfs().
 * Destroys both workqueues, then deregisters the base and NPIV fabrics
 * from TCM configfs, NULLing the module-level pointers afterwards.
 */
1884 static void tcm_qla2xxx_deregister_configfs(void)
1886 destroy_workqueue(tcm_qla2xxx_cmd_wq);
1887 destroy_workqueue(tcm_qla2xxx_free_wq);
1889 target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
1890 tcm_qla2xxx_fabric_configfs = NULL;
1891 pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
1893 target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
1894 tcm_qla2xxx_npiv_fabric_configfs = NULL;
1895 pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
/*
 * Module entry/exit points: init registers the configfs fabrics and
 * workqueues; exit tears them down again.
 *
 * NOTE(review): truncated extraction — the "if (ret < 0)" / "return"
 * lines of tcm_qla2xxx_init() and the MODULE_VERSION/AUTHOR lines (if
 * any) are missing from this chunk.
 */
1898 static int __init tcm_qla2xxx_init(void)
1902 ret = tcm_qla2xxx_register_configfs();
1909 static void __exit tcm_qla2xxx_exit(void)
1911 tcm_qla2xxx_deregister_configfs();
1914 MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
1915 MODULE_LICENSE("GPL");
1916 module_init(tcm_qla2xxx_init);
1917 module_exit(tcm_qla2xxx_exit);