Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 13 Jun 2014 05:38:32 +0000 (22:38 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 13 Jun 2014 05:38:32 +0000 (22:38 -0700)
Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - Add support for T10 PI pass-through between vhost-scsi +
     virtio-scsi (MST + Paolo + MKP + nab)
   - Add support for T10 PI in qla2xxx target mode (Quinn + MKP + hch +
     nab, merged through scsi.git)
   - Add support for percpu-ida pre-allocation in qla2xxx target code
     (Quinn + nab)
   - A number of iser-target fixes related to hardening the network
     portal shutdown path (Sagi + Slava)
   - Fix response length residual handling for a number of control CDBs
     (Roland + Christophe V.)
   - Various iscsi RFC conformance fixes in the CHAP authentication path
     (Tejas and Calsoft folks + nab)
   - Return TASK_SET_FULL status for tcm_fc(FCoE) DataIn + Response
     failures (Vasu + Jun + nab)
   - Fix long-standing ABORT_TASK + session reset hang (nab)
   - Convert iser-initiator + iser-target to include T10 bytes into EDTL
      (Sagi + Or + MKP + Mike Christie; see the sketch after this list)
   - Fix NULL pointer dereference regression related to XCOPY introduced
     in v3.15 + CC'ed to v3.12.y (nab)"
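
The EDTL conversion called out above reduces to simple arithmetic: when
T10 PI travels with the data, each logical block carries an extra 8-byte
DIF tuple. A minimal sketch of that calculation (hypothetical helper name;
the in-tree scsi_transfer_length helper added by this series lives in
include/scsi/scsi_cmnd.h and may differ in detail):

#include <stdbool.h>

static inline unsigned int pi_transfer_length(unsigned int data_bytes,
                                              unsigned int blk_sz,
                                              bool pi_on_wire)
{
        /* One 8-byte DIF tuple per logical block: a 4096-byte transfer
         * in 512-byte blocks adds 8 * 8 = 64 bytes, for an EDTL of 4160. */
        if (!pi_on_wire)
                return data_bytes;
        return data_bytes + (data_bytes / blk_sz) * 8;
}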

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (34 commits)
  target: Fix NULL pointer dereference for XCOPY in target_put_sess_cmd
  vhost-scsi: Include prot_bytes into expected data transfer length
  TARGET/sbc,loopback: Adjust command data length in case pi exists on the wire
  libiscsi, iser: Adjust data_length to include protection information
  scsi_cmnd: Introduce scsi_transfer_length helper
  target: Report correct response length for some commands
  target/sbc: Check that the LBA and number of blocks are correct in VERIFY
  target/sbc: Remove sbc_check_valid_sectors()
  Target/iscsi: Fix sendtargets response pdu for iser transport
  Target/iser: Fix a wrong dereference in case discovery session is over iser
  iscsi-target: Fix ABORT_TASK + connection reset iscsi_queue_req memory leak
  target: Use complete_all for se_cmd->t_transport_stop_comp
  target: Set CMD_T_ACTIVE bit for Task Management Requests
  target: cleanup some boolean tests
  target/spc: Simplify INQUIRY EVPD=0x80
  tcm_fc: Generate TASK_SET_FULL status for response failures
  tcm_fc: Generate TASK_SET_FULL status for DataIN failures
  iscsi-target: Reject mutual authentication with reflected CHAP_C
  iscsi-target: Remove no-op from iscsit_tpg_del_portal_group
  iscsi-target: Fix CHAP_A parameter list handling
  ...

13 files changed:
drivers/infiniband/ulp/isert/ib_isert.c
drivers/scsi/libiscsi.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/virtio_scsi.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/loopback/tcm_loop.c
drivers/target/target_core_transport.c
drivers/vhost/scsi.c
include/scsi/scsi_cmnd.h

diff --combined drivers/infiniband/ulp/isert/ib_isert.c
index b9d647468b99e66ed44c4a1176744ef5eefdbc82,ba619fa846626e6b2227585b09b362186c0fc84a..d4c7928a0f36143844f3ca0b4735f9c22a1c148a
@@@ -663,8 -663,9 +663,9 @@@ isert_connect_request(struct rdma_cm_i
  
        pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
        if (pi_support && !device->pi_capable) {
-               pr_err("Protection information requested but not supported\n");
-               ret = -EINVAL;
+               pr_err("Protection information requested but not supported, "
+                      "rejecting connect request\n");
+               ret = rdma_reject(cma_id, NULL, 0);
                goto out_mr;
        }
  
@@@ -787,14 -788,12 +788,12 @@@ isert_disconnect_work(struct work_struc
                isert_put_conn(isert_conn);
                return;
        }
-       if (!isert_conn->logout_posted) {
-               pr_debug("Calling rdma_disconnect for !logout_posted from"
-                        " isert_disconnect_work\n");
+       if (isert_conn->disconnect) {
+               /* Send DREQ/DREP towards our initiator */
                rdma_disconnect(isert_conn->conn_cm_id);
-               mutex_unlock(&isert_conn->conn_mutex);
-               iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
-               goto wake_up;
        }
        mutex_unlock(&isert_conn->conn_mutex);
  
  wake_up:
  }
  
  static void
- isert_disconnected_handler(struct rdma_cm_id *cma_id)
+ isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
  {
        struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
  
+       isert_conn->disconnect = disconnect;
        INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
        schedule_work(&isert_conn->conn_logout_work);
  }
@@@ -815,29 -815,28 +815,28 @@@ static in
  isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
  {
        int ret = 0;
+       bool disconnect = false;
  
        pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
                 event->event, event->status, cma_id->context, cma_id);
  
        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
-               pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
                ret = isert_connect_request(cma_id, event);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
-               pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
                isert_connected_handler(cma_id);
                break;
-       case RDMA_CM_EVENT_DISCONNECTED:
-               pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
-               isert_disconnected_handler(cma_id);
-               break;
-       case RDMA_CM_EVENT_DEVICE_REMOVAL:
-       case RDMA_CM_EVENT_ADDR_CHANGE:
+       case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
+       case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
+       case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+               disconnect = true;
+       case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
+               isert_disconnected_handler(cma_id, disconnect);
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
        default:
-               pr_err("Unknown RDMA CMA event: %d\n", event->event);
+               pr_err("Unhandled RDMA CMA event: %d\n", event->event);
                break;
        }
  
@@@ -1054,7 -1053,9 +1053,9 @@@ isert_put_login_tx(struct iscsi_conn *c
        }
        if (!login->login_failed) {
                if (login->login_complete) {
-                       if (isert_conn->conn_device->use_fastreg) {
+                       if (!conn->sess->sess_ops->SessionType &&
+                           isert_conn->conn_device->use_fastreg) {
+                               /* Normal Session and fastreg is used */
                                u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
  
                                ret = isert_conn_create_fastreg_pool(isert_conn,
@@@ -1210,8 -1211,6 +1211,8 @@@ sequence_cmd
  
        if (!rc && dump_payload == false && unsol_data)
                iscsit_set_unsoliticed_dataout(cmd);
 +      else if (dump_payload && imm_data)
 +              target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
  
        return 0;
  }
@@@ -1824,11 -1823,8 +1825,8 @@@ isert_do_control_comp(struct work_struc
                break;
        case ISTATE_SEND_LOGOUTRSP:
                pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
-               /*
-                * Call atomic_dec(&isert_conn->post_send_buf_count)
-                * from isert_wait_conn()
-                */
-               isert_conn->logout_posted = true;
+               atomic_dec(&isert_conn->post_send_buf_count);
                iscsit_logout_post_handler(cmd, cmd->conn);
                break;
        case ISTATE_SEND_TEXTRSP:
@@@ -2034,6 -2030,8 +2032,8 @@@ isert_cq_rx_comp_err(struct isert_conn 
        isert_conn->state = ISER_CONN_DOWN;
        mutex_unlock(&isert_conn->conn_mutex);
  
+       iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
        complete(&isert_conn->conn_wait_comp_err);
  }
  
@@@ -2320,7 -2318,7 +2320,7 @@@ isert_put_text_rsp(struct iscsi_cmd *cm
        int rc;
  
        isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
-       rc = iscsit_build_text_rsp(cmd, conn, hdr);
+       rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
        if (rc < 0)
                return rc;
  
@@@ -3156,9 -3154,14 +3156,14 @@@ accept_wait
                return -ENODEV;
  
        spin_lock_bh(&np->np_thread_lock);
-       if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+       if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
-               pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+               pr_debug("np_thread_state %d for isert_accept_np\n",
+                        np->np_thread_state);
+               /**
+                * No point in stalling here when np_thread
+                * is in state RESET/SHUTDOWN/EXIT - bail
+                **/
                return -ENODEV;
        }
        spin_unlock_bh(&np->np_thread_lock);
@@@ -3208,15 -3211,9 +3213,9 @@@ static void isert_wait_conn(struct iscs
        struct isert_conn *isert_conn = conn->context;
  
        pr_debug("isert_wait_conn: Starting \n");
-       /*
-        * Decrement post_send_buf_count for special case when called
-        * from isert_do_control_comp() -> iscsit_logout_post_handler()
-        */
-       mutex_lock(&isert_conn->conn_mutex);
-       if (isert_conn->logout_posted)
-               atomic_dec(&isert_conn->post_send_buf_count);
  
-       if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
+       mutex_lock(&isert_conn->conn_mutex);
+       if (isert_conn->conn_cm_id) {
                pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
                rdma_disconnect(isert_conn->conn_cm_id);
        }
@@@ -3293,6 -3290,7 +3292,7 @@@ destroy_rx_wq
  
  static void __exit isert_exit(void)
  {
+       flush_scheduled_work();
        destroy_workqueue(isert_comp_wq);
        destroy_workqueue(isert_rx_wq);
        iscsit_unregister_transport(&iser_target_transport);
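
The net effect of the CM handler rework above: three events actively tear
the connection down toward the initiator, while TIMEWAIT_EXIT only
schedules cleanup, since the teardown handshake has already run by then.
Restated as a sketch (hypothetical helper name, not part of the patch):

#include <linux/types.h>
#include <rdma/rdma_cm.h>

static bool isert_event_needs_dreq(enum rdma_cm_event_type event)
{
        switch (event) {
        case RDMA_CM_EVENT_ADDR_CHANGE:
        case RDMA_CM_EVENT_DISCONNECTED:
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                /* peer may still be up: send DREQ/DREP via rdma_disconnect() */
                return true;
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
        default:
                /* teardown handshake already completed: clean up only */
                return false;
        }
}
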
diff --combined drivers/scsi/libiscsi.c
index ecd7bd304efebb51ce99580e7810edb6a3cf84be,3f462349b16c8589185b1d1155fab8718f37d5dc..3d1bc67bac9dc58ac11b9785694e568b3c3ab285
@@@ -338,7 -338,7 +338,7 @@@ static int iscsi_prep_scsi_cmd_pdu(stru
        struct iscsi_session *session = conn->session;
        struct scsi_cmnd *sc = task->sc;
        struct iscsi_scsi_req *hdr;
-       unsigned hdrlength, cmd_len;
+       unsigned hdrlength, cmd_len, transfer_length;
        itt_t itt;
        int rc;
  
        if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
                task->protected = true;
  
+       transfer_length = scsi_transfer_length(sc);
+       hdr->data_length = cpu_to_be32(transfer_length);
        if (sc->sc_data_direction == DMA_TO_DEVICE) {
-               unsigned out_len = scsi_out(sc)->length;
                struct iscsi_r2t_info *r2t = &task->unsol_r2t;
  
-               hdr->data_length = cpu_to_be32(out_len);
                hdr->flags |= ISCSI_FLAG_CMD_WRITE;
                /*
                 * Write counters:
                memset(r2t, 0, sizeof(*r2t));
  
                if (session->imm_data_en) {
-                       if (out_len >= session->first_burst)
+                       if (transfer_length >= session->first_burst)
                                task->imm_count = min(session->first_burst,
                                                        conn->max_xmit_dlength);
                        else
-                               task->imm_count = min(out_len,
-                                                       conn->max_xmit_dlength);
+                               task->imm_count = min(transfer_length,
+                                                     conn->max_xmit_dlength);
                        hton24(hdr->dlength, task->imm_count);
                } else
                        zero_data(hdr->dlength);
  
                if (!session->initial_r2t_en) {
-                       r2t->data_length = min(session->first_burst, out_len) -
+                       r2t->data_length = min(session->first_burst,
+                                              transfer_length) -
                                               task->imm_count;
                        r2t->data_offset = task->imm_count;
                        r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
        } else {
                hdr->flags |= ISCSI_FLAG_CMD_FINAL;
                zero_data(hdr->dlength);
-               hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
  
                if (sc->sc_data_direction == DMA_FROM_DEVICE)
                        hdr->flags |= ISCSI_FLAG_CMD_READ;
                          scsi_bidi_cmnd(sc) ? "bidirectional" :
                          sc->sc_data_direction == DMA_TO_DEVICE ?
                          "write" : "read", conn->id, sc, sc->cmnd[0],
-                         task->itt, scsi_bufflen(sc),
+                         task->itt, transfer_length,
                          scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
                          session->cmdsn,
                          session->max_cmdsn - session->exp_cmdsn + 1);
@@@ -1442,9 -1442,9 +1442,9 @@@ static int iscsi_xmit_task(struct iscsi
                conn->task = NULL;
        }
        /* regular RX path uses back_lock */
 -      spin_lock_bh(&conn->session->back_lock);
 +      spin_lock(&conn->session->back_lock);
        __iscsi_put_task(task);
 -      spin_unlock_bh(&conn->session->back_lock);
 +      spin_unlock(&conn->session->back_lock);
        return rc;
  }
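
The qla2xxx diff that follows replaces the per-command kmem_cache with
pre-allocated, tag-indexed command descriptors drawn from the session's
percpu_ida pool. The allocation pattern, mirroring qlt_get_tag() below
(a sketch, assuming sess_cmd_map was sized for the tag pool at session
creation):

#include <linux/percpu_ida.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <target/target_core_base.h>

static struct qla_tgt_cmd *qlt_get_tag_sketch(struct se_session *se_sess)
{
        struct qla_tgt_cmd *cmd;
        int tag;

        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0)
                return NULL;    /* pool exhausted: caller answers SAM_STAT_BUSY */

        cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
        memset(cmd, 0, sizeof(*cmd));
        cmd->se_cmd.map_tag = tag;      /* released via percpu_ida_free() */
        return cmd;
}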
  
diff --combined drivers/scsi/qla2xxx/qla_target.c
index b1d10f9935c7caac0f85cc7cdb17ccee12c9e8df,bd9c725c08e1c98a244eca58389937dc87f458da..8d85ed8d89170265647938f5bfe9fc8f8d77d185
@@@ -104,7 -104,6 +104,6 @@@ static void qlt_reject_free_srr_imm(str
  /*
   * Global Variables
   */
- static struct kmem_cache *qla_tgt_cmd_cachep;
  static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
  static mempool_t *qla_tgt_mgmt_cmd_mempool;
  static struct workqueue_struct *qla_tgt_wq;
@@@ -182,11 -181,6 +181,11 @@@ struct scsi_qla_host *qlt_find_host_by_
  void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
        struct atio_from_isp *atio)
  {
 +      ql_dbg(ql_dbg_tgt, vha, 0xe072,
 +              "%s: qla_target(%d): type %x ox_id %04x\n",
 +              __func__, vha->vp_idx, atio->u.raw.entry_type,
 +              be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
 +
        switch (atio->u.raw.entry_type) {
        case ATIO_TYPE7:
        {
  void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
  {
        switch (pkt->entry_type) {
 +      case CTIO_CRC2:
 +              ql_dbg(ql_dbg_tgt, vha, 0xe073,
 +                      "qla_target(%d):%s: CRC2 Response pkt\n",
 +                      vha->vp_idx, __func__);
        case CTIO_TYPE7:
        {
                struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
@@@ -1359,42 -1349,13 +1358,42 @@@ static int qlt_pci_map_calc_cnt(struct 
  
        prm->cmd->sg_mapped = 1;
  
 -      /*
 -       * If greater than four sg entries then we need to allocate
 -       * the continuation entries
 -       */
 -      if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
 -              prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
 -                  prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);
 +      if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
 +              /*
 +               * If greater than four sg entries then we need to allocate
 +               * the continuation entries
 +               */
 +              if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
 +                      prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
 +                      prm->tgt->datasegs_per_cmd,
 +                      prm->tgt->datasegs_per_cont);
 +      } else {
 +              /* DIF */
 +              if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
 +                  (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
 +                      prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
 +                      prm->tot_dsds = prm->seg_cnt;
 +              } else
 +                      prm->tot_dsds = prm->seg_cnt;
 +
 +              if (cmd->prot_sg_cnt) {
 +                      prm->prot_sg      = cmd->prot_sg;
 +                      prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
 +                              cmd->prot_sg, cmd->prot_sg_cnt,
 +                              cmd->dma_data_direction);
 +                      if (unlikely(prm->prot_seg_cnt == 0))
 +                              goto out_err;
 +
 +                      if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
 +                          (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
 +                      /* Dif bundling not supported here */
 +                              prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
 +                                                              cmd->blk_sz);
 +                              prm->tot_dsds += prm->prot_seg_cnt;
 +                      } else
 +                              prm->tot_dsds += prm->prot_seg_cnt;
 +              }
 +      }
  
        ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
            prm->seg_cnt, prm->req_cnt);
@@@ -1415,16 -1376,6 +1414,16 @@@ static inline void qlt_unmap_sg(struct 
        BUG_ON(!cmd->sg_mapped);
        pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
        cmd->sg_mapped = 0;
 +
 +      if (cmd->prot_sg_cnt)
 +              pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
 +                      cmd->dma_data_direction);
 +
 +      if (cmd->ctx_dsd_alloced)
 +              qla2x00_clean_dsd_pool(ha, NULL, cmd);
 +
 +      if (cmd->ctx)
 +              dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
  }
  
  static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
@@@ -1713,9 -1664,8 +1712,9 @@@ static int qlt_pre_xmit_response(struc
                return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
        }
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
 -          vha->vp_idx, cmd->tag);
 +      ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
 +              vha->vp_idx, cmd->tag,
 +              be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
  
        prm->cmd = cmd;
        prm->tgt = tgt;
@@@ -1951,323 -1901,6 +1950,323 @@@ skip_explict_conf
        /* Sense with len > 24, is it possible ??? */
  }
  
 +
 +
 +/* diff  */
 +static inline int
 +qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
 +{
 +      /*
 +       * Uncomment when corresponding SCSI changes are done.
 +       *
 +       if (!sp->cmd->prot_chk)
 +       return 0;
 +       *
 +       */
 +      switch (se_cmd->prot_op) {
 +      case TARGET_PROT_DOUT_INSERT:
 +      case TARGET_PROT_DIN_STRIP:
 +              if (ql2xenablehba_err_chk >= 1)
 +                      return 1;
 +              break;
 +      case TARGET_PROT_DOUT_PASS:
 +      case TARGET_PROT_DIN_PASS:
 +              if (ql2xenablehba_err_chk >= 2)
 +                      return 1;
 +              break;
 +      case TARGET_PROT_DIN_INSERT:
 +      case TARGET_PROT_DOUT_STRIP:
 +              return 1;
 +      default:
 +              break;
 +      }
 +      return 0;
 +}
 +
 +/*
 + * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
 + *
 + */
 +static inline void
 +qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
 +{
 +      uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
 +
 +      /* wait until Mode Sense/Select cmd, modepage Ah, subpage 2
 +       * have been implemented by TCM, before AppTag is avail.
 +       * Look for modesense_handlers[]
 +       */
 +      ctx->app_tag = __constant_cpu_to_le16(0);
 +      ctx->app_tag_mask[0] = 0x0;
 +      ctx->app_tag_mask[1] = 0x0;
 +
 +      switch (se_cmd->prot_type) {
 +      case TARGET_DIF_TYPE0_PROT:
 +              /*
 +               * No check for ql2xenablehba_err_chk, as it would be an
 +               * I/O error if hba tag generation is not done.
 +               */
 +              ctx->ref_tag = cpu_to_le32(lba);
 +
 +              if (!qlt_hba_err_chk_enabled(se_cmd))
 +                      break;
 +
 +              /* enable ALL bytes of the ref tag */
 +              ctx->ref_tag_mask[0] = 0xff;
 +              ctx->ref_tag_mask[1] = 0xff;
 +              ctx->ref_tag_mask[2] = 0xff;
 +              ctx->ref_tag_mask[3] = 0xff;
 +              break;
 +      /*
 +       * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
 +       * 16 bit app tag.
 +       */
 +      case TARGET_DIF_TYPE1_PROT:
 +              ctx->ref_tag = cpu_to_le32(lba);
 +
 +              if (!qlt_hba_err_chk_enabled(se_cmd))
 +                      break;
 +
 +              /* enable ALL bytes of the ref tag */
 +              ctx->ref_tag_mask[0] = 0xff;
 +              ctx->ref_tag_mask[1] = 0xff;
 +              ctx->ref_tag_mask[2] = 0xff;
 +              ctx->ref_tag_mask[3] = 0xff;
 +              break;
 +      /*
 +       * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
 +       * match LBA in CDB + N
 +       */
 +      case TARGET_DIF_TYPE2_PROT:
 +              ctx->ref_tag = cpu_to_le32(lba);
 +
 +              if (!qlt_hba_err_chk_enabled(se_cmd))
 +                      break;
 +
 +              /* enable ALL bytes of the ref tag */
 +              ctx->ref_tag_mask[0] = 0xff;
 +              ctx->ref_tag_mask[1] = 0xff;
 +              ctx->ref_tag_mask[2] = 0xff;
 +              ctx->ref_tag_mask[3] = 0xff;
 +              break;
 +
 +      /* For Type 3 protection: 16 bit GUARD only */
 +      case TARGET_DIF_TYPE3_PROT:
 +              ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
 +                      ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
 +              break;
 +      }
 +}
 +
 +
 +static inline int
 +qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 +{
 +      uint32_t                *cur_dsd;
 +      int                     sgc;
 +      uint32_t                transfer_length = 0;
 +      uint32_t                data_bytes;
 +      uint32_t                dif_bytes;
 +      uint8_t                 bundling = 1;
 +      uint8_t                 *clr_ptr;
 +      struct crc_context      *crc_ctx_pkt = NULL;
 +      struct qla_hw_data      *ha;
 +      struct ctio_crc2_to_fw  *pkt;
 +      dma_addr_t              crc_ctx_dma;
 +      uint16_t                fw_prot_opts = 0;
 +      struct qla_tgt_cmd      *cmd = prm->cmd;
 +      struct se_cmd           *se_cmd = &cmd->se_cmd;
 +      uint32_t h;
 +      struct atio_from_isp *atio = &prm->cmd->atio;
 +
 +      sgc = 0;
 +      ha = vha->hw;
 +
 +      pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
 +      prm->pkt = pkt;
 +      memset(pkt, 0, sizeof(*pkt));
 +
 +      ql_dbg(ql_dbg_tgt, vha, 0xe071,
 +              "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
 +              vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
 +              prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
 +
 +      if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
 +          (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
 +              bundling = 0;
 +
 +      /* Compute dif len and adjust data len to include protection */
 +      data_bytes = cmd->bufflen;
 +      dif_bytes  = (data_bytes / cmd->blk_sz) * 8;
 +
 +      switch (se_cmd->prot_op) {
 +      case TARGET_PROT_DIN_INSERT:
 +      case TARGET_PROT_DOUT_STRIP:
 +              transfer_length = data_bytes;
 +              data_bytes += dif_bytes;
 +              break;
 +
 +      case TARGET_PROT_DIN_STRIP:
 +      case TARGET_PROT_DOUT_INSERT:
 +      case TARGET_PROT_DIN_PASS:
 +      case TARGET_PROT_DOUT_PASS:
 +              transfer_length = data_bytes + dif_bytes;
 +              break;
 +
 +      default:
 +              BUG();
 +              break;
 +      }
 +
 +      if (!qlt_hba_err_chk_enabled(se_cmd))
 +              fw_prot_opts |= 0x10; /* Disable Guard tag checking */
 +      /* HBA error checking enabled */
 +      else if (IS_PI_UNINIT_CAPABLE(ha)) {
 +              if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
 +                  (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
 +                      fw_prot_opts |= PO_DIS_VALD_APP_ESC;
 +              else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
 +                      fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
 +      }
 +
 +      switch (se_cmd->prot_op) {
 +      case TARGET_PROT_DIN_INSERT:
 +      case TARGET_PROT_DOUT_INSERT:
 +              fw_prot_opts |= PO_MODE_DIF_INSERT;
 +              break;
 +      case TARGET_PROT_DIN_STRIP:
 +      case TARGET_PROT_DOUT_STRIP:
 +              fw_prot_opts |= PO_MODE_DIF_REMOVE;
 +              break;
 +      case TARGET_PROT_DIN_PASS:
 +      case TARGET_PROT_DOUT_PASS:
 +              fw_prot_opts |= PO_MODE_DIF_PASS;
 +              /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
 +              break;
 +      default:/* Normal Request */
 +              fw_prot_opts |= PO_MODE_DIF_PASS;
 +              break;
 +      }
 +
 +
 +      /* ---- PKT ---- */
 +      /* Update entry type to indicate Command Type CRC_2 IOCB */
 +      pkt->entry_type  = CTIO_CRC2;
 +      pkt->entry_count = 1;
 +      pkt->vp_index = vha->vp_idx;
 +
 +      h = qlt_make_handle(vha);
 +      if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
 +              /*
 +               * CTIO type 7 from the firmware doesn't provide a way to
 +               * know the initiator's LOOP ID, hence we can't find
 +               * the session and, so, the command.
 +               */
 +              return -EAGAIN;
 +      } else
 +              ha->tgt.cmds[h-1] = prm->cmd;
 +
 +
 +      pkt->handle  = h | CTIO_COMPLETION_HANDLE_MARK;
 +      pkt->nport_handle = prm->cmd->loop_id;
 +      pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
 +      pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
 +      pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
 +      pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
 +      pkt->exchange_addr   = atio->u.isp24.exchange_addr;
 +      pkt->ox_id  = swab16(atio->u.isp24.fcp_hdr.ox_id);
 +      pkt->flags |= (atio->u.isp24.attr << 9);
 +      pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
 +
 +      /* Set transfer direction */
 +      if (cmd->dma_data_direction == DMA_TO_DEVICE)
 +              pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
 +      else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
 +              pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
 +
 +
 +      pkt->dseg_count = prm->tot_dsds;
 +      /* Fibre channel byte count */
 +      pkt->transfer_length = cpu_to_le32(transfer_length);
 +
 +
 +      /* ----- CRC context -------- */
 +
 +      /* Allocate CRC context from global pool */
 +      crc_ctx_pkt = cmd->ctx =
 +          dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
 +
 +      if (!crc_ctx_pkt)
 +              goto crc_queuing_error;
 +
 +      /* Zero out CTX area. */
 +      clr_ptr = (uint8_t *)crc_ctx_pkt;
 +      memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
 +
 +      crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
 +      INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
 +
 +      /* Set handle */
 +      crc_ctx_pkt->handle = pkt->handle;
 +
 +      qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
 +
 +      pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
 +      pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
 +      pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
 +
 +
 +      if (!bundling) {
 +              cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
 +      } else {
 +              /*
 +               * Configure Bundling if we need to fetch interleaving
 +               * protection PCI accesses
 +               */
 +              fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
 +              crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
 +              crc_ctx_pkt->u.bundling.dseg_count =
 +                      cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
 +              cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
 +      }
 +
 +      /* Finish the common fields of CRC pkt */
 +      crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
 +      crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
 +      crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
 +      crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
 +
 +
 +      /* Walks data segments */
 +      pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
 +
 +      if (!bundling && prm->prot_seg_cnt) {
 +              if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
 +                      prm->tot_dsds, cmd))
 +                      goto crc_queuing_error;
 +      } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
 +              (prm->tot_dsds - prm->prot_seg_cnt), cmd))
 +              goto crc_queuing_error;
 +
 +      if (bundling && prm->prot_seg_cnt) {
 +              /* Walks dif segments */
 +              pkt->add_flags |=
 +                      __constant_cpu_to_le16(CTIO_CRC2_AF_DIF_DSD_ENA);
 +
 +              cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
 +              if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
 +                      prm->prot_seg_cnt, cmd))
 +                      goto crc_queuing_error;
 +      }
 +      return QLA_SUCCESS;
 +
 +crc_queuing_error:
 +      /* Cleanup will be performed by the caller */
 +
 +      return QLA_FUNCTION_FAILED;
 +}
 +
 +
  /*
   * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and
   * QLA_TGT_XMIT_STATUS for >= 24xx silicon
@@@ -2287,10 -1920,9 +2286,10 @@@ int qlt_xmit_response(struct qla_tgt_cm
        qlt_check_srr_debug(cmd, &xmit_type);
  
        ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
 -          "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
 -          "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
 -          1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);
 +          "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
 +          (xmit_type & QLA_TGT_XMIT_STATUS) ?
 +          1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
 +          &cmd->se_cmd);
  
        res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
            &full_req_cnt);
        if (unlikely(res))
                goto out_unmap_unlock;
  
 -      res = qlt_24xx_build_ctio_pkt(&prm, vha);
 +      if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
 +              res = qlt_build_ctio_crc2_pkt(&prm, vha);
 +      else
 +              res = qlt_24xx_build_ctio_pkt(&prm, vha);
        if (unlikely(res != 0))
                goto out_unmap_unlock;
  
                    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
                        CTIO7_FLAGS_STATUS_MODE_0);
  
 -              qlt_load_data_segments(&prm, vha);
 +              if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
 +                      qlt_load_data_segments(&prm, vha);
  
                if (prm.add_status_pkt == 0) {
                        if (xmit_type & QLA_TGT_XMIT_STATUS) {
                        ql_dbg(ql_dbg_tgt, vha, 0xe019,
                            "Building additional status packet\n");
  
 +                      /*
 +                       * T10Dif: ctio_crc2_to_fw overlay on top of
 +                       * ctio7_to_24xx
 +                       */
                        memcpy(ctio, pkt, sizeof(*ctio));
 +                      /* reset back to CTIO7 */
                        ctio->entry_count = 1;
 +                      ctio->entry_type = CTIO_TYPE7;
                        ctio->dseg_count = 0;
                        ctio->u.status1.flags &= ~__constant_cpu_to_le16(
                            CTIO7_FLAGS_DATA_IN);
                        pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
                        pkt->u.status0.flags |= __constant_cpu_to_le16(
                            CTIO7_FLAGS_DONT_RET_CTIO);
 +
 +                      /* qlt_24xx_init_ctio_to_isp will correct
 +                       * all necessary fields that are part of CTIO7.
 +                       * There should be no residual of CTIO-CRC2 data.
 +                       */
                        qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
                            &prm);
                        pr_debug("Status CTIO7: %p\n", ctio);
@@@ -2423,10 -2040,8 +2422,10 @@@ int qlt_rdy_to_xfer(struct qla_tgt_cmd 
        if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
                return -EIO;
  
 -      ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
 -          (int)vha->vp_idx);
 +      ql_dbg(ql_dbg_tgt, vha, 0xe01b,
 +              "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n",
 +              __func__, (int)vha->vp_idx, &cmd->se_cmd,
 +              be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
  
        /* Calculate number of entries and segments required */
        if (qlt_pci_map_calc_cnt(&prm) != 0)
        res = qlt_check_reserve_free_req(vha, prm.req_cnt);
        if (res != 0)
                goto out_unlock_free_unmap;
 +      if (cmd->se_cmd.prot_op)
 +              res = qlt_build_ctio_crc2_pkt(&prm, vha);
 +      else
 +              res = qlt_24xx_build_ctio_pkt(&prm, vha);
  
 -      res = qlt_24xx_build_ctio_pkt(&prm, vha);
        if (unlikely(res != 0))
                goto out_unlock_free_unmap;
        pkt = (struct ctio7_to_24xx *)prm.pkt;
        pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
            CTIO7_FLAGS_STATUS_MODE_0);
 -      qlt_load_data_segments(&prm, vha);
 +
 +      if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
 +              qlt_load_data_segments(&prm, vha);
  
        cmd->state = QLA_TGT_STATE_NEED_DATA;
  
@@@ -2468,143 -2078,6 +2467,143 @@@ out_unlock_free_unmap
  }
  EXPORT_SYMBOL(qlt_rdy_to_xfer);
  
 +
 +/*
 + * Checks the guard or meta-data for the type of error
 + * detected by the HBA.
 + */
 +static inline int
 +qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
 +              struct ctio_crc_from_fw *sts)
 +{
 +      uint8_t         *ap = &sts->actual_dif[0];
 +      uint8_t         *ep = &sts->expected_dif[0];
 +      uint32_t        e_ref_tag, a_ref_tag;
 +      uint16_t        e_app_tag, a_app_tag;
 +      uint16_t        e_guard, a_guard;
 +      uint64_t        lba = cmd->se_cmd.t_task_lba;
 +
 +      a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
 +      a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
 +      a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
 +
 +      e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
 +      e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
 +      e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
 +
 +      ql_dbg(ql_dbg_tgt, vha, 0xe075,
 +          "iocb(s) %p Returned STATUS.\n", sts);
 +
 +      ql_dbg(ql_dbg_tgt, vha, 0xf075,
 +          "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
 +          cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
 +          a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
 +
 +      /*
 +       * Ignore sector if:
 +       * For type     3: ref & app tag is all 'f's
 +       * For type 0,1,2: app tag is all 'f's
 +       */
 +      if ((a_app_tag == 0xffff) &&
 +          ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
 +           (a_ref_tag == 0xffffffff))) {
 +              uint32_t blocks_done;
 +
 +              /* 2TB boundary case covered automatically with this */
 +              blocks_done = e_ref_tag - (uint32_t)lba + 1;
 +              cmd->se_cmd.bad_sector = e_ref_tag;
 +              cmd->se_cmd.pi_err = 0;
 +              ql_dbg(ql_dbg_tgt, vha, 0xf074,
 +                      "need to return scsi good\n");
 +
 +              /* Update protection tag */
 +              if (cmd->prot_sg_cnt) {
 +                      uint32_t i, j = 0, k = 0, num_ent;
 +                      struct scatterlist *sg, *sgl;
 +
 +
 +                      sgl = cmd->prot_sg;
 +
 +                      /* Patch the corresponding protection tags */
 +                      for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
 +                              num_ent = sg_dma_len(sg) / 8;
 +                              if (k + num_ent < blocks_done) {
 +                                      k += num_ent;
 +                                      continue;
 +                              }
 +                              j = blocks_done - k - 1;
 +                              k = blocks_done;
 +                              break;
 +                      }
 +
 +                      if (k != blocks_done) {
 +                              ql_log(ql_log_warn, vha, 0xf076,
 +                                  "unexpected tag values tag:lba=%u:%llu)\n",
 +                                  e_ref_tag, (unsigned long long)lba);
 +                              goto out;
 +                      }
 +
 +#if 0
 +                      struct sd_dif_tuple *spt;
 +                      /* TODO:
 +                       * This section came from initiator. Is it valid here?
 +                       * should ulp be overridden with the actual val???
 +                       */
 +                      spt = page_address(sg_page(sg)) + sg->offset;
 +                      spt += j;
 +
 +                      spt->app_tag = 0xffff;
 +                      if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
 +                              spt->ref_tag = 0xffffffff;
 +#endif
 +              }
 +
 +              return 0;
 +      }
 +
 +      /* check guard */
 +      if (e_guard != a_guard) {
 +              cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
 +              cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
 +
 +              ql_log(ql_log_warn, vha, 0xe076,
 +                  "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
 +                  cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
 +                  a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
 +                  a_guard, e_guard, cmd);
 +              goto out;
 +      }
 +
 +      /* check ref tag */
 +      if (e_ref_tag != a_ref_tag) {
 +              cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
 +              cmd->se_cmd.bad_sector = e_ref_tag;
 +
 +              ql_log(ql_log_warn, vha, 0xe077,
 +                      "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
 +                      cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
 +                      a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
 +                      a_guard, e_guard, cmd);
 +              goto out;
 +      }
 +
 +      /* check appl tag */
 +      if (e_app_tag != a_app_tag) {
 +              cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
 +              cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
 +
 +              ql_log(ql_log_warn, vha, 0xe078,
 +                      "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
 +                      cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
 +                      a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
 +                      a_guard, e_guard, cmd);
 +              goto out;
 +      }
 +out:
 +      return 1;
 +}
 +
 +
  /* If hardware_lock held on entry, might drop it, then reacquire */
  /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
  static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
@@@ -2681,39 -2154,28 +2680,46 @@@ static void qlt_send_term_exchange(stru
        rc = __qlt_send_term_exchange(vha, cmd, atio);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
  done:
 -      if (rc == 1) {
 +      /*
 +       * Terminate exchange will tell fw to release any active CTIO
 +       * that's in FW possession and clean up the exchange.
 +       *
 +       * "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still
 +       * down at FW.  Free the cmd later, when the CTIO comes back
 +       * w/ aborted (0x2) status.
 +       *
 +       * "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already
 +       * back w/ some error.  Free the cmd now.
 +       */
 +      if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) {
                if (!ha_locked && !in_interrupt())
                        msleep(250); /* just in case */
  
 +              if (cmd->sg_mapped)
 +                      qlt_unmap_sg(vha, cmd);
                vha->hw->tgt.tgt_ops->free_cmd(cmd);
        }
 +      return;
  }
  
  void qlt_free_cmd(struct qla_tgt_cmd *cmd)
  {
 -      BUG_ON(cmd->sg_mapped);
+       struct qla_tgt_sess *sess = cmd->sess;
 +      ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
 +          "%s: se_cmd[%p] ox_id %04x\n",
 +          __func__, &cmd->se_cmd,
 +          be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
  
 +      BUG_ON(cmd->sg_mapped);
        if (unlikely(cmd->free_sg))
                kfree(cmd->sg);
-       kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+       if (!sess || !sess->se_sess) {
+               WARN_ON(1);
+               return;
+       }
+       percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
  }
  EXPORT_SYMBOL(qlt_free_cmd);
  
@@@ -2918,7 -2380,6 +2924,7 @@@ static void qlt_do_ctio_completion(stru
                case CTIO_LIP_RESET:
                case CTIO_TARGET_RESET:
                case CTIO_ABORTED:
 +                      /* driver requested abort via Terminate exchange */
                case CTIO_TIMEOUT:
                case CTIO_INVALID_RX_ID:
                        /* They are OK */
                        else
                                return;
  
 +              case CTIO_DIF_ERROR: {
 +                      struct ctio_crc_from_fw *crc =
 +                              (struct ctio_crc_from_fw *)ctio;
 +                      ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
 +                          "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
 +                          vha->vp_idx, status, cmd->state, se_cmd,
 +                          *((u64 *)&crc->actual_dif[0]),
 +                          *((u64 *)&crc->expected_dif[0]));
 +
 +                      if (qlt_handle_dif_error(vha, cmd, ctio)) {
 +                              if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
 +                                      /* scsi Write/xfer rdy complete */
 +                                      goto skip_term;
 +                              } else {
 +                                      /* scsi read/xmit response complete:
 +                                       * call handle_dif_err to send scsi status
 +                                       * rather than terminate the exchange.
 +                                       */
 +                                      cmd->state = QLA_TGT_STATE_PROCESSED;
 +                                      ha->tgt.tgt_ops->handle_dif_err(cmd);
 +                                      return;
 +                              }
 +                      } else {
 +                              /* Need to generate a SCSI good completion,
 +                               * because FW did not send scsi status.
 +                               */
 +                              status = 0;
 +                              goto skip_term;
 +                      }
 +                      break;
 +              }
                default:
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
 -                          "qla_target(%d): CTIO with error status "
 -                          "0x%x received (state %x, se_cmd %p\n",
 +                          "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
                            vha->vp_idx, status, cmd->state, se_cmd);
                        break;
                }
  
 -              if (cmd->state != QLA_TGT_STATE_NEED_DATA)
 +
 +              /* "cmd->state == QLA_TGT_STATE_ABORTED" means
 +               * cmd is already aborted/terminated, we don't
 +               * need to terminate again.  The exchange is already
 +               * cleaned up/freed at FW level.  Just cleanup at driver
 +               * level.
 +               */
 +              if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
 +                      (cmd->state != QLA_TGT_STATE_ABORTED)) {
                        if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
                                return;
 +              }
        }
 +skip_term:
  
        if (cmd->state == QLA_TGT_STATE_PROCESSED) {
                ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
                    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
        }
  
 -      if (unlikely(status != CTIO_SUCCESS)) {
 +      if (unlikely(status != CTIO_SUCCESS) &&
 +              (cmd->state != QLA_TGT_STATE_ABORTED)) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
                dump_stack();
        }
@@@ -3075,13 -2495,12 +3081,12 @@@ static struct qla_tgt_sess *qlt_make_lo
  /*
   * Process context for I/O path into tcm_qla2xxx code
   */
- static void qlt_do_work(struct work_struct *work)
+ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
  {
-       struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
        scsi_qla_host_t *vha = cmd->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
-       struct qla_tgt_sess *sess = NULL;
+       struct qla_tgt_sess *sess = cmd->sess;
        struct atio_from_isp *atio = &cmd->atio;
        unsigned char *cdb;
        unsigned long flags;
        if (tgt->tgt_stop)
                goto out_term;
  
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
-           atio->u.isp24.fcp_hdr.s_id);
-       /* Do kref_get() before dropping qla_hw_data->hardware_lock. */
-       if (sess)
-               kref_get(&sess->se_sess->sess_kref);
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       if (unlikely(!sess)) {
-               uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
-               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
-                       "qla_target(%d): Unable to find wwn login"
-                       " (s_id %x:%x:%x), trying to create it manually\n",
-                       vha->vp_idx, s_id[0], s_id[1], s_id[2]);
-               if (atio->u.raw.entry_count > 1) {
-                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
-                               "Dropping multy entry cmd %p\n", cmd);
-                       goto out_term;
-               }
-               mutex_lock(&vha->vha_tgt.tgt_mutex);
-               sess = qlt_make_local_sess(vha, s_id);
-               /* sess has an extra creation ref. */
-               mutex_unlock(&vha->vha_tgt.tgt_mutex);
-               if (!sess)
-                       goto out_term;
-       }
-       cmd->sess = sess;
-       cmd->loop_id = sess->loop_id;
-       cmd->conf_compl_supported = sess->conf_compl_supported;
        cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
        cmd->tag = atio->u.isp24.exchange_addr;
        cmd->unpacked_lun = scsilun_to_int(
            atio->u.isp24.fcp_cmnd.add_cdb_len]));
  
        ql_dbg(ql_dbg_tgt, vha, 0xe022,
 -          "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
 -          cmd, cmd->unpacked_lun, cmd->tag);
 +              "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
 +              cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
 +              cmd->atio.u.isp24.fcp_hdr.ox_id);
  
-       ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
-           fcp_task_attr, data_dir, bidi);
+       ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+                                         fcp_task_attr, data_dir, bidi);
        if (ret != 0)
                goto out_term;
        /*
@@@ -3173,17 -2556,114 +3143,114 @@@ out_term
         */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
-       kmem_cache_free(qla_tgt_cmd_cachep, cmd);
-       if (sess)
+       percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+       ha->tgt.tgt_ops->put_sess(sess);
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+ static void qlt_do_work(struct work_struct *work)
+ {
+       struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+       __qlt_do_work(cmd);
+ }
+ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
+                                      struct qla_tgt_sess *sess,
+                                      struct atio_from_isp *atio)
+ {
+       struct se_session *se_sess = sess->se_sess;
+       struct qla_tgt_cmd *cmd;
+       int tag;
+       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+       if (tag < 0)
+               return NULL;
+       cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+       memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+       memcpy(&cmd->atio, atio, sizeof(*atio));
+       cmd->state = QLA_TGT_STATE_NEW;
+       cmd->tgt = vha->vha_tgt.qla_tgt;
+       cmd->vha = vha;
+       cmd->se_cmd.map_tag = tag;
+       cmd->sess = sess;
+       cmd->loop_id = sess->loop_id;
+       cmd->conf_compl_supported = sess->conf_compl_supported;
+       return cmd;
+ }
+ static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
+                         uint16_t);
+ static void qlt_create_sess_from_atio(struct work_struct *work)
+ {
+       struct qla_tgt_sess_op *op = container_of(work,
+                                       struct qla_tgt_sess_op, work);
+       scsi_qla_host_t *vha = op->vha;
+       struct qla_hw_data *ha = vha->hw;
+       struct qla_tgt_sess *sess;
+       struct qla_tgt_cmd *cmd;
+       unsigned long flags;
+       uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+               "qla_target(%d): Unable to find wwn login"
+               " (s_id %x:%x:%x), trying to create it manually\n",
+               vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+       if (op->atio.u.raw.entry_count > 1) {
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+                       "Dropping multy entry atio %p\n", &op->atio);
+               goto out_term;
+       }
+       mutex_lock(&vha->vha_tgt.tgt_mutex);
+       sess = qlt_make_local_sess(vha, s_id);
+       /* sess has an extra creation ref. */
+       mutex_unlock(&vha->vha_tgt.tgt_mutex);
+       if (!sess)
+               goto out_term;
+       /*
+        * Now obtain a pre-allocated session tag using the original op->atio
+        * packet header, and dispatch into __qlt_do_work() using the existing
+        * process context.
+        */
+       cmd = qlt_get_tag(vha, sess, &op->atio);
+       if (!cmd) {
+               spin_lock_irqsave(&ha->hardware_lock, flags);
+               qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
                ha->tgt.tgt_ops->put_sess(sess);
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               kfree(op);
+               return;
+       }
+       /*
+        * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
+        * the extra reference taken above by qlt_make_local_sess()
+        */
+       __qlt_do_work(cmd);
+       kfree(op);
+       return;
+ out_term:
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+       qlt_send_term_exchange(vha, NULL, &op->atio, 1);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
+       kfree(op);
  }
  
  /* ha->hardware_lock supposed to be held on entry */
  static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
        struct atio_from_isp *atio)
  {
+       struct qla_hw_data *ha = vha->hw;
        struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+       struct qla_tgt_sess *sess;
        struct qla_tgt_cmd *cmd;
  
        if (unlikely(tgt->tgt_stop)) {
                return -EFAULT;
        }
  
-       cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
+       sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
+       if (unlikely(!sess)) {
+               struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op),
+                                                    GFP_ATOMIC);
+               if (!op)
+                       return -ENOMEM;
+               memcpy(&op->atio, atio, sizeof(*atio));
+               INIT_WORK(&op->work, qlt_create_sess_from_atio);
+               queue_work(qla_tgt_wq, &op->work);
+               return 0;
+       }
+       /*
+        * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
+        */
+       kref_get(&sess->se_sess->sess_kref);
+       cmd = qlt_get_tag(vha, sess, atio);
        if (!cmd) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
                    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+               ha->tgt.tgt_ops->put_sess(sess);
                return -ENOMEM;
        }
  
-       memcpy(&cmd->atio, atio, sizeof(*atio));
-       cmd->state = QLA_TGT_STATE_NEW;
-       cmd->tgt = vha->vha_tgt.qla_tgt;
-       cmd->vha = vha;
        INIT_WORK(&cmd->work, qlt_do_work);
        queue_work(qla_tgt_wq, &cmd->work);
        return 0;
@@@ -4114,11 -3607,11 +4194,11 @@@ static void qlt_24xx_atio_pkt(struct sc
        switch (atio->u.raw.entry_type) {
        case ATIO_TYPE7:
                ql_dbg(ql_dbg_tgt, vha, 0xe02d,
 -                  "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
 -                  "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
 +                  "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n",
                    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
                    atio->u.isp24.fcp_cmnd.rddata,
                    atio->u.isp24.fcp_cmnd.wrdata,
 +                  atio->u.isp24.fcp_cmnd.cdb[0],
                    atio->u.isp24.fcp_cmnd.add_cdb_len,
                    be32_to_cpu(get_unaligned((uint32_t *)
                        &atio->u.isp24.fcp_cmnd.add_cdb[
@@@ -4216,13 -3709,11 +4296,13 @@@ static void qlt_response_pkt(struct scs
        tgt->irq_cmd_count++;
  
        switch (pkt->entry_type) {
 +      case CTIO_CRC2:
        case CTIO_TYPE7:
        {
                struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
 -              ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
 -                  vha->vp_idx);
 +              ql_dbg(ql_dbg_tgt, vha, 0xe030,
 +                      "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n",
 +                      entry->entry_type, vha->vp_idx);
                qlt_do_ctio_completion(vha, entry->handle,
                    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
                    entry);
@@@ -5357,7 -4848,6 +5437,7 @@@ qlt_24xx_process_response_error(struct 
        case ABTS_RESP_24XX:
        case CTIO_TYPE7:
        case NOTIFY_ACK_TYPE:
 +      case CTIO_CRC2:
                return 1;
        default:
                return 0;
@@@ -5501,23 -4991,13 +5581,13 @@@ int __init qlt_init(void
        if (!QLA_TGT_MODE_ENABLED())
                return 0;
  
-       qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
-           sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
-           NULL);
-       if (!qla_tgt_cmd_cachep) {
-               ql_log(ql_log_fatal, NULL, 0xe06c,
-                   "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
-               return -ENOMEM;
-       }
        qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
            sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
            qla_tgt_mgmt_cmd), 0, NULL);
        if (!qla_tgt_mgmt_cmd_cachep) {
                ql_log(ql_log_fatal, NULL, 0xe06d,
                    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
-               ret = -ENOMEM;
-               goto out;
+               return -ENOMEM;
        }
  
        qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
@@@ -5545,8 -5025,6 +5615,6 @@@ out_cmd_mempool
        mempool_destroy(qla_tgt_mgmt_cmd_mempool);
  out_mgmt_cmd_cachep:
        kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
- out:
-       kmem_cache_destroy(qla_tgt_cmd_cachep);
        return ret;
  }
  
@@@ -5558,5 -5036,4 +5626,4 @@@ void qlt_exit(void
        destroy_workqueue(qla_tgt_wq);
        mempool_destroy(qla_tgt_mgmt_cmd_mempool);
        kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
-       kmem_cache_destroy(qla_tgt_cmd_cachep);
  }
index f873e10451d29758ffd65b87a7b400af301e38c7,63283c58fb3356546f3f4e406409fab1254d47ec..5c9f185a8ebd8384df5d4d4010e27c9574001238
@@@ -293,7 -293,6 +293,7 @@@ struct ctio_to_2xxx 
  #define CTIO_ABORTED                  0x02
  #define CTIO_INVALID_RX_ID            0x08
  #define CTIO_TIMEOUT                  0x0B
 +#define CTIO_DIF_ERROR                        0x0C     /* DIF error detected  */
  #define CTIO_LIP_RESET                        0x0E
  #define CTIO_TARGET_RESET             0x17
  #define CTIO_PORT_UNAVAILABLE         0x28
@@@ -499,12 -498,11 +499,12 @@@ struct ctio7_from_24xx 
  #define CTIO7_FLAGS_DONT_RET_CTIO     BIT_8
  #define CTIO7_FLAGS_STATUS_MODE_0     0
  #define CTIO7_FLAGS_STATUS_MODE_1     BIT_6
 +#define CTIO7_FLAGS_STATUS_MODE_2     BIT_7
  #define CTIO7_FLAGS_EXPLICIT_CONFORM  BIT_5
  #define CTIO7_FLAGS_CONFIRM_SATISF    BIT_4
  #define CTIO7_FLAGS_DSD_PTR           BIT_2
 -#define CTIO7_FLAGS_DATA_IN           BIT_1
 -#define CTIO7_FLAGS_DATA_OUT          BIT_0
 +#define CTIO7_FLAGS_DATA_IN           BIT_1 /* data to initiator */
 +#define CTIO7_FLAGS_DATA_OUT          BIT_0 /* data from initiator */
  
  #define ELS_PLOGI                     0x3
  #define ELS_FLOGI                     0x4
  #define ELS_PDISC                     0x50
  #define ELS_ADISC                     0x52
  
 +/*
 + * CTIO Type CRC_2 IOCB
 + */
 +struct ctio_crc2_to_fw {
 +      uint8_t entry_type;             /* Entry type. */
 +#define CTIO_CRC2 0x7A
 +      uint8_t entry_count;            /* Entry count. */
 +      uint8_t sys_define;             /* System defined. */
 +      uint8_t entry_status;           /* Entry Status. */
 +
 +      uint32_t handle;                /* System handle. */
 +      uint16_t nport_handle;          /* N_PORT handle. */
 +      uint16_t timeout;               /* Command timeout. */
 +
 +      uint16_t dseg_count;            /* Data segment count. */
 +      uint8_t  vp_index;
 +      uint8_t  add_flags;             /* additional flags */
 +#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
 +
 +      uint8_t  initiator_id[3];       /* initiator ID */
 +      uint8_t  reserved1;
 +      uint32_t exchange_addr;         /* rcv exchange address */
 +      uint16_t reserved2;
 +      uint16_t flags;                 /* refer to CTIO7 flags values */
 +      uint32_t residual;
 +      uint16_t ox_id;
 +      uint16_t scsi_status;
 +      uint32_t relative_offset;
 +      uint32_t reserved5;
 +      uint32_t transfer_length;               /* total fc transfer length */
 +      uint32_t reserved6;
 +      uint32_t crc_context_address[2];/* Data segment address. */
 +      uint16_t crc_context_len;       /* Data segment length. */
 +      uint16_t reserved_1;            /* MUST be set to 0. */
 +} __packed;
 +
 +/* CTIO Type CRC_x Status IOCB */
 +struct ctio_crc_from_fw {
 +      uint8_t entry_type;             /* Entry type. */
 +      uint8_t entry_count;            /* Entry count. */
 +      uint8_t sys_define;             /* System defined. */
 +      uint8_t entry_status;           /* Entry Status. */
 +
 +      uint32_t handle;                /* System handle. */
 +      uint16_t status;
 +      uint16_t timeout;               /* Command timeout. */
 +      uint16_t dseg_count;            /* Data segment count. */
 +      uint32_t reserved1;
 +      uint16_t state_flags;
 +#define CTIO_CRC_SF_DIF_CHOPPED BIT_4
 +
 +      uint32_t exchange_address;      /* rcv exchange address */
 +      uint16_t reserved2;
 +      uint16_t flags;
 +      uint32_t resid_xfer_length;
 +      uint16_t ox_id;
 +      uint8_t  reserved3[12];
 +      uint16_t runt_guard;            /* reported runt blk guard */
 +      uint8_t  actual_dif[8];
 +      uint8_t  expected_dif[8];
 +} __packed;
 +
  /*
   * ISP queue - ABTS received/response entries structure definition for 24xx.
   */
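The status IOCB above ends with the firmware's view of the failing protection tuple. Each 8-byte DIF tuple uses the standard T10 layout: a 16-bit block guard CRC, a 16-bit application tag, and a 32-bit reference tag, big-endian on the wire. A hypothetical decoder for actual_dif[]/expected_dif[] (names invented here for illustration; relies on get_unaligned_be{16,32}() from <asm/unaligned.h>):

    struct t10_dif_tuple_view {
            uint16_t guard;         /* CRC-16 over the data block */
            uint16_t app_tag;       /* application tag */
            uint32_t ref_tag;       /* seeded with the LBA for Type 1 */
    };

    static void decode_dif_tuple(const uint8_t raw[8],
                                 struct t10_dif_tuple_view *t)
    {
            t->guard   = get_unaligned_be16(&raw[0]);
            t->app_tag = get_unaligned_be16(&raw[2]);
            t->ref_tag = get_unaligned_be32(&raw[4]);
    }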
@@@ -705,7 -641,6 +705,7 @@@ struct qla_tgt_func_tmpl 
        int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
                        unsigned char *, uint32_t, int, int, int);
        void (*handle_data)(struct qla_tgt_cmd *);
 +      void (*handle_dif_err)(struct qla_tgt_cmd *);
        int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
                        uint32_t);
        void (*free_cmd)(struct qla_tgt_cmd *);
@@@ -870,6 -805,12 +870,12 @@@ struct qla_tgt 
        struct list_head tgt_list_entry;
  };
  
+ struct qla_tgt_sess_op {
+       struct scsi_qla_host *vha;
+       struct atio_from_isp atio;
+       struct work_struct work;
+ };
+
  /*
   * Equivalent to IT Nexus (Initiator-Target)
   */
@@@ -894,9 -835,9 +900,9 @@@ struct qla_tgt_sess 
  };
  
  struct qla_tgt_cmd {
 +      struct se_cmd se_cmd;
        struct qla_tgt_sess *sess;
        int state;
 -      struct se_cmd se_cmd;
        struct work_struct free_work;
        struct work_struct work;
        /* Sense buffer that will be mapped into outgoing status */
        unsigned int free_sg:1;
        unsigned int aborted:1; /* Needed in case of SRR */
        unsigned int write_data_transferred:1;
 +      unsigned int ctx_dsd_alloced:1;
  
        struct scatterlist *sg; /* cmd data buffer SG vector */
        int sg_cnt;             /* SG segments count */
        struct scsi_qla_host *vha;
  
        struct atio_from_isp atio;
 +      /* t10dif */
 +      struct scatterlist *prot_sg;
 +      uint32_t prot_sg_cnt;
 +      uint32_t blk_sz;
 +      struct crc_context *ctx;
 +
  };
  
  struct qla_tgt_sess_work_param {
@@@ -973,10 -907,6 +979,10 @@@ struct qla_tgt_prm 
        int sense_buffer_len;
        int residual;
        int add_status_pkt;
 +      /* dif */
 +      struct scatterlist *prot_sg;
 +      uint16_t prot_seg_cnt;
 +      uint16_t tot_dsds;
  };
  
  struct qla_tgt_srr_imm {
@@@ -1052,8 -982,6 +1058,8 @@@ extern void qlt_24xx_atio_pkt_all_vps(s
  extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
  extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
  extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
 +extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *);
 +extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t);
  extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
  extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
  extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
index 896cb23adb77f2e0fcb32de955a4c1c1ec5c035a,7b3a97026934abcec79f9b31f8edf0a2d0456fb0..e2beab962096cd10d18cf1df63920c79fad216b3
@@@ -472,11 -472,6 +472,11 @@@ static int tcm_qla2xxx_write_pending(st
        cmd->sg_cnt = se_cmd->t_data_nents;
        cmd->sg = se_cmd->t_data_sg;
  
 +      cmd->prot_sg_cnt = se_cmd->t_prot_nents;
 +      cmd->prot_sg = se_cmd->t_prot_sg;
 +      cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
 +      se_cmd->pi_err = 0;
 +
        /*
         * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
         * the SGL mappings into PCIe memory for incoming FCP WRITE data.
@@@ -572,13 -567,8 +572,13 @@@ static void tcm_qla2xxx_handle_data_wor
                        return;
                }
  
 -              transport_generic_request_failure(&cmd->se_cmd,
 -                                                TCM_CHECK_CONDITION_ABORT_CMD);
 +              if (cmd->se_cmd.pi_err)
 +                      transport_generic_request_failure(&cmd->se_cmd,
 +                              cmd->se_cmd.pi_err);
 +              else
 +                      transport_generic_request_failure(&cmd->se_cmd,
 +                              TCM_CHECK_CONDITION_ABORT_CMD);
 +
                return;
        }
  
@@@ -594,27 -584,6 +594,27 @@@ static void tcm_qla2xxx_handle_data(str
        queue_work(tcm_qla2xxx_free_wq, &cmd->work);
  }
  
 +static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
 +{
 +      struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
 +
 +      /*
 +       * Take an extra kref to prevent the cmd from being freed too
 +       * early: we need to wait for the SCSI status/check condition
 +       * response generated by transport_generic_request_failure()
 +       * to finish being sent.
 +       */
 +      kref_get(&cmd->se_cmd.cmd_kref);
 +      transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
 +}
 +
 +/*
 + * Called from qla_target.c:qlt_do_ctio_completion()
 + */
 +static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
 +{
 +      INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
 +      queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 +}
 +
  /*
   * Called from qla_target.c:qlt_issue_task_mgmt()
   */
@@@ -641,11 -610,6 +641,11 @@@ static int tcm_qla2xxx_queue_data_in(st
        cmd->sg = se_cmd->t_data_sg;
        cmd->offset = 0;
  
 +      cmd->prot_sg_cnt = se_cmd->t_prot_nents;
 +      cmd->prot_sg = se_cmd->t_prot_sg;
 +      cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
 +      se_cmd->pi_err = 0;
 +
        /*
         * Now queue completed DATA_IN to the qla2xxx LLD and response ring
         */
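tcm_qla2xxx_handle_dif_work() above simply fails the command back through transport_generic_request_failure() with whatever sense value the LLD stored in se_cmd->pi_err. A hedged sketch of the kind of classification that feeds pi_err when a CTIO completes with CTIO_DIF_ERROR; the subcode values are assumptions for illustration, the real decision in qla_target.c compares the actual and expected guard/app/ref tags:

    static sense_reason_t qlt_dif_error_to_sense(uint8_t assumed_subcode)
    {
            switch (assumed_subcode) {
            case 1: /* guard tag mismatch (assumed encoding) */
                    return TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
            case 2: /* application tag mismatch (assumed encoding) */
                    return TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
            case 3: /* reference tag mismatch (assumed encoding) */
                    return TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
            default:
                    return TCM_CHECK_CONDITION_ABORT_CMD;
            }
    }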
@@@ -1501,6 -1465,8 +1501,8 @@@ static int tcm_qla2xxx_check_initiator_
        struct qla_tgt_sess *sess = qla_tgt_sess;
        unsigned char port_name[36];
        unsigned long flags;
+       int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count :
+                      TCM_QLA2XXX_DEFAULT_TAGS;
  
        lport = vha->vha_tgt.target_lport_ptr;
        if (!lport) {
        }
        se_tpg = &tpg->se_tpg;
  
-       se_sess = transport_init_session(TARGET_PROT_NORMAL);
+       se_sess = transport_init_session_tags(num_tags,
+                                             sizeof(struct qla_tgt_cmd),
+                                             TARGET_PROT_NORMAL);
        if (IS_ERR(se_sess)) {
                pr_err("Unable to initialize struct se_session\n");
                return PTR_ERR(se_sess);
@@@ -1636,7 -1604,6 +1640,7 @@@ static void tcm_qla2xxx_update_sess(str
  static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
        .handle_cmd             = tcm_qla2xxx_handle_cmd,
        .handle_data            = tcm_qla2xxx_handle_data,
 +      .handle_dif_err         = tcm_qla2xxx_handle_dif_err,
        .handle_tmr             = tcm_qla2xxx_handle_tmr,
        .free_cmd               = tcm_qla2xxx_free_cmd,
        .free_mcmd              = tcm_qla2xxx_free_mcmd,
index 99fdb94039442b50694e66d53f63cab55a5efdae,1c326b63ca5571d88a80882e254205652299c54b..89ee5929eb6de4060536e89885aba5f13f19577c
@@@ -23,6 -23,7 +23,7 @@@
  #include <linux/virtio_config.h>
  #include <linux/virtio_scsi.h>
  #include <linux/cpu.h>
+ #include <linux/blkdev.h>
  #include <scsi/scsi_host.h>
  #include <scsi/scsi_device.h>
  #include <scsi/scsi_cmnd.h>
@@@ -37,6 -38,7 +38,7 @@@ struct virtio_scsi_cmd 
        struct completion *comp;
        union {
                struct virtio_scsi_cmd_req       cmd;
+               struct virtio_scsi_cmd_req_pi    cmd_pi;
                struct virtio_scsi_ctrl_tmf_req  tmf;
                struct virtio_scsi_ctrl_an_req   an;
        } req;
@@@ -73,12 -75,17 +75,12 @@@ struct virtio_scsi_vq 
   * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
   * (each virtqueue's affinity is set to the CPU that "owns" the queue).
   *
 - * An interesting effect of this policy is that only writes to req_vq need to
 - * take the tgt_lock.  Read can be done outside the lock because:
 + * tgt_lock is held to serialize reading and writing req_vq. Reading req_vq
 + * could be done locklessly, but we do not do it yet.
   *
 - * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1.
 - *   In that case, no other CPU is reading req_vq: even if they were in
 - *   virtscsi_queuecommand_multi, they would be spinning on tgt_lock.
 - *
 - * - reads of req_vq only occur when the target is not idle (reqs != 0).
 - *   A CPU that enters virtscsi_queuecommand_multi will not modify req_vq.
 - *
 - * Similarly, decrements of reqs are never concurrent with writes of req_vq.
 + * Decrements of reqs are never concurrent with writes of req_vq: before the
 + * decrement reqs will be != 0; after the decrement the virtqueue completion
 + * routine will not use the req_vq so it can be changed by a new request.
   * Thus they can happen outside the tgt_lock, provided of course we make reqs
   * an atomic_t.
   */
@@@ -199,6 -206,7 +201,6 @@@ static void virtscsi_complete_cmd(struc
                        set_driver_byte(sc, DRIVER_SENSE);
        }
  
 -      mempool_free(cmd, virtscsi_cmd_pool);
        sc->scsi_done(sc);
  
        atomic_dec(&tgt->reqs);
@@@ -232,6 -240,38 +234,6 @@@ static void virtscsi_req_done(struct vi
        int index = vq->index - VIRTIO_SCSI_VQ_BASE;
        struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
  
 -      /*
 -       * Read req_vq before decrementing the reqs field in
 -       * virtscsi_complete_cmd.
 -       *
 -       * With barriers:
 -       *
 -       *      CPU #0                  virtscsi_queuecommand_multi (CPU #1)
 -       *      ------------------------------------------------------------
 -       *      lock vq_lock
 -       *      read req_vq
 -       *      read reqs (reqs = 1)
 -       *      write reqs (reqs = 0)
 -       *                              increment reqs (reqs = 1)
 -       *                              write req_vq
 -       *
 -       * Possible reordering without barriers:
 -       *
 -       *      CPU #0                  virtscsi_queuecommand_multi (CPU #1)
 -       *      ------------------------------------------------------------
 -       *      lock vq_lock
 -       *      read reqs (reqs = 1)
 -       *      write reqs (reqs = 0)
 -       *                              increment reqs (reqs = 1)
 -       *                              write req_vq
 -       *      read (wrong) req_vq
 -       *
 -       * We do not need a full smp_rmb, because req_vq is required to get
 -       * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored
 -       * in the virtqueue as the user token.
 -       */
 -      smp_read_barrier_depends();
 -
        virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
  };
  
@@@ -241,6 -281,8 +243,6 @@@ static void virtscsi_complete_free(stru
  
        if (cmd->comp)
                complete_all(cmd->comp);
 -      else
 -              mempool_free(cmd, virtscsi_cmd_pool);
  }
  
  static void virtscsi_ctrl_done(struct virtqueue *vq)
@@@ -393,13 -435,14 +395,13 @@@ static void virtscsi_event_done(struct 
   * @cmd               : command structure
   * @req_size  : size of the request buffer
   * @resp_size : size of the response buffer
 - * @gfp       : flags to use for memory allocations
   */
  static int virtscsi_add_cmd(struct virtqueue *vq,
                            struct virtio_scsi_cmd *cmd,
 -                          size_t req_size, size_t resp_size, gfp_t gfp)
 +                          size_t req_size, size_t resp_size)
  {
        struct scsi_cmnd *sc = cmd->sc;
-       struct scatterlist *sgs[4], req, resp;
+       struct scatterlist *sgs[6], req, resp;
        struct sg_table *out, *in;
        unsigned out_num = 0, in_num = 0;
  
        sgs[out_num++] = &req;
  
        /* Data-out buffer.  */
-       if (out)
+       if (out) {
+               /* Place WRITE protection SGLs before Data OUT payload */
+               if (scsi_prot_sg_count(sc))
+                       sgs[out_num++] = scsi_prot_sglist(sc);
                sgs[out_num++] = out->sgl;
+       }
  
        /* Response header.  */
        sg_init_one(&resp, &cmd->resp, resp_size);
        sgs[out_num + in_num++] = &resp;
  
        /* Data-in buffer */
-       if (in)
+       if (in) {
+               /* Place READ protection SGLs before Data IN payload */
+               if (scsi_prot_sg_count(sc))
+                       sgs[out_num + in_num++] = scsi_prot_sglist(sc);
                sgs[out_num + in_num++] = in->sgl;
+       }
  
 -      return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp);
 +      return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);
  }
  
  static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
                             struct virtio_scsi_cmd *cmd,
 -                           size_t req_size, size_t resp_size, gfp_t gfp)
 +                           size_t req_size, size_t resp_size)
  {
        unsigned long flags;
        int err;
        bool needs_kick = false;
  
        spin_lock_irqsave(&vq->vq_lock, flags);
 -      err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp);
 +      err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);
        if (!err)
                needs_kick = virtqueue_kick_prepare(vq->vq);
  
        return err;
  }
  
+ static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd,
+                                struct scsi_cmnd *sc)
+ {
+       cmd->lun[0] = 1;
+       cmd->lun[1] = sc->device->id;
+       cmd->lun[2] = (sc->device->lun >> 8) | 0x40;
+       cmd->lun[3] = sc->device->lun & 0xff;
+       cmd->tag = (unsigned long)sc;
+       cmd->task_attr = VIRTIO_SCSI_S_SIMPLE;
+       cmd->prio = 0;
+       cmd->crn = 0;
+ }
+
+ static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi,
+                                   struct scsi_cmnd *sc)
+ {
+       struct request *rq = sc->request;
+       struct blk_integrity *bi;
+       virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc);
+       if (!rq || !scsi_prot_sg_count(sc))
+               return;
+       bi = blk_get_integrity(rq->rq_disk);
+       if (sc->sc_data_direction == DMA_TO_DEVICE)
+               cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size;
+       else if (sc->sc_data_direction == DMA_FROM_DEVICE)
+               cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size;
+ }
+
  static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
                                 struct virtio_scsi_vq *req_vq,
                                 struct scsi_cmnd *sc)
  {
 -      struct virtio_scsi_cmd *cmd;
 -      int ret, req_size;
 -
        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
 +      struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
++      int req_size;
 +
        BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
  
        /* TODO: check feature bit and fail if unsupported?  */
        dev_dbg(&sc->device->sdev_gendev,
                "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
  
 -      ret = SCSI_MLQUEUE_HOST_BUSY;
 -      cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
 -      if (!cmd)
 -              goto out;
 -
        memset(cmd, 0, sizeof(*cmd));
        cmd->sc = sc;
-       cmd->req.cmd = (struct virtio_scsi_cmd_req){
-               .lun[0] = 1,
-               .lun[1] = sc->device->id,
-               .lun[2] = (sc->device->lun >> 8) | 0x40,
-               .lun[3] = sc->device->lun & 0xff,
-               .tag = (unsigned long)sc,
-               .task_attr = VIRTIO_SCSI_S_SIMPLE,
-               .prio = 0,
-               .crn = 0,
-       };
  
        BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
-       memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
  
-       if (virtscsi_kick_cmd(req_vq, cmd,
-                             sizeof cmd->req.cmd, sizeof cmd->resp.cmd) != 0)
+       if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) {
+               virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc);
+               memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len);
+               req_size = sizeof(cmd->req.cmd_pi);
+       } else {
+               virtio_scsi_init_hdr(&cmd->req.cmd, sc);
+               memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
+               req_size = sizeof(cmd->req.cmd);
+       }
 -      if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd),
 -                            GFP_ATOMIC) == 0)
 -              ret = 0;
 -      else
 -              mempool_free(cmd, virtscsi_cmd_pool);
 -
 -out:
 -      return ret;
++      if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
 +              return SCSI_MLQUEUE_HOST_BUSY;
 +      return 0;
  }
  
  static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
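Two details above are worth making concrete. Request layout: with VIRTIO_SCSI_F_T10_PI negotiated, virtscsi_add_cmd() queues protection buffers in their own iovecs immediately before the matching payload, i.e. { req_pi, PI-out, data-out } followed by { resp, PI-in, data-in }. Header math: a worked example with assumed numbers:

    /*
     * Assumed example, not from the patch: target id 2, LUN 5, and a
     * 4 KiB WRITE spanning eight 512-byte sectors with 8-byte DIF tuples.
     *
     * virtio_scsi_init_hdr() encodes the single-level LUN as
     *      lun[] = { 1, 2, 0x40 | (5 >> 8), 5 & 0xff } = { 1, 2, 0x40, 0x05 }
     *
     * virtio_scsi_init_hdr_pi() then sizes the protection payload as
     *      pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size = 8 * 8 = 64
     *
     * so 64 bytes of PI travel in the iovec queued just before data-out.
     */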
@@@ -508,8 -600,12 +549,8 @@@ static struct virtio_scsi_vq *virtscsi_
  
        spin_lock_irqsave(&tgt->tgt_lock, flags);
  
 -      /*
 -       * The memory barrier after atomic_inc_return matches
 -       * the smp_read_barrier_depends() in virtscsi_req_done.
 -       */
        if (atomic_inc_return(&tgt->reqs) > 1)
 -              vq = ACCESS_ONCE(tgt->req_vq);
 +              vq = tgt->req_vq;
        else {
                queue_num = smp_processor_id();
                while (unlikely(queue_num >= vscsi->num_queues))
@@@ -540,7 -636,8 +581,7 @@@ static int virtscsi_tmf(struct virtio_s
  
        cmd->comp = &comp;
        if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
 -                            sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
 -                            GFP_NOIO) < 0)
 +                            sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)
                goto out;
  
        wait_for_completion(&comp);
@@@ -626,7 -723,6 +667,7 @@@ static struct scsi_host_template virtsc
        .name = "Virtio SCSI HBA",
        .proc_name = "virtio_scsi",
        .this_id = -1,
 +      .cmd_size = sizeof(struct virtio_scsi_cmd),
        .queuecommand = virtscsi_queuecommand_single,
        .eh_abort_handler = virtscsi_abort,
        .eh_device_reset_handler = virtscsi_device_reset,
@@@ -643,7 -739,6 +684,7 @@@ static struct scsi_host_template virtsc
        .name = "Virtio SCSI HBA",
        .proc_name = "virtio_scsi",
        .this_id = -1,
 +      .cmd_size = sizeof(struct virtio_scsi_cmd),
        .queuecommand = virtscsi_queuecommand_multi,
        .eh_abort_handler = virtscsi_abort,
        .eh_device_reset_handler = virtscsi_device_reset,
@@@ -695,12 -790,8 +736,12 @@@ static void __virtscsi_set_affinity(str
  
                vscsi->affinity_hint_set = true;
        } else {
 -              for (i = 0; i < vscsi->num_queues; i++)
 +              for (i = 0; i < vscsi->num_queues; i++) {
 +                      if (!vscsi->req_vqs[i].vq)
 +                              continue;
 +
                        virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
 +              }
  
                vscsi->affinity_hint_set = false;
        }
@@@ -820,7 -911,7 +861,7 @@@ static int virtscsi_probe(struct virtio
  {
        struct Scsi_Host *shost;
        struct virtio_scsi *vscsi;
-       int err;
+       int err, host_prot;
        u32 sg_elems, num_targets;
        u32 cmd_per_lun;
        u32 num_queues;
        shost->max_id = num_targets;
        shost->max_channel = 0;
        shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
+       if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
+               host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
+                           SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
+                           SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
+               scsi_host_set_prot(shost, host_prot);
+               scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+       }
        err = scsi_add_host(shost, &vdev->dev);
        if (err)
                goto scsi_add_host_failed;
@@@ -939,6 -1040,7 +990,7 @@@ static struct virtio_device_id id_table
  static unsigned int features[] = {
        VIRTIO_SCSI_F_HOTPLUG,
        VIRTIO_SCSI_F_CHANGE,
+       VIRTIO_SCSI_F_T10_PI,
  };
  
  static struct virtio_driver virtio_scsi_driver = {
index 9189bc0a87aef18df10bf2c0003d2c1e4c0bc65a,b87721a01b7464a17ca44f9212489cd6e46dc8ac..5663f4d19d028120ef51efb689dbff39d47e9e4c
@@@ -300,7 -300,7 +300,7 @@@ bool iscsit_check_np_match
                port = ntohs(sock_in->sin_port);
        }
  
-       if ((ip_match == true) && (np->np_port == port) &&
+       if (ip_match && (np->np_port == port) &&
            (np->np_network_transport == network_transport))
                return true;
  
@@@ -325,7 -325,7 +325,7 @@@ static struct iscsi_np *iscsit_get_np
                }
  
                match = iscsit_check_np_match(sockaddr, np, network_transport);
-               if (match == true) {
+               if (match) {
                        /*
                         * Increment the np_exports reference count now to
                         * prevent iscsit_del_np() below from being called
@@@ -460,7 -460,6 +460,7 @@@ int iscsit_del_np(struct iscsi_np *np
        spin_lock_bh(&np->np_thread_lock);
        np->np_exports--;
        if (np->np_exports) {
 +              np->enabled = true;
                spin_unlock_bh(&np->np_thread_lock);
                return 0;
        }
@@@ -1121,7 -1120,7 +1121,7 @@@ iscsit_get_immediate_data(struct iscsi_
        /*
         * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
         */
-       if (dump_payload == true)
+       if (dump_payload)
                goto after_immediate_data;
  
        immed_ret = iscsit_handle_immediate_data(cmd, hdr,
@@@ -3390,7 -3389,9 +3390,9 @@@ static bool iscsit_check_inaddr_any(str
  
  #define SENDTARGETS_BUF_LIMIT 32768U
  
- static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
+ static int
+ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
+                                 enum iscsit_transport_type network_transport)
  {
        char *payload = NULL;
        struct iscsi_conn *conn = cmd->conn;
                                struct iscsi_np *np = tpg_np->tpg_np;
                                bool inaddr_any = iscsit_check_inaddr_any(np);
  
+                               if (np->np_network_transport != network_transport)
+                                       continue;
                                if (!target_name_printed) {
                                        len = sprintf(buf, "TargetName=%s",
                                                      tiqn->tiqn);
  
                                len = sprintf(buf, "TargetAddress="
                                        "%s:%hu,%hu",
-                                       (inaddr_any == false) ?
-                                               np->np_ip : conn->local_ip,
-                                       (inaddr_any == false) ?
-                                               np->np_port : conn->local_port,
+                                       inaddr_any ? conn->local_ip : np->np_ip,
+                                       inaddr_any ? conn->local_port : np->np_port,
                                        tpg->tpgt);
                                len += 1;
  
  
  int
  iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
-                     struct iscsi_text_rsp *hdr)
+                     struct iscsi_text_rsp *hdr,
+                     enum iscsit_transport_type network_transport)
  {
        int text_length, padding;
  
-       text_length = iscsit_build_sendtargets_response(cmd);
+       text_length = iscsit_build_sendtargets_response(cmd, network_transport);
        if (text_length < 0)
                return text_length;
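With the transport filter above, a discovery session is only told about portals whose np_network_transport matches its own connection, so an iSER initiator no longer receives TCP-only portals (and vice versa). For one matching portal the generated text reduces to key=value pairs of this shape, where the trailing ,1 is the TPGT and the address comes from conn->local_ip/local_port when the portal is bound to INADDR_ANY (example values only):

    TargetName=iqn.2003-01.org.linux-iscsi.example:target0
    TargetAddress=192.168.1.10:3260,1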
  
@@@ -3562,7 -3565,7 +3566,7 @@@ static int iscsit_send_text_rsp
        u32 tx_size = 0;
        int text_length, iov_count = 0, rc;
  
-       rc = iscsit_build_text_rsp(cmd, conn, hdr);
+       rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
        if (rc < 0)
                return rc;
  
@@@ -4234,8 -4237,6 +4238,6 @@@ int iscsit_close_connection
        if (conn->conn_transport->iscsit_wait_conn)
                conn->conn_transport->iscsit_wait_conn(conn);
  
-       iscsit_free_queue_reqs_for_conn(conn);
        /*
         * During Connection recovery drop unacknowledged out of order
         * commands for this connection, and prepare the other commands
                iscsit_clear_ooo_cmdsns_for_conn(conn);
                iscsit_release_commands_from_conn(conn);
        }
+       iscsit_free_queue_reqs_for_conn(conn);
  
        /*
         * Handle decrementing session or connection usage count if
index d9b1d88e1ad382f07ff8d64c5374b3caaac3a3ce,61519b9ff1e60b25475d54162f3812d14a61434f..fecb69535a1583abe6f70f663f1ecd18142c555f
@@@ -249,28 -249,6 +249,28 @@@ static void iscsi_login_set_conn_values
        mutex_unlock(&auth_id_lock);
  }
  
 +static __printf(2, 3) int iscsi_change_param_sprintf(
 +      struct iscsi_conn *conn,
 +      const char *fmt, ...)
 +{
 +      va_list args;
 +      unsigned char buf[64];
 +
 +      memset(buf, 0, sizeof buf);
 +
 +      va_start(args, fmt);
 +      vsnprintf(buf, sizeof buf, fmt, args);
 +      va_end(args);
 +
 +      if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
 +              iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 +                              ISCSI_LOGIN_STATUS_NO_RESOURCES);
 +              return -1;
 +      }
 +
 +      return 0;
 +}
 +
  /*
   *    This is the leading connection of a new session,
   *    or session reinstatement.
@@@ -361,6 -339,7 +361,6 @@@ static int iscsi_login_zero_tsih_s2
  {
        struct iscsi_node_attrib *na;
        struct iscsi_session *sess = conn->sess;
 -      unsigned char buf[32];
        bool iser = false;
  
        sess->tpg = conn->tpg;
         *
         * In our case, we have already located the struct iscsi_tiqn at this point.
         */
 -      memset(buf, 0, 32);
 -      sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
 -      if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
 -              iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 -                              ISCSI_LOGIN_STATUS_NO_RESOURCES);
 +      if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
                return -1;
 -      }
  
        /*
         * Workaround for Initiators that have broken connection recovery logic.
         *
         * "We would really like to get rid of this." Linux-iSCSI.org team
         */
 -      memset(buf, 0, 32);
 -      sprintf(buf, "ErrorRecoveryLevel=%d", na->default_erl);
 -      if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
 -              iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 -                              ISCSI_LOGIN_STATUS_NO_RESOURCES);
 +      if (iscsi_change_param_sprintf(conn, "ErrorRecoveryLevel=%d", na->default_erl))
                return -1;
 -      }
  
        if (iscsi_login_disable_FIM_keys(conn->param_list, conn) < 0)
                return -1;
                unsigned long mrdsl, off;
                int rc;
  
 -              sprintf(buf, "RDMAExtensions=Yes");
 -              if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
 -                      iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 -                              ISCSI_LOGIN_STATUS_NO_RESOURCES);
 +              if (iscsi_change_param_sprintf(conn, "RDMAExtensions=Yes"))
                        return -1;
 -              }
 +
                /*
                 * Make MaxRecvDataSegmentLength PAGE_SIZE aligned for
                 * Immediate Data + Unsolicited Data-OUT if necessary.
                pr_warn("Aligning ISER MaxRecvDataSegmentLength: %lu down"
                        " to PAGE_SIZE\n", mrdsl);
  
 -              sprintf(buf, "MaxRecvDataSegmentLength=%lu\n", mrdsl);
 -              if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
 -                      iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 -                              ISCSI_LOGIN_STATUS_NO_RESOURCES);
 +              if (iscsi_change_param_sprintf(conn, "MaxRecvDataSegmentLength=%lu\n", mrdsl))
                        return -1;
 -              }
                /*
                 * ISER currently requires that ImmediateData + Unsolicited
                 * Data be disabled when protection / signature MRs are enabled.
@@@ -465,12 -461,19 +465,12 @@@ check_prot
                   (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS |
                    TARGET_PROT_DOUT_INSERT)) {
  
 -                      sprintf(buf, "ImmediateData=No");
 -                      if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
 -                              iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 -                                                  ISCSI_LOGIN_STATUS_NO_RESOURCES);
 +                      if (iscsi_change_param_sprintf(conn, "ImmediateData=No"))
                                return -1;
 -                      }
  
 -                      sprintf(buf, "InitialR2T=Yes");
 -                      if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
 -                              iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 -                                                  ISCSI_LOGIN_STATUS_NO_RESOURCES);
 +                      if (iscsi_change_param_sprintf(conn, "InitialR2T=Yes"))
                                return -1;
 -                      }
 +
                        pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for"
                                 " T10-PI enabled ISER session\n");
                }
@@@ -615,8 -618,13 +615,8 @@@ static int iscsi_login_non_zero_tsih_s2
         *
         * In our case, we have already located the struct iscsi_tiqn at this point.
         */
 -      memset(buf, 0, 32);
 -      sprintf(buf, "TargetPortalGroupTag=%hu", sess->tpg->tpgt);
 -      if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
 -              iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 -                              ISCSI_LOGIN_STATUS_NO_RESOURCES);
 +      if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
                return -1;
 -      }
  
        return iscsi_login_disable_FIM_keys(conn->param_list, conn);
  }
@@@ -1145,7 -1153,7 +1145,7 @@@ iscsit_conn_set_transport(struct iscsi_
  void iscsi_target_login_sess_out(struct iscsi_conn *conn,
                struct iscsi_np *np, bool zero_tsih, bool new_sess)
  {
-       if (new_sess == false)
+       if (!new_sess)
                goto old_sess_out;
  
        pr_err("iSCSI Login negotiation failed.\n");
index 1431e8400d28b41c4ca21fc0563a8e3df1b7828a,2d4cb24bb1a02dc790b851d2c45d2c62540bcdb1..c3cb5c15efdaa4fe1e5ea2c4a90cd6eb0e167a15
@@@ -184,12 -184,11 +184,12 @@@ static void iscsit_clear_tpg_np_login_t
                return;
        }
  
 -      tpg_np->tpg_np->enabled = false;
 +      if (shutdown)
 +              tpg_np->tpg_np->enabled = false;
        iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown);
  }
  
- void iscsit_clear_tpg_np_login_threads(
static void iscsit_clear_tpg_np_login_threads(
        struct iscsi_portal_group *tpg,
        bool shutdown)
  {
@@@ -276,8 -275,6 +276,6 @@@ int iscsit_tpg_del_portal_group
        tpg->tpg_state = TPG_STATE_INACTIVE;
        spin_unlock(&tpg->tpg_state_lock);
  
-       iscsit_clear_tpg_np_login_threads(tpg, true);
        if (iscsit_release_sessions_for_tpg(tpg, force) < 0) {
                pr_err("Unable to delete iSCSI Target Portal Group:"
                        " %hu while active sessions exist, and force=0\n",
@@@ -453,7 -450,7 +451,7 @@@ static bool iscsit_tpg_check_network_po
  
                        match = iscsit_check_np_match(sockaddr, np,
                                                network_transport);
-                       if (match == true)
+                       if (match)
                                break;
                }
                spin_unlock(&tpg->tpg_np_lock);
@@@ -475,7 -472,7 +473,7 @@@ struct iscsi_tpg_np *iscsit_tpg_add_net
  
        if (!tpg_np_parent) {
                if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
-                               network_transport) == true) {
+                               network_transport)) {
                        pr_err("Network Portal: %s already exists on a"
                                " different TPG on %s\n", ip_str,
                                tpg->tpg_tiqn->tiqn);
index 73ab75ddaf42e9b3fc8e378cf3ae3669d1e040e8,1f4c015e90786937423e7c86bd3638c013f6dacd..6d2f37578b29cc0509d3898b7910bb35cd9d484c
@@@ -179,7 -179,7 +179,7 @@@ static void tcm_loop_submission_work(st
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_tpg *tl_tpg;
        struct scatterlist *sgl_bidi = NULL;
-       u32 sgl_bidi_count = 0;
+       u32 sgl_bidi_count = 0, transfer_length;
        int rc;
  
        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
  
        }
  
-       if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
+       transfer_length = scsi_transfer_length(sc);
+       if (!scsi_prot_sg_count(sc) &&
+           scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
                se_cmd->prot_pto = true;
+               /*
+                * The loopback transport doesn't support the
+                * WRITE_GENERATE and READ_STRIP protection
+                * information operations, so proceed unprotected.
+                */
+               transfer_length = scsi_bufflen(sc);
+       }
  
        rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
                        &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
-                       scsi_bufflen(sc), tcm_loop_sam_attr(sc),
+                       transfer_length, tcm_loop_sam_attr(sc),
                        sc->sc_data_direction, 0,
                        scsi_sglist(sc), scsi_sg_count(sc),
                        sgl_bidi, sgl_bidi_count,
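scsi_transfer_length() is the new scsi_cmnd.h helper (its hunk appears at the end of this merge) returning the expected transfer length including protection information when PI is carried on the wire; using it here keeps the EDTL passed to target_submit_cmd_map_sgls() in step with what the initiator actually moves. A worked example under assumed parameters:

    /*
     * Assumed: 4 KiB of data, 512-byte sectors, 8-byte PI tuples, and a
     * prot_op that leaves PI on the wire (not NORMAL, READ_STRIP or
     * WRITE_INSERT).
     *
     *      data = 4096 bytes  (scsi_bufflen)
     *      PI   = (4096 / 512) * 8 = 64 bytes
     *      EDTL = 4096 + 64 = 4160 bytes
     *
     * In the prot_pto case above the fabric strips/inserts PI itself,
     * so the plain 4096-byte scsi_bufflen() is used instead.
     */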
@@@ -951,7 -960,7 +960,7 @@@ static int tcm_loop_port_link
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
  
        atomic_inc(&tl_tpg->tl_tpg_port_count);
 -      smp_mb__after_atomic_inc();
 +      smp_mb__after_atomic();
        /*
         * Add Linux/SCSI struct scsi_device by HCTL
         */
@@@ -986,7 -995,7 +995,7 @@@ static void tcm_loop_port_unlink
        scsi_device_put(sd);
  
        atomic_dec(&tl_tpg->tl_tpg_port_count);
 -      smp_mb__after_atomic_dec();
 +      smp_mb__after_atomic();
  
        pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
  }
index 2179feed0d63aa83017dff321ddc86c11e8432d4,4ef11145c746d64651b0a17624879575f8178b9e..7fa62fc93e0b52d70ac49c67f5455a139935f55c
@@@ -504,7 -504,7 +504,7 @@@ void transport_deregister_session(struc
         * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
         * removal context.
         */
-       if (se_nacl && comp_nacl == true)
+       if (se_nacl && comp_nacl)
                target_put_nacl(se_nacl);
  
        transport_free_session(se_sess);
@@@ -562,7 -562,7 +562,7 @@@ static int transport_cmd_check_stop(str
  
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
  
-               complete(&cmd->t_transport_stop_comp);
+               complete_all(&cmd->t_transport_stop_comp);
                return 1;
        }
  
@@@ -687,7 -687,7 +687,7 @@@ void target_complete_cmd(struct se_cmd 
        if (cmd->transport_state & CMD_T_ABORTED &&
            cmd->transport_state & CMD_T_STOP) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               complete(&cmd->t_transport_stop_comp);
+               complete_all(&cmd->t_transport_stop_comp);
                return;
        } else if (!success) {
                INIT_WORK(&cmd->work, target_complete_failure_work);
  }
  EXPORT_SYMBOL(target_complete_cmd);
  
+ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
+ {
+       if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
+               if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+                       cmd->residual_count += cmd->data_length - length;
+               } else {
+                       cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+                       cmd->residual_count = cmd->data_length - length;
+               }
+               cmd->data_length = length;
+       }
+       target_complete_cmd(cmd, scsi_status);
+ }
+ EXPORT_SYMBOL(target_complete_cmd_with_length);
+
  static void target_add_to_state_list(struct se_cmd *cmd)
  {
        struct se_device *dev = cmd->se_dev;
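The new export gives CDB emulation a single call to report that it built less data than the initiator asked for: the core accumulates residual_count, sets SCF_UNDERFLOW_BIT and trims cmd->data_length before the usual completion. A hedged usage sketch; the function and its 36-byte buffer are illustrative stand-ins for the spc/sbc emulation callers elsewhere in this series:

    static sense_reason_t emulate_fixed_size_response(struct se_cmd *cmd)
    {
            unsigned char buf[36] = { };    /* e.g. standard INQUIRY data */

            /* ... fill buf and copy it into the command's data SGL ... */

            /*
             * If the initiator's allocation length exceeded 36 bytes, the
             * core reports residual_count = cmd->data_length - 36 and trims
             * data_length so the fabric returns only what was built.
             */
            target_complete_cmd_with_length(cmd, GOOD, sizeof(buf));
            return 0;
    }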
@@@ -736,7 -753,7 +753,7 @@@ void target_qf_do_work(struct work_stru
        list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
                list_del(&cmd->se_qf_node);
                atomic_dec(&dev->dev_qf_count);
 -              smp_mb__after_atomic_dec();
 +              smp_mb__after_atomic();
  
                pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
                        " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@@ -1149,7 -1166,7 +1166,7 @@@ transport_check_alloc_task_attr(struct 
         * Dormant to Active status.
         */
        cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
 -      smp_mb__after_atomic_inc();
 +      smp_mb__after_atomic();
        pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
                        cmd->se_ordered_id, cmd->sam_task_attr,
                        dev->transport->name);
@@@ -1706,7 -1723,7 +1723,7 @@@ static bool target_handle_task_attr(str
                return false;
        case MSG_ORDERED_TAG:
                atomic_inc(&dev->dev_ordered_sync);
 -              smp_mb__after_atomic_inc();
 +              smp_mb__after_atomic();
  
                pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
                         " se_ordered_id: %u\n",
                 * For SIMPLE and UNTAGGED Task Attribute commands
                 */
                atomic_inc(&dev->simple_cmds);
 -              smp_mb__after_atomic_inc();
 +              smp_mb__after_atomic();
                break;
        }
  
@@@ -1761,7 -1778,7 +1778,7 @@@ void target_execute_cmd(struct se_cmd *
                        cmd->se_tfo->get_task_tag(cmd));
  
                spin_unlock_irq(&cmd->t_state_lock);
-               complete(&cmd->t_transport_stop_comp);
+               complete_all(&cmd->t_transport_stop_comp);
                return;
        }
  
@@@ -1829,7 -1846,7 +1846,7 @@@ static void transport_complete_task_att
  
        if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
                atomic_dec(&dev->simple_cmds);
 -              smp_mb__after_atomic_dec();
 +              smp_mb__after_atomic();
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
                        " SIMPLE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
                atomic_dec(&dev->dev_ordered_sync);
 -              smp_mb__after_atomic_dec();
 +              smp_mb__after_atomic();
  
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@@ -1900,7 -1917,7 +1917,7 @@@ static void transport_handle_queue_full
        spin_lock_irq(&dev->qf_cmd_lock);
        list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
        atomic_inc(&dev->dev_qf_count);
 -      smp_mb__after_atomic_inc();
 +      smp_mb__after_atomic();
        spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
  
        schedule_work(&cmd->se_dev->qf_work_queue);
@@@ -2363,7 -2380,7 +2380,7 @@@ int target_get_sess_cmd(struct se_sessi
         * fabric acknowledgement that requires two target_put_sess_cmd()
         * invocations before se_cmd descriptor release.
         */
-       if (ack_kref == true) {
+       if (ack_kref) {
                kref_get(&se_cmd->cmd_kref);
                se_cmd->se_cmd_flags |= SCF_ACK_KREF;
        }
@@@ -2407,6 -2424,10 +2424,10 @@@ static void target_release_cmd_kref(str
   */
  int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
  {
+       if (!se_sess) {
+               se_cmd->se_tfo->release_cmd(se_cmd);
+               return 1;
+       }
        return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
                        &se_sess->sess_cmd_lock);
  }
@@@ -2875,7 -2896,7 +2896,7 @@@ void transport_send_task_abort(struct s
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
                        cmd->transport_state |= CMD_T_ABORTED;
                        cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
 -                      smp_mb__after_atomic_inc();
 +                      smp_mb__after_atomic();
                        return;
                }
        }
@@@ -2934,6 -2955,12 +2955,12 @@@ static void target_tmr_work(struct work
  int transport_generic_handle_tmr(
        struct se_cmd *cmd)
  {
+       unsigned long flags;
+
+       spin_lock_irqsave(&cmd->t_state_lock, flags);
+       cmd->transport_state |= CMD_T_ACTIVE;
+       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
        INIT_WORK(&cmd->work, target_tmr_work);
        queue_work(cmd->se_dev->tmr_wq, &cmd->work);
        return 0;
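Setting CMD_T_ACTIVE on TMRs matters because the generic stop path only waits on descriptors marked active; without the bit a task management request could be skipped entirely while still queued on tmr_wq. A simplified sketch of the consumer side in transport_wait_for_tasks(), under the same t_state_lock:

            if (!(cmd->transport_state & CMD_T_ACTIVE)) {
                    /* Never marked active: nothing in flight to wait for. */
                    spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                    return false;
            }

            cmd->transport_state |= CMD_T_STOP;
            spin_unlock_irqrestore(&cmd->t_state_lock, flags);

            /* Paired with the complete() -> complete_all() conversions above. */
            wait_for_completion(&cmd->t_transport_stop_comp);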
diff --combined drivers/vhost/scsi.c
index e9c280f55819fae38704cc43dfad33be6c8c40cf,03e484fa1ef4eca9109b9a49d2d472551a05b455..4f4ffa4c604e081755a3b77dba0fccb24c6ded7d
@@@ -57,7 -57,8 +57,8 @@@
  #define TCM_VHOST_MAX_CDB_SIZE 32
  #define TCM_VHOST_DEFAULT_TAGS 256
  #define TCM_VHOST_PREALLOC_SGLS 2048
- #define TCM_VHOST_PREALLOC_PAGES 2048
+ #define TCM_VHOST_PREALLOC_UPAGES 2048
+ #define TCM_VHOST_PREALLOC_PROT_SGLS 512
  
  struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
@@@ -79,10 -80,12 +80,12 @@@ struct tcm_vhost_cmd 
        u64 tvc_tag;
        /* The number of scatterlists associated with this cmd */
        u32 tvc_sgl_count;
+       u32 tvc_prot_sgl_count;
        /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
        u32 tvc_lun;
        /* Pointer to the SGL formatted memory from virtio-scsi */
        struct scatterlist *tvc_sgl;
+       struct scatterlist *tvc_prot_sgl;
        struct page **tvc_upages;
        /* Pointer to response */
        struct virtio_scsi_cmd_resp __user *tvc_resp;
@@@ -166,7 -169,8 +169,8 @@@ enum 
  };
  
  enum {
-       VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
+       VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
+                                              (1ULL << VIRTIO_SCSI_F_T10_PI)
  };
  
  #define VHOST_SCSI_MAX_TARGET 256
@@@ -456,12 -460,16 +460,16 @@@ static void tcm_vhost_release_cmd(struc
        struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
                                struct tcm_vhost_cmd, tvc_se_cmd);
        struct se_session *se_sess = se_cmd->se_sess;
+       int i;
  
        if (tv_cmd->tvc_sgl_count) {
-               u32 i;
                for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
                        put_page(sg_page(&tv_cmd->tvc_sgl[i]));
        }
+       if (tv_cmd->tvc_prot_sgl_count) {
+               for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
+                       put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
+       }
  
        tcm_vhost_put_inflight(tv_cmd->inflight);
        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
@@@ -606,7 -614,7 +614,7 @@@ tcm_vhost_do_evt_work(struct vhost_scs
  
  again:
        vhost_disable_notify(&vs->dev, vq);
 -      head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
 +      head = vhost_get_vq_desc(vq, vq->iov,
                        ARRAY_SIZE(vq->iov), &out, &in,
                        NULL, NULL);
        if (head < 0) {
@@@ -713,16 -721,14 +721,14 @@@ static void vhost_scsi_complete_cmd_wor
  }
  
  static struct tcm_vhost_cmd *
- vhost_scsi_get_tag(struct vhost_virtqueue *vq,
-                       struct tcm_vhost_tpg *tpg,
-                       struct virtio_scsi_cmd_req *v_req,
-                       u32 exp_data_len,
-                       int data_direction)
+ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
+                  unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
+                  u32 exp_data_len, int data_direction)
  {
        struct tcm_vhost_cmd *cmd;
        struct tcm_vhost_nexus *tv_nexus;
        struct se_session *se_sess;
-       struct scatterlist *sg;
+       struct scatterlist *sg, *prot_sg;
        struct page **pages;
        int tag;
  
  
        cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
        sg = cmd->tvc_sgl;
+       prot_sg = cmd->tvc_prot_sgl;
        pages = cmd->tvc_upages;
        memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
  
        cmd->tvc_sgl = sg;
+       cmd->tvc_prot_sgl = prot_sg;
        cmd->tvc_upages = pages;
        cmd->tvc_se_cmd.map_tag = tag;
-       cmd->tvc_tag = v_req->tag;
-       cmd->tvc_task_attr = v_req->task_attr;
+       cmd->tvc_tag = scsi_tag;
+       cmd->tvc_lun = lun;
+       cmd->tvc_task_attr = task_attr;
        cmd->tvc_exp_data_len = exp_data_len;
        cmd->tvc_data_direction = data_direction;
        cmd->tvc_nexus = tv_nexus;
        cmd->inflight = tcm_vhost_get_inflight(vq);
  
+       memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE);
        return cmd;
  }
  
@@@ -767,35 -778,28 +778,28 @@@ vhost_scsi_map_to_sgl(struct tcm_vhost_
                      struct scatterlist *sgl,
                      unsigned int sgl_count,
                      struct iovec *iov,
-                     int write)
+                     struct page **pages,
+                     bool write)
  {
        unsigned int npages = 0, pages_nr, offset, nbytes;
        struct scatterlist *sg = sgl;
        void __user *ptr = iov->iov_base;
        size_t len = iov->iov_len;
-       struct page **pages;
        int ret, i;
  
-       if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
-               pr_err("vhost_scsi_map_to_sgl() psgl_count: %u greater than"
-                      " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
-                       sgl_count, TCM_VHOST_PREALLOC_SGLS);
-               return -ENOBUFS;
-       }
        pages_nr = iov_num_pages(iov);
-       if (pages_nr > sgl_count)
+       if (pages_nr > sgl_count) {
+               pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
+                      " sgl_count: %u\n", pages_nr, sgl_count);
                return -ENOBUFS;
-       if (pages_nr > TCM_VHOST_PREALLOC_PAGES) {
+       }
+       if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
                pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
-                      " preallocated TCM_VHOST_PREALLOC_PAGES: %u\n",
-                       pages_nr, TCM_VHOST_PREALLOC_PAGES);
+                      " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
+                       pages_nr, TCM_VHOST_PREALLOC_UPAGES);
                return -ENOBUFS;
        }
  
-       pages = tv_cmd->tvc_upages;
        ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
        /* No pages were pinned */
        if (ret < 0)
@@@ -825,33 -829,32 +829,32 @@@ out
  static int
  vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
                          struct iovec *iov,
-                         unsigned int niov,
-                         int write)
+                         int niov,
+                         bool write)
  {
-       int ret;
-       unsigned int i;
-       u32 sgl_count;
-       struct scatterlist *sg;
+       struct scatterlist *sg = cmd->tvc_sgl;
+       unsigned int sgl_count = 0;
+       int ret, i;
  
-       /*
-        * Find out how long sglist needs to be
-        */
-       sgl_count = 0;
        for (i = 0; i < niov; i++)
                sgl_count += iov_num_pages(&iov[i]);
  
-       /* TODO overflow checking */
+       if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
+               pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
+                       " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
+                       sgl_count, TCM_VHOST_PREALLOC_SGLS);
+               return -ENOBUFS;
+       }
  
-       sg = cmd->tvc_sgl;
        pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
        sg_init_table(sg, sgl_count);
        cmd->tvc_sgl_count = sgl_count;
  
-       pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);
+       pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);
        for (i = 0; i < niov; i++) {
                ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
-                                           write);
+                                           cmd->tvc_upages, write);
                if (ret < 0) {
                        for (i = 0; i < cmd->tvc_sgl_count; i++)
                                put_page(sg_page(&cmd->tvc_sgl[i]));
                        cmd->tvc_sgl_count = 0;
                        return ret;
                }
                sg += ret;
                sgl_count -= ret;
        }
        return 0;
  }
  
+ static int
+ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
+                          struct iovec *iov,
+                          int niov,
+                          bool write)
+ {
+       struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
+       unsigned int prot_sgl_count = 0;
+       int ret, i;
+
+       for (i = 0; i < niov; i++)
+               prot_sgl_count += iov_num_pages(&iov[i]);
+
+       if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
+               pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
+                       " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
+                       prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
+               return -ENOBUFS;
+       }
+
+       pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
+                prot_sg, prot_sgl_count);
+       sg_init_table(prot_sg, prot_sgl_count);
+       cmd->tvc_prot_sgl_count = prot_sgl_count;
+
+       for (i = 0; i < niov; i++) {
+               ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
+                                           cmd->tvc_upages, write);
+               if (ret < 0) {
+                       for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
+                               put_page(sg_page(&cmd->tvc_prot_sgl[i]));
+                       cmd->tvc_prot_sgl_count = 0;
+                       return ret;
+               }
+               prot_sg += ret;
+               prot_sgl_count -= ret;
+       }
+       return 0;
+ }
+
  static void tcm_vhost_submission_work(struct work_struct *work)
  {
        struct tcm_vhost_cmd *cmd =
                container_of(work, struct tcm_vhost_cmd, work);
        struct tcm_vhost_nexus *tv_nexus;
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
-       struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
-       int rc, sg_no_bidi = 0;
+       struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
+       int rc;
  
+       /* FIXME: BIDI operation */
        if (cmd->tvc_sgl_count) {
                sg_ptr = cmd->tvc_sgl;
- /* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
- #if 0
-               if (se_cmd->se_cmd_flags & SCF_BIDI) {
-                       sg_bidi_ptr = NULL;
-                       sg_no_bidi = 0;
-               }
- #endif
+               if (cmd->tvc_prot_sgl_count)
+                       sg_prot_ptr = cmd->tvc_prot_sgl;
+               else
+                       se_cmd->prot_pto = true;
        } else {
                sg_ptr = NULL;
        }
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
                        cmd->tvc_task_attr, cmd->tvc_data_direction,
                        TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
-                       sg_bidi_ptr, sg_no_bidi, NULL, 0);
+                       NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
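Both mapping routines size their scatterlists via iov_num_pages(), one SG entry per page an iovec can touch, which is what gets checked against the TCM_VHOST_PREALLOC_* limits. The helper (defined near the top of this file) amounts to the usual page-span arithmetic; shown here as a reference sketch:

    static int iov_num_pages(struct iovec *iov)
    {
            /* Pages spanned = aligned end minus aligned start, in pages. */
            return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
                   ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
    }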
@@@ -926,12 -968,18 +968,18 @@@ vhost_scsi_handle_vq(struct vhost_scsi 
  {
        struct tcm_vhost_tpg **vs_tpg;
        struct virtio_scsi_cmd_req v_req;
+       struct virtio_scsi_cmd_req_pi v_req_pi;
        struct tcm_vhost_tpg *tpg;
        struct tcm_vhost_cmd *cmd;
-       u32 exp_data_len, data_first, data_num, data_direction;
+       u64 tag;
+       u32 exp_data_len, data_first, data_num, data_direction, prot_first;
        unsigned out, in, i;
-       int head, ret;
-       u8 target;
+       int head, ret, data_niov, prot_niov, prot_bytes;
+       size_t req_size;
+       u16 lun;
+       u8 *target, *lunp, task_attr;
+       bool hdr_pi;
+       void *req, *cdb;
  
        mutex_lock(&vq->mutex);
        /*
        vhost_disable_notify(&vs->dev, vq);
  
        for (;;) {
 -              head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
 +              head = vhost_get_vq_desc(vq, vq->iov,
                                        ARRAY_SIZE(vq->iov), &out, &in,
                                        NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
                        break;
                }
  
- /* FIXME: BIDI operation */
              /* FIXME: BIDI operation */
                if (out == 1 && in == 1) {
                        data_direction = DMA_NONE;
                        data_first = 0;
                        break;
                }
  
-               if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
-                       vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
-                               " bytes\n", vq->iov[0].iov_len);
 -              if (vs->dev.acked_features & VIRTIO_SCSI_F_T10_PI) {
++              if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
+                       req = &v_req_pi;
+                       lunp = &v_req_pi.lun[0];
+                       target = &v_req_pi.lun[1];
+                       req_size = sizeof(v_req_pi);
+                       hdr_pi = true;
+               } else {
+                       req = &v_req;
+                       lunp = &v_req.lun[0];
+                       target = &v_req.lun[1];
+                       req_size = sizeof(v_req);
+                       hdr_pi = false;
+               }
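+		/*
+		 * Layout sketch (per the virtio-scsi spec): struct
+		 * virtio_scsi_cmd_req_pi matches virtio_scsi_cmd_req through
+		 * lun[8]/tag/task_attr/prio/crn, then inserts u32 pi_bytesout
+		 * and u32 pi_bytesin ahead of cdb[], which is why req_size
+		 * differs between the two request headers.
+		 */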
+               if (unlikely(vq->iov[0].iov_len < req_size)) {
+                       pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
+                              req_size, vq->iov[0].iov_len);
                        break;
                }
-               pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
-                       " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
-               ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
-                               sizeof(v_req));
+               ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size);
                if (unlikely(ret)) {
                        vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
                        break;
                }
  
                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
-               if (unlikely(v_req.lun[0] != 1)) {
+               if (unlikely(*lunp != 1)) {
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
  
-               /* Extract the tpgt */
-               target = v_req.lun[1];
-               tpg = ACCESS_ONCE(vs_tpg[target]);
+               tpg = ACCESS_ONCE(vs_tpg[*target]);
  
                /* Target does not exist, fail the request */
                if (unlikely(!tpg)) {
                        continue;
                }
  
+               data_niov = data_num;
+               prot_niov = prot_first = prot_bytes = 0;
+               /*
+                * Determine if any protection information iovecs are preceding
+                * the actual data payload, and adjust data_first + data_niov
+                * values accordingly for vhost_scsi_map_iov_to_sgl() below.
+                *
+                * Also extract virtio_scsi header bits for vhost_scsi_get_tag()
+                */
+               if (hdr_pi) {
+                       if (v_req_pi.pi_bytesout) {
+                               if (data_direction != DMA_TO_DEVICE) {
+                                       vq_err(vq, "Received non zero do_pi_niov"
+                                               ", but wrong data_direction\n");
+                                       goto err_cmd;
+                               }
+                               prot_bytes = v_req_pi.pi_bytesout;
+                       } else if (v_req_pi.pi_bytesin) {
+                               if (data_direction != DMA_FROM_DEVICE) {
+                                       vq_err(vq, "Received non zero di_pi_niov"
+                                               ", but wrong data_direction\n");
+                                       goto err_cmd;
+                               }
+                               prot_bytes = v_req_pi.pi_bytesin;
+                       }
+                       if (prot_bytes) {
+                               int tmp = 0;
+                               for (i = 0; i < data_num; i++) {
+                                       tmp += vq->iov[data_first + i].iov_len;
+                                       prot_niov++;
+                                       if (tmp >= prot_bytes)
+                                               break;
+                               }
+                               prot_first = data_first;
+                               data_first += prot_niov;
+                               data_niov = data_num - prot_niov;
+                       }
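+			/*
+			 * Worked example with hypothetical sizes: given
+			 * prot_bytes = 4096 and iovecs of 2048, 2048 and 8192
+			 * bytes at data_first, the loop stops once tmp reaches
+			 * 4096, so prot_niov = 2, prot_first keeps the old
+			 * data_first, data_first advances past the two PI
+			 * iovecs, and data_niov becomes data_num - 2.
+			 */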
+                       tag = v_req_pi.tag;
+                       task_attr = v_req_pi.task_attr;
+                       cdb = &v_req_pi.cdb[0];
+                       lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
+               } else {
+                       tag = v_req.tag;
+                       task_attr = v_req.task_attr;
+                       cdb = &v_req.cdb[0];
+                       lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
+               }
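+		/*
+		 * LUN decode example: virtio-scsi carries a SAM single-level
+		 * LUN in bytes 2-3 with 0x40 set in byte 2 for flat
+		 * addressing, so lun[2] = 0x40, lun[3] = 0x05 yields
+		 * ((0x40 << 8) | 0x05) & 0x3FFF = 5.
+		 */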
                exp_data_len = 0;
-               for (i = 0; i < data_num; i++)
+               for (i = 0; i < data_niov; i++)
                        exp_data_len += vq->iov[data_first + i].iov_len;
+               /*
+                * Check that the received CDB size does not exceed our
+                * hardcoded max for vhost-scsi
+                *
+                * TODO what if cdb was too small for varlen cdb header?
+                */
+               if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) {
+                       vq_err(vq, "Received SCSI CDB with command_size: %d that"
+                               " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
+                               scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
+                       goto err_cmd;
+               }
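+		/*
+		 * scsi_command_size() keys off cdb[0]: fixed-length opcodes go
+		 * through COMMAND_SIZE(), while VARIABLE_LENGTH_CMD (0x7f)
+		 * reads the additional CDB length from cdb[7]; a READ(32), for
+		 * example, carries 0x18 there, giving 0x18 + 8 = 32 bytes.
+		 */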
  
-               cmd = vhost_scsi_get_tag(vq, tpg, &v_req,
-                                        exp_data_len, data_direction);
+               cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
+                                        exp_data_len + prot_bytes,
+                                        data_direction);
                if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
                                        PTR_ERR(cmd));
                        goto err_cmd;
                }
                pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
                        ": %d\n", cmd, exp_data_len, data_direction);
  
                cmd->tvc_vq = vq;
                cmd->tvc_resp = vq->iov[out].iov_base;
  
-               /*
-                * Copy in the recieved CDB descriptor into cmd->tvc_cdb
-                * that will be used by tcm_vhost_new_cmd_map() and down into
-                * target_setup_cmd_from_cdb()
-                */
-               memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
-               /*
-                * Check that the recieved CDB size does not exceeded our
-                * hardcoded max for tcm_vhost
-                */
-               /* TODO what if cdb was too small for varlen cdb header? */
-               if (unlikely(scsi_command_size(cmd->tvc_cdb) >
-                                       TCM_VHOST_MAX_CDB_SIZE)) {
-                       vq_err(vq, "Received SCSI CDB with command_size: %d that"
-                               " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
-                               scsi_command_size(cmd->tvc_cdb),
-                               TCM_VHOST_MAX_CDB_SIZE);
-                       goto err_free;
-               }
-               cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
                        cmd->tvc_cdb[0], cmd->tvc_lun);
  
+               if (prot_niov) {
+                       ret = vhost_scsi_map_iov_to_prot(cmd,
+                                       &vq->iov[prot_first], prot_niov,
+                                       data_direction == DMA_FROM_DEVICE);
+                       if (unlikely(ret)) {
+                               vq_err(vq, "Failed to map iov to"
+                                       " prot_sgl\n");
+                               goto err_free;
+                       }
+               }
                if (data_direction != DMA_NONE) {
                        ret = vhost_scsi_map_iov_to_sgl(cmd,
-                                       &vq->iov[data_first], data_num,
+                                       &vq->iov[data_first], data_niov,
                                        data_direction == DMA_FROM_DEVICE);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                goto err_free;
                        }
                }
                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
@@@ -1255,7 -1362,7 +1362,7 @@@ vhost_scsi_set_endpoint(struct vhost_sc
                        tpg->tv_tpg_vhost_count++;
                        tpg->vhost_scsi = vs;
                        vs_tpg[tpg->tport_tpgt] = tpg;
 -                      smp_mb__after_atomic_inc();
 +                      smp_mb__after_atomic();
                        match = true;
                }
                mutex_unlock(&tpg->tv_tpg_mutex);
@@@ -1373,9 -1480,6 +1480,9 @@@ err_dev
  
  static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
  {
 +      struct vhost_virtqueue *vq;
 +      int i;
 +
        if (features & ~VHOST_SCSI_FEATURES)
                return -EOPNOTSUPP;
  
                mutex_unlock(&vs->dev.mutex);
                return -EFAULT;
        }
 -      vs->dev.acked_features = features;
 -      smp_wmb();
 -      vhost_scsi_flush(vs);
 +
 +      for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
 +              vq = &vs->vqs[i].vq;
 +              mutex_lock(&vq->mutex);
 +              vq->acked_features = features;
 +              mutex_unlock(&vq->mutex);
 +      }
        mutex_unlock(&vs->dev.mutex);
        return 0;
  }
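 +
 +/*
 + * With acked_features stored per-virtqueue, vhost_has_feature() is expected
 + * to reduce to a per-vq bit test along these lines (a sketch of the vhost.h
 + * helper, shown for context):
 + *
 + *	static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
 + *	{
 + *		return vq->acked_features & (1ULL << bit);
 + *	}
 + *
 + * which is why vhost_scsi_handle_vq() above can test VIRTIO_SCSI_F_T10_PI
 + * while holding only vq->mutex.
 + */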
@@@ -1598,6 -1698,10 +1705,6 @@@ tcm_vhost_do_plug(struct tcm_vhost_tpg 
                return;
  
        mutex_lock(&vs->dev.mutex);
 -      if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
 -              mutex_unlock(&vs->dev.mutex);
 -              return;
 -      }
  
        if (plug)
                reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
  
        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        mutex_lock(&vq->mutex);
 -      tcm_vhost_send_evt(vs, tpg, lun,
 -                      VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
 +      if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
 +              tcm_vhost_send_evt(vs, tpg, lun,
 +                                 VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
        mutex_unlock(&vq->mutex);
        mutex_unlock(&vs->dev.mutex);
  }
@@@ -1716,6 -1819,7 +1823,7 @@@ static void tcm_vhost_free_cmd_map_res(
                tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
  
                kfree(tv_cmd->tvc_sgl);
+               kfree(tv_cmd->tvc_prot_sgl);
                kfree(tv_cmd->tvc_upages);
        }
  }
@@@ -1750,7 -1854,7 +1858,7 @@@ static int tcm_vhost_make_nexus(struct 
        tv_nexus->tvn_se_sess = transport_init_session_tags(
                                        TCM_VHOST_DEFAULT_TAGS,
                                        sizeof(struct tcm_vhost_cmd),
-                                       TARGET_PROT_NORMAL);
+                                       TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
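+	/*
+	 * TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS advertises PI
+	 * pass-through support in both data directions for this session
+	 * (reading the enum names in target_core_base.h); the previous
+	 * TARGET_PROT_NORMAL meant no protection handling at all.
+	 */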
        if (IS_ERR(tv_nexus->tvn_se_sess)) {
                mutex_unlock(&tpg->tv_tpg_mutex);
                kfree(tv_nexus);
                }
  
                tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
-                                       TCM_VHOST_PREALLOC_PAGES, GFP_KERNEL);
+                                       TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL);
                if (!tv_cmd->tvc_upages) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        pr_err("Unable to allocate tv_cmd->tvc_upages\n");
                        goto out;
                }
+               tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
+                                       TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL);
+               if (!tv_cmd->tvc_prot_sgl) {
+                       mutex_unlock(&tpg->tv_tpg_mutex);
+                       pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
+                       goto out;
+               }
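+		/*
+		 * Note: kcalloc(TCM_VHOST_PREALLOC_PROT_SGLS,
+		 * sizeof(struct scatterlist), GFP_KERNEL) would be the
+		 * overflow-checked idiom for this allocation.
+		 */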
        }
        /*
         * Since we are running in 'demo mode' this call will generate a
diff --combined include/scsi/scsi_cmnd.h
index e016e2ac38df8f6c570980a4624e9ccaefd0dd5e,a100c6e266c7d4037e872af5a5961c915462e2d7..42ed789ebafcf9ab04c759d7ef167e981aab2bc6
@@@ -7,6 -7,7 +7,7 @@@
  #include <linux/types.h>
  #include <linux/timer.h>
  #include <linux/scatterlist.h>
+ #include <scsi/scsi_device.h>
  
  struct Scsi_Host;
  struct scsi_device;
@@@ -133,15 -134,6 +134,15 @@@ struct scsi_cmnd 
        unsigned char tag;      /* SCSI-II queued command tag */
  };
  
 +/*
 + * Return the driver private allocation behind the command.
 + * Only works if cmd_size is set in the host template.
 + */
 +static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
 +{
 +      return cmd + 1;
 +}
 +
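 +/*
 + * Usage sketch with a hypothetical LLD: after setting
 + * .cmd_size = sizeof(struct my_cmd_priv) in its scsi_host_template, a
 + * driver can fetch its per-command state with
 + *
 + *	struct my_cmd_priv *priv = scsi_cmd_priv(cmd);
 + *
 + * because the core allocates the private area directly behind the command.
 + */
 +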
  /* make sure not to use it with REQ_TYPE_BLOCK_PC commands */
  static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
  {
@@@ -315,4 -307,20 +316,20 @@@ static inline void set_driver_byte(stru
        cmd->result = (cmd->result & 0x00ffffff) | (status << 24);
  }
  
+ static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
+ {
+       unsigned int xfer_len = blk_rq_bytes(scmd->request);
+       unsigned int prot_op = scsi_get_prot_op(scmd);
+       unsigned int sector_size = scmd->device->sector_size;
+
+       switch (prot_op) {
+       case SCSI_PROT_NORMAL:
+       case SCSI_PROT_WRITE_STRIP:
+       case SCSI_PROT_READ_INSERT:
+               return xfer_len;
+       }
+
+       return xfer_len + (xfer_len >> ilog2(sector_size)) * 8;
+ }
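+
+ /*
+  * Worked example, assuming 512-byte sectors and the 8-byte T10 PI tuple per
+  * sector: a 32768-byte READ with SCSI_PROT_READ_PASS transfers
+  * 32768 + (32768 >> 9) * 8 = 32768 + 512 = 33280 bytes on the wire.
+  */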
  #endif /* _SCSI_SCSI_CMND_H */