/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
19 #include <linux/string.h>
20 #include <linux/module.h>
21 #include <linux/scatterlist.h>
22 #include <linux/socket.h>
24 #include <linux/in6.h>
25 #include <rdma/ib_verbs.h>
26 #include <rdma/rdma_cm.h>
27 #include <target/target_core_base.h>
28 #include <target/target_core_fabric.h>
29 #include <target/iscsi/iscsi_transport.h>
30 #include <linux/semaphore.h>
32 #include "isert_proto.h"
35 #define ISERT_MAX_CONN 8
36 #define ISER_MAX_RX_CQ_LEN (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
37 #define ISER_MAX_TX_CQ_LEN (ISERT_QP_MAX_REQ_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)
41 static int isert_debug_level;
42 module_param_named(debug_level, isert_debug_level, int, 0644);
43 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
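/*
 * Usage sketch (assuming the driver is loaded as the ib_isert module):
 *   modprobe ib_isert debug_level=2
 * or, since the parameter is 0644, at runtime:
 *   echo 2 > /sys/module/ib_isert/parameters/debug_level
 */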
45 static DEFINE_MUTEX(device_list_mutex);
46 static LIST_HEAD(device_list);
47 static struct workqueue_struct *isert_comp_wq;
48 static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
69 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
71 return (conn->pi_support &&
72 cmd->prot_op != TARGET_PROT_NORMAL);
77 isert_qp_event_callback(struct ib_event *e, void *context)
79 struct isert_conn *isert_conn = context;
81 isert_err("%s (%d): conn %p\n",
82 ib_event_msg(e->event), e->event, isert_conn);
85 case IB_EVENT_COMM_EST:
86 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
88 case IB_EVENT_QP_LAST_WQE_REACHED:
89 isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
97 isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
101 ret = ib_query_device(ib_dev, devattr);
103 isert_err("ib_query_device() failed: %d\n", ret);
106 isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
107 isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
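/*
 * Pick the completion context with the fewest active QPs, under
 * device_list_mutex; the context is released again via isert_comp_put().
 */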
112 static struct isert_comp *
113 isert_comp_get(struct isert_conn *isert_conn)
115 struct isert_device *device = isert_conn->device;
116 struct isert_comp *comp;
119 mutex_lock(&device_list_mutex);
120 for (i = 0; i < device->comps_used; i++)
121 if (device->comps[i].active_qps <
122 device->comps[min].active_qps)
124 comp = &device->comps[min];
126 mutex_unlock(&device_list_mutex);
128 isert_info("conn %p, using comp %p min_index: %d\n",
129 isert_conn, comp, min);
135 isert_comp_put(struct isert_comp *comp)
137 mutex_lock(&device_list_mutex);
139 mutex_unlock(&device_list_mutex);
142 static struct ib_qp *
143 isert_create_qp(struct isert_conn *isert_conn,
144 struct isert_comp *comp,
145 struct rdma_cm_id *cma_id)
147 struct isert_device *device = isert_conn->device;
148 struct ib_qp_init_attr attr;
151 memset(&attr, 0, sizeof(struct ib_qp_init_attr));
152 attr.event_handler = isert_qp_event_callback;
153 attr.qp_context = isert_conn;
154 attr.send_cq = comp->cq;
155 attr.recv_cq = comp->cq;
156 attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
157 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READs with ConnectX-2.
	 *
	 * Also, still make sure to have at least two SGEs for
	 * outgoing control PDU responses.
	 */
165 attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
166 isert_conn->max_sge = attr.cap.max_send_sge;
168 attr.cap.max_recv_sge = 1;
169 attr.sq_sig_type = IB_SIGNAL_REQ_WR;
170 attr.qp_type = IB_QPT_RC;
171 if (device->pi_capable)
172 attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
174 ret = rdma_create_qp(cma_id, device->pd, &attr);
		isert_err("rdma_create_qp failed: %d\n", ret);
184 isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
186 struct isert_comp *comp;
189 comp = isert_comp_get(isert_conn);
190 isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
191 if (IS_ERR(isert_conn->qp)) {
192 ret = PTR_ERR(isert_conn->qp);
198 isert_comp_put(comp);
203 isert_cq_event_callback(struct ib_event *e, void *context)
205 isert_dbg("event: %d\n", e->event);
209 isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
211 struct isert_device *device = isert_conn->device;
212 struct ib_device *ib_dev = device->ib_device;
213 struct iser_rx_desc *rx_desc;
214 struct ib_sge *rx_sg;
218 isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
219 sizeof(struct iser_rx_desc), GFP_KERNEL);
220 if (!isert_conn->rx_descs)
223 rx_desc = isert_conn->rx_descs;
225 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
226 dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
227 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
228 if (ib_dma_mapping_error(ib_dev, dma_addr))
231 rx_desc->dma_addr = dma_addr;
233 rx_sg = &rx_desc->rx_sg;
234 rx_sg->addr = rx_desc->dma_addr;
235 rx_sg->length = ISER_RX_PAYLOAD_SIZE;
236 rx_sg->lkey = device->mr->lkey;
239 isert_conn->rx_desc_head = 0;
244 rx_desc = isert_conn->rx_descs;
245 for (j = 0; j < i; j++, rx_desc++) {
246 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
247 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
249 kfree(isert_conn->rx_descs);
250 isert_conn->rx_descs = NULL;
252 isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
258 isert_free_rx_descriptors(struct isert_conn *isert_conn)
260 struct ib_device *ib_dev = isert_conn->device->ib_device;
261 struct iser_rx_desc *rx_desc;
264 if (!isert_conn->rx_descs)
267 rx_desc = isert_conn->rx_descs;
268 for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
269 ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
270 ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
273 kfree(isert_conn->rx_descs);
274 isert_conn->rx_descs = NULL;
277 static void isert_cq_work(struct work_struct *);
278 static void isert_cq_callback(struct ib_cq *, void *);
281 isert_free_comps(struct isert_device *device)
285 for (i = 0; i < device->comps_used; i++) {
286 struct isert_comp *comp = &device->comps[i];
289 cancel_work_sync(&comp->work);
290 ib_destroy_cq(comp->cq);
293 kfree(device->comps);
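/*
 * Allocate one completion context per CQ vector, bounded by ISERT_MAX_CQ,
 * the number of online CPUs and the device's completion vectors; each
 * context gets its own CQ with isert_cq_work() as the bottom-half handler.
 */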
297 isert_alloc_comps(struct isert_device *device,
298 struct ib_device_attr *attr)
300 int i, max_cqe, ret = 0;
302 device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
303 device->ib_device->num_comp_vectors));
	isert_info("Using %d CQs, %s supports %d vectors, fastreg %d pi_capable %d\n",
		   device->comps_used, device->ib_device->name,
		   device->ib_device->num_comp_vectors, device->use_fastreg,
		   device->pi_capable);
311 device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
313 if (!device->comps) {
314 isert_err("Unable to allocate completion contexts\n");
318 max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);
320 for (i = 0; i < device->comps_used; i++) {
321 struct isert_comp *comp = &device->comps[i];
323 comp->device = device;
324 INIT_WORK(&comp->work, isert_cq_work);
325 comp->cq = ib_create_cq(device->ib_device,
327 isert_cq_event_callback,
330 if (IS_ERR(comp->cq)) {
331 isert_err("Unable to allocate cq\n");
332 ret = PTR_ERR(comp->cq);
337 ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
344 isert_free_comps(device);
349 isert_create_device_ib_res(struct isert_device *device)
351 struct ib_device_attr *dev_attr;
354 dev_attr = &device->dev_attr;
355 ret = isert_query_device(device->ib_device, dev_attr);
	/* assign function handlers */
360 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
361 dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
362 device->use_fastreg = 1;
363 device->reg_rdma_mem = isert_reg_rdma;
364 device->unreg_rdma_mem = isert_unreg_rdma;
366 device->use_fastreg = 0;
367 device->reg_rdma_mem = isert_map_rdma;
368 device->unreg_rdma_mem = isert_unmap_cmd;
371 ret = isert_alloc_comps(device, dev_attr);
375 device->pd = ib_alloc_pd(device->ib_device);
376 if (IS_ERR(device->pd)) {
377 ret = PTR_ERR(device->pd);
378 isert_err("failed to allocate pd, device %p, ret=%d\n",
383 device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE);
384 if (IS_ERR(device->mr)) {
385 ret = PTR_ERR(device->mr);
386 isert_err("failed to create dma mr, device %p, ret=%d\n",
391 /* Check signature cap */
392 device->pi_capable = dev_attr->device_cap_flags &
393 IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
398 ib_dealloc_pd(device->pd);
400 isert_free_comps(device);
405 isert_free_device_ib_res(struct isert_device *device)
407 isert_info("device %p\n", device);
409 ib_dereg_mr(device->mr);
410 ib_dealloc_pd(device->pd);
411 isert_free_comps(device);
415 isert_device_put(struct isert_device *device)
417 mutex_lock(&device_list_mutex);
419 isert_info("device %p refcount %d\n", device, device->refcount);
420 if (!device->refcount) {
421 isert_free_device_ib_res(device);
422 list_del(&device->dev_node);
425 mutex_unlock(&device_list_mutex);
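/*
 * Look up an existing isert_device by node GUID under device_list_mutex, or
 * allocate and initialize a new one; the reference taken here is dropped
 * via isert_device_put().
 */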
428 static struct isert_device *
429 isert_device_get(struct rdma_cm_id *cma_id)
431 struct isert_device *device;
434 mutex_lock(&device_list_mutex);
435 list_for_each_entry(device, &device_list, dev_node) {
436 if (device->ib_device->node_guid == cma_id->device->node_guid) {
438 isert_info("Found iser device %p refcount %d\n",
439 device, device->refcount);
440 mutex_unlock(&device_list_mutex);
445 device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
447 mutex_unlock(&device_list_mutex);
448 return ERR_PTR(-ENOMEM);
451 INIT_LIST_HEAD(&device->dev_node);
453 device->ib_device = cma_id->device;
454 ret = isert_create_device_ib_res(device);
457 mutex_unlock(&device_list_mutex);
462 list_add_tail(&device->dev_node, &device_list);
463 isert_info("Created a new iser device %p refcount %d\n",
464 device, device->refcount);
465 mutex_unlock(&device_list_mutex);
471 isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
473 struct fast_reg_descriptor *fr_desc, *tmp;
476 if (list_empty(&isert_conn->fr_pool))
	isert_info("Freeing conn %p fastreg pool\n", isert_conn);
481 list_for_each_entry_safe(fr_desc, tmp,
482 &isert_conn->fr_pool, list) {
483 list_del(&fr_desc->list);
484 ib_free_fast_reg_page_list(fr_desc->data_frpl);
485 ib_dereg_mr(fr_desc->data_mr);
486 if (fr_desc->pi_ctx) {
487 ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
488 ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
489 ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
490 kfree(fr_desc->pi_ctx);
496 if (i < isert_conn->fr_pool_size)
497 isert_warn("Pool still has %d regions registered\n",
498 isert_conn->fr_pool_size - i);
502 isert_create_pi_ctx(struct fast_reg_descriptor *desc,
503 struct ib_device *device,
506 struct ib_mr_init_attr mr_init_attr;
507 struct pi_context *pi_ctx;
510 pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
512 isert_err("Failed to allocate pi context\n");
516 pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
517 ISCSI_ISER_SG_TABLESIZE);
518 if (IS_ERR(pi_ctx->prot_frpl)) {
519 isert_err("Failed to allocate prot frpl err=%ld\n",
520 PTR_ERR(pi_ctx->prot_frpl));
521 ret = PTR_ERR(pi_ctx->prot_frpl);
525 pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
526 if (IS_ERR(pi_ctx->prot_mr)) {
527 isert_err("Failed to allocate prot frmr err=%ld\n",
528 PTR_ERR(pi_ctx->prot_mr));
529 ret = PTR_ERR(pi_ctx->prot_mr);
532 desc->ind |= ISERT_PROT_KEY_VALID;
534 memset(&mr_init_attr, 0, sizeof(mr_init_attr));
535 mr_init_attr.max_reg_descriptors = 2;
536 mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
537 pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
538 if (IS_ERR(pi_ctx->sig_mr)) {
539 isert_err("Failed to allocate signature enabled mr err=%ld\n",
540 PTR_ERR(pi_ctx->sig_mr));
541 ret = PTR_ERR(pi_ctx->sig_mr);
545 desc->pi_ctx = pi_ctx;
546 desc->ind |= ISERT_SIG_KEY_VALID;
547 desc->ind &= ~ISERT_PROTECTED;
552 ib_dereg_mr(desc->pi_ctx->prot_mr);
554 ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
562 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
563 struct fast_reg_descriptor *fr_desc)
567 fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
568 ISCSI_ISER_SG_TABLESIZE);
569 if (IS_ERR(fr_desc->data_frpl)) {
570 isert_err("Failed to allocate data frpl err=%ld\n",
571 PTR_ERR(fr_desc->data_frpl));
572 return PTR_ERR(fr_desc->data_frpl);
575 fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
576 if (IS_ERR(fr_desc->data_mr)) {
577 isert_err("Failed to allocate data frmr err=%ld\n",
578 PTR_ERR(fr_desc->data_mr));
579 ret = PTR_ERR(fr_desc->data_mr);
582 fr_desc->ind |= ISERT_DATA_KEY_VALID;
584 isert_dbg("Created fr_desc %p\n", fr_desc);
589 ib_free_fast_reg_page_list(fr_desc->data_frpl);
595 isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
597 struct fast_reg_descriptor *fr_desc;
598 struct isert_device *device = isert_conn->device;
599 struct se_session *se_sess = isert_conn->conn->sess->se_sess;
600 struct se_node_acl *se_nacl = se_sess->se_node_acl;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
606 tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
607 tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
609 isert_conn->fr_pool_size = 0;
610 for (i = 0; i < tag_num; i++) {
611 fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
613 isert_err("Failed to allocate fast_reg descriptor\n");
618 ret = isert_create_fr_desc(device->ib_device,
619 device->pd, fr_desc);
621 isert_err("Failed to create fastreg descriptor err=%d\n",
627 list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
628 isert_conn->fr_pool_size++;
	isert_dbg("Creating conn %p fastreg pool size=%d\n",
		  isert_conn, isert_conn->fr_pool_size);
637 isert_conn_free_fastreg_pool(isert_conn);
642 isert_init_conn(struct isert_conn *isert_conn)
644 isert_conn->state = ISER_CONN_INIT;
645 INIT_LIST_HEAD(&isert_conn->accept_node);
646 init_completion(&isert_conn->login_comp);
647 init_completion(&isert_conn->login_req_comp);
648 init_completion(&isert_conn->wait);
649 kref_init(&isert_conn->kref);
650 mutex_init(&isert_conn->mutex);
651 spin_lock_init(&isert_conn->pool_lock);
652 INIT_LIST_HEAD(&isert_conn->fr_pool);
656 isert_free_login_buf(struct isert_conn *isert_conn)
658 struct ib_device *ib_dev = isert_conn->device->ib_device;
660 ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
661 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
662 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
663 ISCSI_DEF_MAX_RECV_SEG_LEN,
665 kfree(isert_conn->login_buf);
669 isert_alloc_login_buf(struct isert_conn *isert_conn,
670 struct ib_device *ib_dev)
674 isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
675 ISER_RX_LOGIN_SIZE, GFP_KERNEL);
676 if (!isert_conn->login_buf) {
677 isert_err("Unable to allocate isert_conn->login_buf\n");
681 isert_conn->login_req_buf = isert_conn->login_buf;
682 isert_conn->login_rsp_buf = isert_conn->login_buf +
683 ISCSI_DEF_MAX_RECV_SEG_LEN;
685 isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
686 isert_conn->login_buf, isert_conn->login_req_buf,
687 isert_conn->login_rsp_buf);
689 isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
690 (void *)isert_conn->login_req_buf,
691 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
693 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
695 isert_err("login_req_dma mapping error: %d\n", ret);
696 isert_conn->login_req_dma = 0;
700 isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
701 (void *)isert_conn->login_rsp_buf,
702 ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
704 ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
706 isert_err("login_rsp_dma mapping error: %d\n", ret);
707 isert_conn->login_rsp_dma = 0;
708 goto out_req_dma_map;
714 ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
715 ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
717 kfree(isert_conn->login_buf);
722 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
724 struct isert_np *isert_np = cma_id->context;
725 struct iscsi_np *np = isert_np->np;
726 struct isert_conn *isert_conn;
727 struct isert_device *device;
730 spin_lock_bh(&np->np_thread_lock);
732 spin_unlock_bh(&np->np_thread_lock);
733 isert_dbg("iscsi_np is not enabled, reject connect request\n");
734 return rdma_reject(cma_id, NULL, 0);
736 spin_unlock_bh(&np->np_thread_lock);
738 isert_dbg("cma_id: %p, portal: %p\n",
739 cma_id, cma_id->context);
741 isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
745 isert_init_conn(isert_conn);
746 isert_conn->cm_id = cma_id;
748 ret = isert_alloc_login_buf(isert_conn, cma_id->device);
752 device = isert_device_get(cma_id);
753 if (IS_ERR(device)) {
754 ret = PTR_ERR(device);
755 goto out_rsp_dma_map;
757 isert_conn->device = device;
759 /* Set max inflight RDMA READ requests */
760 isert_conn->initiator_depth = min_t(u8,
761 event->param.conn.initiator_depth,
762 device->dev_attr.max_qp_init_rd_atom);
763 isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
765 ret = isert_conn_setup_qp(isert_conn, cma_id);
769 ret = isert_rdma_post_recvl(isert_conn);
773 ret = isert_rdma_accept(isert_conn);
777 mutex_lock(&isert_np->np_accept_mutex);
778 list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list);
779 mutex_unlock(&isert_np->np_accept_mutex);
781 isert_info("np %p: Allow accept_np to continue\n", np);
782 up(&isert_np->np_sem);
786 isert_device_put(device);
788 isert_free_login_buf(isert_conn);
791 rdma_reject(cma_id, NULL, 0);
796 isert_connect_release(struct isert_conn *isert_conn)
798 struct isert_device *device = isert_conn->device;
800 isert_dbg("conn %p\n", isert_conn);
804 if (device->use_fastreg)
805 isert_conn_free_fastreg_pool(isert_conn);
807 isert_free_rx_descriptors(isert_conn);
808 if (isert_conn->cm_id)
809 rdma_destroy_id(isert_conn->cm_id);
811 if (isert_conn->qp) {
812 struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;
814 isert_comp_put(comp);
815 ib_destroy_qp(isert_conn->qp);
818 if (isert_conn->login_buf)
819 isert_free_login_buf(isert_conn);
821 isert_device_put(device);
827 isert_connected_handler(struct rdma_cm_id *cma_id)
829 struct isert_conn *isert_conn = cma_id->qp->qp_context;
831 isert_info("conn %p\n", isert_conn);
833 if (!kref_get_unless_zero(&isert_conn->kref)) {
834 isert_warn("conn %p connect_release is running\n", isert_conn);
838 mutex_lock(&isert_conn->mutex);
839 if (isert_conn->state != ISER_CONN_FULL_FEATURE)
840 isert_conn->state = ISER_CONN_UP;
841 mutex_unlock(&isert_conn->mutex);
845 isert_release_kref(struct kref *kref)
847 struct isert_conn *isert_conn = container_of(kref,
848 struct isert_conn, kref);
850 isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
853 isert_connect_release(isert_conn);
857 isert_put_conn(struct isert_conn *isert_conn)
859 kref_put(&isert_conn->kref, isert_release_kref);
/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
879 switch (isert_conn->state) {
880 case ISER_CONN_TERMINATING:
883 case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
884 isert_info("Terminating conn %p state %d\n",
885 isert_conn, isert_conn->state);
886 isert_conn->state = ISER_CONN_TERMINATING;
887 err = rdma_disconnect(isert_conn->cm_id);
889 isert_warn("Failed rdma_disconnect isert_conn %p\n",
		isert_warn("conn %p terminating in state %d\n",
			   isert_conn, isert_conn->state);
899 isert_np_cma_handler(struct isert_np *isert_np,
900 enum rdma_cm_event_type event)
902 isert_dbg("%s (%d): isert np %p\n",
903 rdma_event_msg(event), event, isert_np);
906 case RDMA_CM_EVENT_DEVICE_REMOVAL:
907 isert_np->np_cm_id = NULL;
909 case RDMA_CM_EVENT_ADDR_CHANGE:
910 isert_np->np_cm_id = isert_setup_id(isert_np);
911 if (IS_ERR(isert_np->np_cm_id)) {
912 isert_err("isert np %p setup id failed: %ld\n",
913 isert_np, PTR_ERR(isert_np->np_cm_id));
914 isert_np->np_cm_id = NULL;
918 isert_err("isert np %p Unexpected event %d\n",
926 isert_disconnected_handler(struct rdma_cm_id *cma_id,
927 enum rdma_cm_event_type event)
929 struct isert_np *isert_np = cma_id->context;
930 struct isert_conn *isert_conn;
932 if (isert_np->np_cm_id == cma_id)
933 return isert_np_cma_handler(cma_id->context, event);
935 isert_conn = cma_id->qp->qp_context;
937 mutex_lock(&isert_conn->mutex);
938 isert_conn_terminate(isert_conn);
939 mutex_unlock(&isert_conn->mutex);
941 isert_info("conn %p completing wait\n", isert_conn);
942 complete(&isert_conn->wait);
948 isert_connect_error(struct rdma_cm_id *cma_id)
950 struct isert_conn *isert_conn = cma_id->qp->qp_context;
952 isert_conn->cm_id = NULL;
953 isert_put_conn(isert_conn);
959 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
963 isert_info("%s (%d): status %d id %p np %p\n",
964 rdma_event_msg(event->event), event->event,
965 event->status, cma_id, cma_id->context);
967 switch (event->event) {
968 case RDMA_CM_EVENT_CONNECT_REQUEST:
969 ret = isert_connect_request(cma_id, event);
			isert_err("failed to handle connect request %d\n", ret);
973 case RDMA_CM_EVENT_ESTABLISHED:
974 isert_connected_handler(cma_id);
976 case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
977 case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
978 case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
979 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
980 ret = isert_disconnected_handler(cma_id, event->event);
982 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
983 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
984 case RDMA_CM_EVENT_CONNECT_ERROR:
985 ret = isert_connect_error(cma_id);
988 isert_err("Unhandled RDMA CMA event: %d\n", event->event);
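/*
 * Post up to @count receive WRs as a single chained list, advancing
 * rx_desc_head modulo ISERT_QP_MAX_RECV_DTOS (the mask below assumes a
 * power-of-two ring size).
 */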
996 isert_post_recv(struct isert_conn *isert_conn, u32 count)
998 struct ib_recv_wr *rx_wr, *rx_wr_failed;
1000 unsigned int rx_head = isert_conn->rx_desc_head;
1001 struct iser_rx_desc *rx_desc;
1003 for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
1004 rx_desc = &isert_conn->rx_descs[rx_head];
1005 rx_wr->wr_id = (uintptr_t)rx_desc;
1006 rx_wr->sg_list = &rx_desc->rx_sg;
1008 rx_wr->next = rx_wr + 1;
1009 rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
1013 rx_wr->next = NULL; /* mark end of work requests list */
1015 isert_conn->post_recv_buf_count += count;
1016 ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
1019 isert_err("ib_post_recv() failed with ret: %d\n", ret);
1020 isert_conn->post_recv_buf_count -= count;
1022 isert_dbg("Posted %d RX buffers\n", count);
1023 isert_conn->rx_desc_head = rx_head;
1029 isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
1031 struct ib_device *ib_dev = isert_conn->cm_id->device;
1032 struct ib_send_wr send_wr, *send_wr_failed;
1035 ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
1036 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1038 send_wr.next = NULL;
1039 send_wr.wr_id = (uintptr_t)tx_desc;
1040 send_wr.sg_list = tx_desc->tx_sg;
1041 send_wr.num_sge = tx_desc->num_sge;
1042 send_wr.opcode = IB_WR_SEND;
1043 send_wr.send_flags = IB_SEND_SIGNALED;
1045 ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
1047 isert_err("ib_post_send() failed, ret: %d\n", ret);
1053 isert_create_send_desc(struct isert_conn *isert_conn,
1054 struct isert_cmd *isert_cmd,
1055 struct iser_tx_desc *tx_desc)
1057 struct isert_device *device = isert_conn->device;
1058 struct ib_device *ib_dev = device->ib_device;
1060 ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
1061 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1063 memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
1064 tx_desc->iser_header.flags = ISER_VER;
1066 tx_desc->num_sge = 1;
1067 tx_desc->isert_cmd = isert_cmd;
1069 if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
1070 tx_desc->tx_sg[0].lkey = device->mr->lkey;
1071 isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
1076 isert_init_tx_hdrs(struct isert_conn *isert_conn,
1077 struct iser_tx_desc *tx_desc)
1079 struct isert_device *device = isert_conn->device;
1080 struct ib_device *ib_dev = device->ib_device;
1083 dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
1084 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1085 if (ib_dma_mapping_error(ib_dev, dma_addr)) {
1086 isert_err("ib_dma_mapping_error() failed\n");
1090 tx_desc->dma_addr = dma_addr;
1091 tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
1092 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
1093 tx_desc->tx_sg[0].lkey = device->mr->lkey;
1095 isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
1096 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
1097 tx_desc->tx_sg[0].lkey);
1103 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1104 struct ib_send_wr *send_wr)
1106 struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
1108 isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
1109 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
1110 send_wr->opcode = IB_WR_SEND;
1111 send_wr->sg_list = &tx_desc->tx_sg[0];
1112 send_wr->num_sge = isert_cmd->tx_desc.num_sge;
1113 send_wr->send_flags = IB_SEND_SIGNALED;
1117 isert_rdma_post_recvl(struct isert_conn *isert_conn)
1119 struct ib_recv_wr rx_wr, *rx_wr_fail;
1123 memset(&sge, 0, sizeof(struct ib_sge));
1124 sge.addr = isert_conn->login_req_dma;
1125 sge.length = ISER_RX_LOGIN_SIZE;
1126 sge.lkey = isert_conn->device->mr->lkey;
1128 isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
1129 sge.addr, sge.length, sge.lkey);
1131 memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
1132 rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
1133 rx_wr.sg_list = &sge;
1136 isert_conn->post_recv_buf_count++;
1137 ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
1139 isert_err("ib_post_recv() failed: %d\n", ret);
1140 isert_conn->post_recv_buf_count--;
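/*
 * Build and post a login response PDU. On the final (login_complete)
 * response the connection transitions to FULL_FEATURE: RX descriptors and,
 * for normal sessions with fastreg support, the fast registration pool are
 * allocated and the initial RX credits are posted.
 */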
1147 isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
1150 struct isert_conn *isert_conn = conn->context;
1151 struct isert_device *device = isert_conn->device;
1152 struct ib_device *ib_dev = device->ib_device;
1153 struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
1156 isert_create_send_desc(isert_conn, NULL, tx_desc);
1158 memcpy(&tx_desc->iscsi_header, &login->rsp[0],
1159 sizeof(struct iscsi_hdr));
1161 isert_init_tx_hdrs(isert_conn, tx_desc);
1164 struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
1166 ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
1167 length, DMA_TO_DEVICE);
1169 memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
1171 ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
1172 length, DMA_TO_DEVICE);
1174 tx_dsg->addr = isert_conn->login_rsp_dma;
1175 tx_dsg->length = length;
1176 tx_dsg->lkey = isert_conn->device->mr->lkey;
1177 tx_desc->num_sge = 2;
1179 if (!login->login_failed) {
1180 if (login->login_complete) {
1181 if (!conn->sess->sess_ops->SessionType &&
1182 isert_conn->device->use_fastreg) {
1183 ret = isert_conn_create_fastreg_pool(isert_conn);
1185 isert_err("Conn: %p failed to create"
1186 " fastreg pool\n", isert_conn);
1191 ret = isert_alloc_rx_descriptors(isert_conn);
1195 ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
1199 /* Now we are in FULL_FEATURE phase */
1200 mutex_lock(&isert_conn->mutex);
1201 isert_conn->state = ISER_CONN_FULL_FEATURE;
1202 mutex_unlock(&isert_conn->mutex);
1206 ret = isert_rdma_post_recvl(isert_conn);
1211 ret = isert_post_send(isert_conn, tx_desc);
1219 isert_rx_login_req(struct isert_conn *isert_conn)
1221 struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
1222 int rx_buflen = isert_conn->login_req_len;
1223 struct iscsi_conn *conn = isert_conn->conn;
1224 struct iscsi_login *login = conn->conn_login;
1227 isert_info("conn %p\n", isert_conn);
1229 WARN_ON_ONCE(!login);
1231 if (login->first_request) {
1232 struct iscsi_login_req *login_req =
1233 (struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
1238 login->leading_connection = (!login_req->tsih) ? 1 : 0;
1239 login->current_stage =
1240 (login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
1242 login->version_min = login_req->min_version;
1243 login->version_max = login_req->max_version;
1244 memcpy(login->isid, login_req->isid, 6);
1245 login->cmd_sn = be32_to_cpu(login_req->cmdsn);
1246 login->init_task_tag = login_req->itt;
1247 login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
1248 login->cid = be16_to_cpu(login_req->cid);
1249 login->tsih = be16_to_cpu(login_req->tsih);
1252 memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
1254 size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
1255 isert_dbg("Using login payload size: %d, rx_buflen: %d "
1256 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
1257 MAX_KEY_VALUE_PAIRS);
1258 memcpy(login->req_buf, &rx_desc->data[0], size);
1260 if (login->first_request) {
1261 complete(&isert_conn->login_comp);
1264 schedule_delayed_work(&conn->login_work, 0);
1267 static struct iscsi_cmd
1268 *isert_allocate_cmd(struct iscsi_conn *conn)
1270 struct isert_conn *isert_conn = conn->context;
1271 struct isert_cmd *isert_cmd;
1272 struct iscsi_cmd *cmd;
1274 cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
1276 isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
1279 isert_cmd = iscsit_priv_cmd(cmd);
1280 isert_cmd->conn = isert_conn;
1281 isert_cmd->iscsi_cmd = cmd;
1287 isert_handle_scsi_cmd(struct isert_conn *isert_conn,
1288 struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
1289 struct iser_rx_desc *rx_desc, unsigned char *buf)
1291 struct iscsi_conn *conn = isert_conn->conn;
1292 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
1293 struct scatterlist *sg;
1294 int imm_data, imm_data_len, unsol_data, sg_nents, rc;
1295 bool dump_payload = false;
1297 rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
1301 imm_data = cmd->immediate_data;
1302 imm_data_len = cmd->first_burst_len;
1303 unsol_data = cmd->unsolicited_data;
1305 rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
1308 } else if (rc > 0) {
1309 dump_payload = true;
1316 sg = &cmd->se_cmd.t_data_sg[0];
1317 sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
1319 isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
1320 sg, sg_nents, &rx_desc->data[0], imm_data_len);
1322 sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
1324 cmd->write_data_done += imm_data_len;
1326 if (cmd->write_data_done == cmd->se_cmd.data_length) {
1327 spin_lock_bh(&cmd->istate_lock);
1328 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1329 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1330 spin_unlock_bh(&cmd->istate_lock);
1334 rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
	if (!rc && !dump_payload && unsol_data)
1337 iscsit_set_unsoliticed_dataout(cmd);
1338 else if (dump_payload && imm_data)
1339 target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
1345 isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
1346 struct iser_rx_desc *rx_desc, unsigned char *buf)
1348 struct scatterlist *sg_start;
1349 struct iscsi_conn *conn = isert_conn->conn;
1350 struct iscsi_cmd *cmd = NULL;
1351 struct iscsi_data *hdr = (struct iscsi_data *)buf;
1352 u32 unsol_data_len = ntoh24(hdr->dlength);
1353 int rc, sg_nents, sg_off, page_off;
1355 rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
1363 if (!cmd->unsolicited_data) {
1364 isert_err("Received unexpected solicited data payload\n");
1369 isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
1370 "write_data_done: %u, data_length: %u\n",
1371 unsol_data_len, cmd->write_data_done,
1372 cmd->se_cmd.data_length);
1374 sg_off = cmd->write_data_done / PAGE_SIZE;
1375 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
1376 sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
1377 page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
1382 isert_err("unexpected non-page aligned data payload\n");
1386 isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
1387 "sg_nents: %u from %p %u\n", sg_start, sg_off,
1388 sg_nents, &rx_desc->data[0], unsol_data_len);
1390 sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
1393 rc = iscsit_check_dataout_payload(cmd, hdr, false);
1401 isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1402 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1405 struct iscsi_conn *conn = isert_conn->conn;
1406 struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
1409 rc = iscsit_setup_nop_out(conn, cmd, hdr);
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */
1416 return iscsit_process_nop_out(conn, cmd, hdr);
1420 isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1421 struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
1422 struct iscsi_text *hdr)
1424 struct iscsi_conn *conn = isert_conn->conn;
1425 u32 payload_length = ntoh24(hdr->dlength);
1427 unsigned char *text_in = NULL;
1429 rc = iscsit_setup_text_cmd(conn, cmd, hdr);
1433 if (payload_length) {
1434 text_in = kzalloc(payload_length, GFP_KERNEL);
1436 isert_err("Unable to allocate text_in of payload_length: %u\n",
1441 cmd->text_in_ptr = text_in;
1443 memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
1445 return iscsit_process_text_cmd(conn, cmd, hdr);
1449 isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
1450 uint32_t read_stag, uint64_t read_va,
1451 uint32_t write_stag, uint64_t write_va)
1453 struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
1454 struct iscsi_conn *conn = isert_conn->conn;
1455 struct iscsi_cmd *cmd;
1456 struct isert_cmd *isert_cmd;
1458 u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
1460 if (conn->sess->sess_ops->SessionType &&
1461 (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
1462 isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
1463 " ignoring\n", opcode);
1468 case ISCSI_OP_SCSI_CMD:
1469 cmd = isert_allocate_cmd(conn);
1473 isert_cmd = iscsit_priv_cmd(cmd);
1474 isert_cmd->read_stag = read_stag;
1475 isert_cmd->read_va = read_va;
1476 isert_cmd->write_stag = write_stag;
1477 isert_cmd->write_va = write_va;
1479 ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
1480 rx_desc, (unsigned char *)hdr);
1482 case ISCSI_OP_NOOP_OUT:
1483 cmd = isert_allocate_cmd(conn);
1487 isert_cmd = iscsit_priv_cmd(cmd);
1488 ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
1489 rx_desc, (unsigned char *)hdr);
1491 case ISCSI_OP_SCSI_DATA_OUT:
1492 ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
1493 (unsigned char *)hdr);
1495 case ISCSI_OP_SCSI_TMFUNC:
1496 cmd = isert_allocate_cmd(conn);
1500 ret = iscsit_handle_task_mgt_cmd(conn, cmd,
1501 (unsigned char *)hdr);
1503 case ISCSI_OP_LOGOUT:
1504 cmd = isert_allocate_cmd(conn);
1508 ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
1511 if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
1512 cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
1516 cmd = isert_allocate_cmd(conn);
1521 isert_cmd = iscsit_priv_cmd(cmd);
1522 ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
1523 rx_desc, (struct iscsi_text *)hdr);
1526 isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
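/*
 * Parse the iSER header in front of the iSCSI PDU: pick up the remote
 * read/write STags and VAs when the RSV/WSV bits are set, then hand the
 * PDU off to isert_rx_opcode().
 */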
1535 isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
1537 struct iser_hdr *iser_hdr = &rx_desc->iser_header;
1538 uint64_t read_va = 0, write_va = 0;
1539 uint32_t read_stag = 0, write_stag = 0;
1542 switch (iser_hdr->flags & 0xF0) {
1544 if (iser_hdr->flags & ISER_RSV) {
1545 read_stag = be32_to_cpu(iser_hdr->read_stag);
1546 read_va = be64_to_cpu(iser_hdr->read_va);
1547 isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
1548 read_stag, (unsigned long long)read_va);
1550 if (iser_hdr->flags & ISER_WSV) {
1551 write_stag = be32_to_cpu(iser_hdr->write_stag);
1552 write_va = be64_to_cpu(iser_hdr->write_va);
1553 isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
1554 write_stag, (unsigned long long)write_va);
1557 isert_dbg("ISER ISCSI_CTRL PDU\n");
1560 isert_err("iSER Hello message\n");
1563 isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
1567 rc = isert_rx_opcode(isert_conn, rx_desc,
1568 read_stag, read_va, write_stag, write_va);
1572 isert_rcv_completion(struct iser_rx_desc *desc,
1573 struct isert_conn *isert_conn,
1576 struct ib_device *ib_dev = isert_conn->cm_id->device;
1577 struct iscsi_hdr *hdr;
1579 int rx_buflen, outstanding;
1581 if ((char *)desc == isert_conn->login_req_buf) {
1582 rx_dma = isert_conn->login_req_dma;
1583 rx_buflen = ISER_RX_LOGIN_SIZE;
1584 isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1587 rx_dma = desc->dma_addr;
1588 rx_buflen = ISER_RX_PAYLOAD_SIZE;
1589 isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
1593 ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
1595 hdr = &desc->iscsi_header;
1596 isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
1597 hdr->opcode, hdr->itt, hdr->flags,
1598 (int)(xfer_len - ISER_HEADERS_LEN));
1600 if ((char *)desc == isert_conn->login_req_buf) {
1601 isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
1602 if (isert_conn->conn) {
1603 struct iscsi_login *login = isert_conn->conn->conn_login;
1605 if (login && !login->first_request)
1606 isert_rx_login_req(isert_conn);
1608 mutex_lock(&isert_conn->mutex);
1609 complete(&isert_conn->login_req_comp);
1610 mutex_unlock(&isert_conn->mutex);
1612 isert_rx_do_work(desc, isert_conn);
1615 ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
1618 isert_conn->post_recv_buf_count--;
1619 isert_dbg("Decremented post_recv_buf_count: %d\n",
1620 isert_conn->post_recv_buf_count);
1622 if ((char *)desc == isert_conn->login_req_buf)
1625 outstanding = isert_conn->post_recv_buf_count;
1626 if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
1627 int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
1628 ISERT_MIN_POSTED_RX);
1629 err = isert_post_recv(isert_conn, count);
1631 isert_err("isert_post_recv() count: %d failed, %d\n",
1638 isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
1639 struct scatterlist *sg, u32 nents, u32 length, u32 offset,
1640 enum iser_ib_op_code op, struct isert_data_buf *data)
1642 struct ib_device *ib_dev = isert_conn->cm_id->device;
1644 data->dma_dir = op == ISER_IB_RDMA_WRITE ?
1645 DMA_TO_DEVICE : DMA_FROM_DEVICE;
1647 data->len = length - offset;
1648 data->offset = offset;
1649 data->sg_off = data->offset / PAGE_SIZE;
1651 data->sg = &sg[data->sg_off];
1652 data->nents = min_t(unsigned int, nents - data->sg_off,
1653 ISCSI_ISER_SG_TABLESIZE);
1654 data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
1657 data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
1659 if (unlikely(!data->dma_nents)) {
1660 isert_err("Cmd: unable to dma map SGs %p\n", sg);
1664 isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
1665 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
1671 isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
1673 struct ib_device *ib_dev = isert_conn->cm_id->device;
1675 ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
1676 memset(data, 0, sizeof(*data));
1682 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1684 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1686 isert_dbg("Cmd %p\n", isert_cmd);
1689 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
1690 isert_unmap_data_buf(isert_conn, &wr->data);
1694 isert_dbg("Cmd %p free send_wr\n", isert_cmd);
1700 isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
1707 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
1709 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1711 isert_dbg("Cmd %p\n", isert_cmd);
1714 isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
1715 if (wr->fr_desc->ind & ISERT_PROTECTED) {
1716 isert_unmap_data_buf(isert_conn, &wr->prot);
1717 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1719 spin_lock_bh(&isert_conn->pool_lock);
1720 list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
1721 spin_unlock_bh(&isert_conn->pool_lock);
1726 isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
1727 isert_unmap_data_buf(isert_conn, &wr->data);
1735 isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
1737 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1738 struct isert_conn *isert_conn = isert_cmd->conn;
1739 struct iscsi_conn *conn = isert_conn->conn;
1740 struct isert_device *device = isert_conn->device;
1741 struct iscsi_text_rsp *hdr;
1743 isert_dbg("Cmd %p\n", isert_cmd);
1745 switch (cmd->iscsi_opcode) {
1746 case ISCSI_OP_SCSI_CMD:
1747 spin_lock_bh(&conn->cmd_lock);
1748 if (!list_empty(&cmd->i_conn_node))
1749 list_del_init(&cmd->i_conn_node);
1750 spin_unlock_bh(&conn->cmd_lock);
1752 if (cmd->data_direction == DMA_TO_DEVICE) {
1753 iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
1762 struct se_cmd *se_cmd = &cmd->se_cmd;
1764 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
1768 device->unreg_rdma_mem(isert_cmd, isert_conn);
1769 transport_generic_free_cmd(&cmd->se_cmd, 0);
1771 case ISCSI_OP_SCSI_TMFUNC:
1772 spin_lock_bh(&conn->cmd_lock);
1773 if (!list_empty(&cmd->i_conn_node))
1774 list_del_init(&cmd->i_conn_node);
1775 spin_unlock_bh(&conn->cmd_lock);
1777 transport_generic_free_cmd(&cmd->se_cmd, 0);
1779 case ISCSI_OP_REJECT:
1780 case ISCSI_OP_NOOP_OUT:
1782 hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
1783 /* If the continue bit is on, keep the command alive */
1784 if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
1787 spin_lock_bh(&conn->cmd_lock);
1788 if (!list_empty(&cmd->i_conn_node))
1789 list_del_init(&cmd->i_conn_node);
1790 spin_unlock_bh(&conn->cmd_lock);
1793 * Handle special case for REJECT when iscsi_add_reject*() has
1794 * overwritten the original iscsi_opcode assignment, and the
1795 * associated cmd->se_cmd needs to be released.
1797 if (cmd->se_cmd.se_tfo != NULL) {
1798 isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
1800 transport_generic_free_cmd(&cmd->se_cmd, 0);
1807 iscsit_release_cmd(cmd);
1813 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
1815 if (tx_desc->dma_addr != 0) {
1816 isert_dbg("unmap single for tx_desc->dma_addr\n");
1817 ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
1818 ISER_HEADERS_LEN, DMA_TO_DEVICE);
1819 tx_desc->dma_addr = 0;
1824 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
1825 struct ib_device *ib_dev, bool comp_err)
1827 if (isert_cmd->pdu_buf_dma != 0) {
1828 isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
1829 ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
1830 isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
1831 isert_cmd->pdu_buf_dma = 0;
1834 isert_unmap_tx_desc(tx_desc, ib_dev);
1835 isert_put_cmd(isert_cmd, comp_err);
1839 isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
1841 struct ib_mr_status mr_status;
1844 ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
1846 isert_err("ib_check_mr_status failed, ret %d\n", ret);
1847 goto fail_mr_status;
1850 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
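		/* each logical block carries an 8-byte T10 PI tuple, hence +8 */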
1852 u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
1854 switch (mr_status.sig_err.err_type) {
1855 case IB_SIG_BAD_GUARD:
1856 se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
1858 case IB_SIG_BAD_REFTAG:
1859 se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
1861 case IB_SIG_BAD_APPTAG:
1862 se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
1865 sec_offset_err = mr_status.sig_err.sig_err_offset;
1866 do_div(sec_offset_err, block_size);
1867 se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
1869 isert_err("PI error found type %d at sector 0x%llx "
1870 "expected 0x%x vs actual 0x%x\n",
1871 mr_status.sig_err.err_type,
1872 (unsigned long long)se_cmd->bad_sector,
1873 mr_status.sig_err.expected,
1874 mr_status.sig_err.actual);
1883 isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
1884 struct isert_cmd *isert_cmd)
1886 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1887 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1888 struct se_cmd *se_cmd = &cmd->se_cmd;
1889 struct isert_conn *isert_conn = isert_cmd->conn;
1890 struct isert_device *device = isert_conn->device;
1893 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
1894 ret = isert_check_pi_status(se_cmd,
1895 wr->fr_desc->pi_ctx->sig_mr);
1896 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1899 device->unreg_rdma_mem(isert_cmd, isert_conn);
1900 wr->send_wr_num = 0;
1902 transport_send_check_condition_and_sense(se_cmd,
1905 isert_put_response(isert_conn->conn, cmd);
1909 isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
1910 struct isert_cmd *isert_cmd)
1912 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
1913 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1914 struct se_cmd *se_cmd = &cmd->se_cmd;
1915 struct isert_conn *isert_conn = isert_cmd->conn;
1916 struct isert_device *device = isert_conn->device;
1919 if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
1920 ret = isert_check_pi_status(se_cmd,
1921 wr->fr_desc->pi_ctx->sig_mr);
1922 wr->fr_desc->ind &= ~ISERT_PROTECTED;
1925 iscsit_stop_dataout_timer(cmd);
1926 device->unreg_rdma_mem(isert_cmd, isert_conn);
1927 cmd->write_data_done = wr->data.len;
1928 wr->send_wr_num = 0;
1930 isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
1931 spin_lock_bh(&cmd->istate_lock);
1932 cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
1933 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
1934 spin_unlock_bh(&cmd->istate_lock);
1937 target_put_sess_cmd(se_cmd->se_sess, se_cmd);
1938 transport_send_check_condition_and_sense(se_cmd,
1941 target_execute_cmd(se_cmd);
1946 isert_do_control_comp(struct work_struct *work)
1948 struct isert_cmd *isert_cmd = container_of(work,
1949 struct isert_cmd, comp_work);
1950 struct isert_conn *isert_conn = isert_cmd->conn;
1951 struct ib_device *ib_dev = isert_conn->cm_id->device;
1952 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1954 isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
1956 switch (cmd->i_state) {
1957 case ISTATE_SEND_TASKMGTRSP:
1958 iscsit_tmr_post_handler(cmd, cmd->conn);
1959 case ISTATE_SEND_REJECT: /* FALLTHRU */
1960 case ISTATE_SEND_TEXTRSP: /* FALLTHRU */
1961 cmd->i_state = ISTATE_SENT_STATUS;
1962 isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
1965 case ISTATE_SEND_LOGOUTRSP:
1966 iscsit_logout_post_handler(cmd, cmd->conn);
1969 isert_err("Unknown i_state %d\n", cmd->i_state);
1976 isert_response_completion(struct iser_tx_desc *tx_desc,
1977 struct isert_cmd *isert_cmd,
1978 struct isert_conn *isert_conn,
1979 struct ib_device *ib_dev)
1981 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
1983 if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
1984 cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
1985 cmd->i_state == ISTATE_SEND_REJECT ||
1986 cmd->i_state == ISTATE_SEND_TEXTRSP) {
1987 isert_unmap_tx_desc(tx_desc, ib_dev);
1989 INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
1990 queue_work(isert_comp_wq, &isert_cmd->comp_work);
1994 cmd->i_state = ISTATE_SENT_STATUS;
1995 isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
1999 isert_snd_completion(struct iser_tx_desc *tx_desc,
2000 struct isert_conn *isert_conn)
2002 struct ib_device *ib_dev = isert_conn->cm_id->device;
2003 struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
2004 struct isert_rdma_wr *wr;
2007 isert_unmap_tx_desc(tx_desc, ib_dev);
2010 wr = &isert_cmd->rdma_wr;
2012 isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
2014 switch (wr->iser_ib_op) {
2016 isert_response_completion(tx_desc, isert_cmd,
2017 isert_conn, ib_dev);
2019 case ISER_IB_RDMA_WRITE:
2020 isert_completion_rdma_write(tx_desc, isert_cmd);
2022 case ISER_IB_RDMA_READ:
2023 isert_completion_rdma_read(tx_desc, isert_cmd);
2026 isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
/**
 * is_isert_tx_desc() - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @isert_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
2046 void *start = isert_conn->rx_descs;
2047 int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
2049 if (wr_id >= start && wr_id < start + len)
2056 isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
2058 if (wc->wr_id == ISER_BEACON_WRID) {
2059 isert_info("conn %p completing wait_comp_err\n",
2061 complete(&isert_conn->wait_comp_err);
2062 } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
2063 struct ib_device *ib_dev = isert_conn->cm_id->device;
2064 struct isert_cmd *isert_cmd;
2065 struct iser_tx_desc *desc;
2067 desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
2068 isert_cmd = desc->isert_cmd;
2070 isert_unmap_tx_desc(desc, ib_dev);
2072 isert_completion_put(desc, isert_cmd, ib_dev, true);
2074 isert_conn->post_recv_buf_count--;
2075 if (!isert_conn->post_recv_buf_count)
2076 iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
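/*
 * Dispatch one work completion: successful RECVs carry an rx_desc pointer
 * in wr_id, successful sends a tx_desc; failed completions (except for
 * ISER_FASTREG_LI_WRID) are handed to isert_cq_comp_err().
 */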
2081 isert_handle_wc(struct ib_wc *wc)
2083 struct isert_conn *isert_conn;
2084 struct iser_tx_desc *tx_desc;
2085 struct iser_rx_desc *rx_desc;
2087 isert_conn = wc->qp->qp_context;
2088 if (likely(wc->status == IB_WC_SUCCESS)) {
2089 if (wc->opcode == IB_WC_RECV) {
2090 rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
2091 isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
2093 tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
2094 isert_snd_completion(tx_desc, isert_conn);
2097 if (wc->status != IB_WC_WR_FLUSH_ERR)
2098 isert_err("%s (%d): wr id %llx vend_err %x\n",
2099 ib_wc_status_msg(wc->status), wc->status,
2100 wc->wr_id, wc->vendor_err);
2102 isert_dbg("%s (%d): wr id %llx\n",
2103 ib_wc_status_msg(wc->status), wc->status,
2106 if (wc->wr_id != ISER_FASTREG_LI_WRID)
2107 isert_cq_comp_err(isert_conn, wc);
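/*
 * CQ bottom half: poll completions in batches under a fixed budget so a
 * busy CQ cannot monopolize the workqueue, then re-arm notification.
 */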
2112 isert_cq_work(struct work_struct *work)
2114 enum { isert_poll_budget = 65536 };
2115 struct isert_comp *comp = container_of(work, struct isert_comp,
2117 struct ib_wc *const wcs = comp->wcs;
2118 int i, n, completed = 0;
2120 while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
2121 for (i = 0; i < n; i++)
2122 isert_handle_wc(&wcs[i]);
2125 if (completed >= isert_poll_budget)
2129 ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
2133 isert_cq_callback(struct ib_cq *cq, void *context)
2135 struct isert_comp *comp = context;
2137 queue_work(isert_comp_wq, &comp->work);
2141 isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
2143 struct ib_send_wr *wr_failed;
2146 ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
2149 isert_err("ib_post_send failed with %d\n", ret);
2156 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2158 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2159 struct isert_conn *isert_conn = conn->context;
2160 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2161 struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
2162 &isert_cmd->tx_desc.iscsi_header;
2164 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2165 iscsit_build_rsp_pdu(cmd, conn, true, hdr);
2166 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
2170 if (cmd->se_cmd.sense_buffer &&
2171 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
2172 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
2173 struct isert_device *device = isert_conn->device;
2174 struct ib_device *ib_dev = device->ib_device;
2175 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2176 u32 padding, pdu_len;
2178 put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
2180 cmd->se_cmd.scsi_sense_length += sizeof(__be16);
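		/* pad the sense Data Segment to a 4-byte boundary, per iSCSI PDU rules */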
2182 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
2183 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
2184 pdu_len = cmd->se_cmd.scsi_sense_length + padding;
2186 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2187 (void *)cmd->sense_buffer, pdu_len,
2190 isert_cmd->pdu_buf_len = pdu_len;
2191 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2192 tx_dsg->length = pdu_len;
2193 tx_dsg->lkey = device->mr->lkey;
2194 isert_cmd->tx_desc.num_sge = 2;
2197 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2199 isert_dbg("Posting SCSI Response\n");
2201 return isert_post_response(isert_conn, isert_cmd);
2205 isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2207 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2208 struct isert_conn *isert_conn = conn->context;
2209 struct isert_device *device = isert_conn->device;
2211 spin_lock_bh(&conn->cmd_lock);
2212 if (!list_empty(&cmd->i_conn_node))
2213 list_del_init(&cmd->i_conn_node);
2214 spin_unlock_bh(&conn->cmd_lock);
2216 if (cmd->data_direction == DMA_TO_DEVICE)
2217 iscsit_stop_dataout_timer(cmd);
2219 device->unreg_rdma_mem(isert_cmd, isert_conn);
2222 static enum target_prot_op
2223 isert_get_sup_prot_ops(struct iscsi_conn *conn)
2225 struct isert_conn *isert_conn = conn->context;
2226 struct isert_device *device = isert_conn->device;
2228 if (conn->tpg->tpg_attrib.t10_pi) {
2229 if (device->pi_capable) {
2230 isert_info("conn %p PI offload enabled\n", isert_conn);
2231 isert_conn->pi_support = true;
2232 return TARGET_PROT_ALL;
2236 isert_info("conn %p PI offload disabled\n", isert_conn);
2237 isert_conn->pi_support = false;
2239 return TARGET_PROT_NORMAL;
2243 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2244 bool nopout_response)
2246 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2247 struct isert_conn *isert_conn = conn->context;
2248 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2250 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2251 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
2252 &isert_cmd->tx_desc.iscsi_header,
2254 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2255 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2257 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
2259 return isert_post_response(isert_conn, isert_cmd);
2263 isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2265 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2266 struct isert_conn *isert_conn = conn->context;
2267 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2269 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2270 iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
2271 &isert_cmd->tx_desc.iscsi_header);
2272 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2273 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2275 isert_dbg("conn %p Posting Logout Response\n", isert_conn);
2277 return isert_post_response(isert_conn, isert_cmd);
2281 isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2283 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2284 struct isert_conn *isert_conn = conn->context;
2285 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2287 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2288 iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
2289 &isert_cmd->tx_desc.iscsi_header);
2290 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2291 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2293 isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
2295 return isert_post_response(isert_conn, isert_cmd);
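/*
 * isert_put_reject() - build and post a Reject PDU. The header of the
 * rejected PDU (cmd->buf_ptr) is DMA-mapped and attached as a second SGE so
 * it is carried as the Reject data segment.
 */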
2299 isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2301 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2302 struct isert_conn *isert_conn = conn->context;
2303 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2304 struct isert_device *device = isert_conn->device;
2305 struct ib_device *ib_dev = device->ib_device;
2306 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2307 struct iscsi_reject *hdr =
2308 (struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
2310 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2311 iscsit_build_reject(cmd, conn, hdr);
2312 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2314 hton24(hdr->dlength, ISCSI_HDR_LEN);
2315 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2316 (void *)cmd->buf_ptr, ISCSI_HDR_LEN,
2317 DMA_TO_DEVICE);
2318 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
2319 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2320 tx_dsg->length = ISCSI_HDR_LEN;
2321 tx_dsg->lkey = device->mr->lkey;
2322 isert_cmd->tx_desc.num_sge = 2;
2324 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2326 isert_dbg("conn %p Posting Reject\n", isert_conn);
2328 return isert_post_response(isert_conn, isert_cmd);
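/*
 * isert_put_text_rsp() - build and post a Text Response PDU. If the
 * response carries key=value data, the text payload is DMA-mapped and
 * attached as a second SGE.
 */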
2332 isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2334 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2335 struct isert_conn *isert_conn = conn->context;
2336 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2337 struct iscsi_text_rsp *hdr =
2338 (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
2342 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2343 rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
2348 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2351 struct isert_device *device = isert_conn->device;
2352 struct ib_device *ib_dev = device->ib_device;
2353 struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
2354 void *txt_rsp_buf = cmd->buf_ptr;
2356 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
2357 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
2359 isert_cmd->pdu_buf_len = txt_rsp_len;
2360 tx_dsg->addr = isert_cmd->pdu_buf_dma;
2361 tx_dsg->length = txt_rsp_len;
2362 tx_dsg->lkey = device->mr->lkey;
2363 isert_cmd->tx_desc.num_sge = 2;
2365 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2367 isert_dbg("conn %p Text Response\n", isert_conn);
2369 return isert_post_response(isert_conn, isert_cmd);
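/*
 * isert_build_rdma_wr() - fill one RDMA work request with up to max_sge
 * ib_sge entries taken from the command's TCM scatterlist, starting at the
 * given byte offset. The number of SGEs consumed is returned so the caller
 * can advance through the ib_sge array.
 */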
2373 isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
2374 struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
2375 u32 data_left, u32 offset)
2377 struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
2378 struct scatterlist *sg_start, *tmp_sg;
2379 struct isert_device *device = isert_conn->device;
2380 struct ib_device *ib_dev = device->ib_device;
2381 u32 sg_off, page_off;
2382 int i = 0, sg_nents;
2384 sg_off = offset / PAGE_SIZE;
2385 sg_start = &cmd->se_cmd.t_data_sg[sg_off];
2386 sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
2387 page_off = offset % PAGE_SIZE;
2389 send_wr->sg_list = ib_sge;
2390 send_wr->num_sge = sg_nents;
2391 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
2393 * Map the TCM scatterlist memory into ib_sge dma_addr/length entries.
2395 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2396 isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
2398 (unsigned long long)tmp_sg->dma_address,
2399 tmp_sg->length, page_off);
2401 ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
2402 ib_sge->length = min_t(u32, data_left,
2403 ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
2404 ib_sge->lkey = device->mr->lkey;
2406 isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
2407 ib_sge->addr, ib_sge->length, ib_sge->lkey);
2409 data_left -= ib_sge->length;
2411 isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
2414 isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
2415 send_wr->sg_list, send_wr->num_sge);
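/*
 * isert_map_rdma() - plain (non fast-registration) RDMA setup: DMA-map the
 * command's data scatterlist and build a chain of RDMA_WRITE or RDMA_READ
 * work requests, each covering at most max_sge scatterlist entries.
 */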
2421 isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2422 struct isert_rdma_wr *wr)
2424 struct se_cmd *se_cmd = &cmd->se_cmd;
2425 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2426 struct isert_conn *isert_conn = conn->context;
2427 struct isert_data_buf *data = &wr->data;
2428 struct ib_send_wr *send_wr;
2429 struct ib_sge *ib_sge;
2430 u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
2431 int ret = 0, i, ib_sge_cnt;
2433 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2435 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2436 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2437 se_cmd->t_data_nents, se_cmd->data_length,
2438 offset, wr->iser_ib_op, &wr->data);
2442 data_left = data->len;
2443 offset = data->offset;
2445 ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
2447 isert_warn("Unable to allocate ib_sge\n");
2451 wr->ib_sge = ib_sge;
2453 wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
2454 wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
2455 GFP_KERNEL);
2457 isert_dbg("Unable to allocate wr->send_wr\n");
2462 wr->isert_cmd = isert_cmd;
2463 rdma_write_max = isert_conn->max_sge * PAGE_SIZE;
2465 for (i = 0; i < wr->send_wr_num; i++) {
2466 send_wr = &isert_cmd->rdma_wr.send_wr[i];
2467 data_len = min(data_left, rdma_write_max);
2469 send_wr->send_flags = 0;
2470 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2471 send_wr->opcode = IB_WR_RDMA_WRITE;
2472 send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
2473 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2474 if (i + 1 == wr->send_wr_num)
2475 send_wr->next = &isert_cmd->tx_desc.send_wr;
2476 else
2477 send_wr->next = &wr->send_wr[i + 1];
2479 send_wr->opcode = IB_WR_RDMA_READ;
2480 send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
2481 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2482 if (i + 1 == wr->send_wr_num)
2483 send_wr->send_flags = IB_SEND_SIGNALED;
2484 else
2485 send_wr->next = &wr->send_wr[i + 1];
2488 ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
2489 send_wr, data_len, offset);
2490 ib_sge += ib_sge_cnt;
2493 va_offset += data_len;
2494 data_left -= data_len;
2499 isert_unmap_data_buf(isert_conn, data);
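/*
 * isert_map_fr_pagelist() - flatten a DMA-mapped scatterlist into the
 * page-aligned address array used by a fast-registration page list,
 * returning the number of pages mapped.
 */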
2505 isert_map_fr_pagelist(struct ib_device *ib_dev,
2506 struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
2508 u64 start_addr, end_addr, page, chunk_start = 0;
2509 struct scatterlist *tmp_sg;
2510 int i = 0, new_chunk, last_ent, n_pages;
2514 last_ent = sg_nents - 1;
2515 for_each_sg(sg_start, tmp_sg, sg_nents, i) {
2516 start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
2518 chunk_start = start_addr;
2519 end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
2521 isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
2522 i, (unsigned long long)tmp_sg->dma_address,
2523 tmp_sg->length);
2525 if ((end_addr & ~PAGE_MASK) && i < last_ent) {
2531 page = chunk_start & PAGE_MASK;
2532 do {
2533 fr_pl[n_pages++] = page;
2534 isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
2535 n_pages - 1, page);
2536 page += PAGE_SIZE;
2537 } while (page < end_addr);
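/*
 * isert_inv_rkey() - prepare a LOCAL_INV work request that invalidates the
 * MR's current rkey, then bump the key portion so the next registration
 * uses a fresh rkey.
 */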
2544 isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
2548 memset(inv_wr, 0, sizeof(*inv_wr));
2549 inv_wr->wr_id = ISER_FASTREG_LI_WRID;
2550 inv_wr->opcode = IB_WR_LOCAL_INV;
2551 inv_wr->ex.invalidate_rkey = mr->rkey;
2554 rkey = ib_inc_rkey(mr->rkey);
2555 ib_update_fast_reg_key(mr, rkey);
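/*
 * isert_fast_reg_mr() - register a data or protection buffer with a
 * fast-registration MR and return the resulting SGE. A single-entry,
 * DMA-contiguous buffer is passed through with the local DMA lkey instead
 * of being registered.
 */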
2559 isert_fast_reg_mr(struct isert_conn *isert_conn,
2560 struct fast_reg_descriptor *fr_desc,
2561 struct isert_data_buf *mem,
2562 enum isert_indicator ind,
2565 struct isert_device *device = isert_conn->device;
2566 struct ib_device *ib_dev = device->ib_device;
2568 struct ib_fast_reg_page_list *frpl;
2569 struct ib_send_wr fr_wr, inv_wr;
2570 struct ib_send_wr *bad_wr, *wr = NULL;
2571 int ret, pagelist_len;
2574 if (mem->dma_nents == 1) {
2575 sge->lkey = device->mr->lkey;
2576 sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
2577 sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
2578 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
2579 sge->addr, sge->length, sge->lkey);
2583 if (ind == ISERT_DATA_KEY_VALID) {
2584 /* Registering data buffer */
2585 mr = fr_desc->data_mr;
2586 frpl = fr_desc->data_frpl;
2588 /* Registering protection buffer */
2589 mr = fr_desc->pi_ctx->prot_mr;
2590 frpl = fr_desc->pi_ctx->prot_frpl;
2593 page_off = mem->offset % PAGE_SIZE;
2595 isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
2596 fr_desc, mem->nents, mem->offset);
2598 pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
2599 &frpl->page_list[0]);
2601 if (!(fr_desc->ind & ind)) {
2602 isert_inv_rkey(&inv_wr, mr);
2606 /* Prepare FASTREG WR */
2607 memset(&fr_wr, 0, sizeof(fr_wr));
2608 fr_wr.wr_id = ISER_FASTREG_LI_WRID;
2609 fr_wr.opcode = IB_WR_FAST_REG_MR;
2610 fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
2611 fr_wr.wr.fast_reg.page_list = frpl;
2612 fr_wr.wr.fast_reg.page_list_len = pagelist_len;
2613 fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
2614 fr_wr.wr.fast_reg.length = mem->len;
2615 fr_wr.wr.fast_reg.rkey = mr->rkey;
2616 fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
2623 ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
2625 isert_err("fast registration failed, ret:%d\n", ret);
2628 fr_desc->ind &= ~ind;
2630 sge->lkey = mr->lkey;
2631 sge->addr = frpl->page_list[0] + page_off;
2632 sge->length = mem->len;
2634 isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
2635 sge->addr, sge->length, sge->lkey);
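/*
 * isert_set_dif_domain() - describe one T10-DIF domain (wire or memory) for
 * signature MR registration: CRC guard tag, block-size protection interval,
 * and the reference tag seed taken from the se_cmd.
 */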
2641 isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
2642 struct ib_sig_domain *domain)
2644 domain->sig_type = IB_SIG_TYPE_T10_DIF;
2645 domain->sig.dif.bg_type = IB_T10DIF_CRC;
2646 domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
2647 domain->sig.dif.ref_tag = se_cmd->reftag_seed;
2649 * At the moment we hard code these values, but if the target core
2650 * wants to control them in the future, we will take them from se_cmd.
2653 domain->sig.dif.apptag_check_mask = 0xffff;
2654 domain->sig.dif.app_escape = true;
2655 domain->sig.dif.ref_escape = true;
2656 if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
2657 se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
2658 domain->sig.dif.ref_remap = true;
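/*
 * isert_set_sig_attrs() - translate the se_cmd protection operation into
 * signature attributes: INSERT/STRIP leave one domain unprotected, while
 * PASS keeps protection information on both the wire and memory domains.
 */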
2662 isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
2664 switch (se_cmd->prot_op) {
2665 case TARGET_PROT_DIN_INSERT:
2666 case TARGET_PROT_DOUT_STRIP:
2667 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
2668 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2670 case TARGET_PROT_DOUT_INSERT:
2671 case TARGET_PROT_DIN_STRIP:
2672 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
2673 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2675 case TARGET_PROT_DIN_PASS:
2676 case TARGET_PROT_DOUT_PASS:
2677 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
2678 isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
2681 isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
2689 isert_set_prot_checks(u8 prot_checks)
2691 return (prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
2692 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
2693 (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
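/*
 * isert_reg_sig_mr() - register a signature MR that combines the data (and,
 * when present, protection) SGEs and applies the DIF checks selected above,
 * producing the single SIG SGE used for the RDMA transfer.
 */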
2697 isert_reg_sig_mr(struct isert_conn *isert_conn,
2698 struct se_cmd *se_cmd,
2699 struct isert_rdma_wr *rdma_wr,
2700 struct fast_reg_descriptor *fr_desc)
2702 struct ib_send_wr sig_wr, inv_wr;
2703 struct ib_send_wr *bad_wr, *wr = NULL;
2704 struct pi_context *pi_ctx = fr_desc->pi_ctx;
2705 struct ib_sig_attrs sig_attrs;
2708 memset(&sig_attrs, 0, sizeof(sig_attrs));
2709 ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
2713 sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
2715 if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
2716 isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
2720 memset(&sig_wr, 0, sizeof(sig_wr));
2721 sig_wr.opcode = IB_WR_REG_SIG_MR;
2722 sig_wr.wr_id = ISER_FASTREG_LI_WRID;
2723 sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
2725 sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
2726 sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
2727 sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
2728 if (se_cmd->t_prot_sg)
2729 sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
2736 ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
2738 isert_err("fast registration failed, ret:%d\n", ret);
2741 fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
2743 rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
2744 rdma_wr->ib_sg[SIG].addr = 0;
2745 rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
2746 if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
2747 se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
2749 * We have protection guards on the wire
2750 * so we need to set a larger transfer length
2752 rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
2754 isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
2755 rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
2756 rdma_wr->ib_sg[SIG].lkey);
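/*
 * isert_handle_prot_cmd() - set up protection information for a T10-PI
 * command: allocate the pi_ctx on first use, map and fast-register the
 * protection scatterlist if one exists, then register the signature MR.
 */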
2762 isert_handle_prot_cmd(struct isert_conn *isert_conn,
2763 struct isert_cmd *isert_cmd,
2764 struct isert_rdma_wr *wr)
2766 struct isert_device *device = isert_conn->device;
2767 struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
2770 if (!wr->fr_desc->pi_ctx) {
2771 ret = isert_create_pi_ctx(wr->fr_desc,
2772 device->ib_device,
2773 device->pd);
2775 isert_err("conn %p failed to allocate pi_ctx\n",
2776 isert_conn);
2781 if (se_cmd->t_prot_sg) {
2782 ret = isert_map_data_buf(isert_conn, isert_cmd,
2783 se_cmd->t_prot_sg,
2784 se_cmd->t_prot_nents,
2785 se_cmd->prot_length,
2786 0, wr->iser_ib_op, &wr->prot);
2788 isert_err("conn %p failed to map protection buffer\n",
2793 memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
2794 ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
2795 ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
2797 isert_err("conn %p failed to fast reg mr\n",
2798 isert_conn);
2799 goto unmap_prot_cmd;
2803 ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
2805 isert_err("conn %p failed to fast reg mr\n",
2806 isert_conn);
2807 goto unmap_prot_cmd;
2809 wr->fr_desc->ind |= ISERT_PROTECTED;
2814 if (se_cmd->t_prot_sg)
2815 isert_unmap_data_buf(isert_conn, &wr->prot);
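/*
 * isert_reg_rdma() - fast-registration RDMA setup: map the data buffer,
 * grab a descriptor from the fr_pool when registration is needed (multiple
 * DMA segments or PI offload), and build a single RDMA work request that
 * targets either the DATA or the SIG SGE.
 */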
2821 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2822 struct isert_rdma_wr *wr)
2824 struct se_cmd *se_cmd = &cmd->se_cmd;
2825 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2826 struct isert_conn *isert_conn = conn->context;
2827 struct fast_reg_descriptor *fr_desc = NULL;
2828 struct ib_send_wr *send_wr;
2829 struct ib_sge *ib_sg;
2832 unsigned long flags;
2834 isert_cmd->tx_desc.isert_cmd = isert_cmd;
2836 offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
2837 ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
2838 se_cmd->t_data_nents, se_cmd->data_length,
2839 offset, wr->iser_ib_op, &wr->data);
2843 if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
2844 spin_lock_irqsave(&isert_conn->pool_lock, flags);
2845 fr_desc = list_first_entry(&isert_conn->fr_pool,
2846 struct fast_reg_descriptor, list);
2847 list_del(&fr_desc->list);
2848 spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
2849 wr->fr_desc = fr_desc;
2852 ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
2853 ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
2857 if (isert_prot_cmd(isert_conn, se_cmd)) {
2858 ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
2862 ib_sg = &wr->ib_sg[SIG];
2864 ib_sg = &wr->ib_sg[DATA];
2867 memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
2868 wr->ib_sge = &wr->s_ib_sge;
2869 wr->send_wr_num = 1;
2870 memset(&wr->s_send_wr, 0, sizeof(*send_wr));
2871 wr->send_wr = &wr->s_send_wr;
2872 wr->isert_cmd = isert_cmd;
2874 send_wr = &isert_cmd->rdma_wr.s_send_wr;
2875 send_wr->sg_list = &wr->s_ib_sge;
2876 send_wr->num_sge = 1;
2877 send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
2878 if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
2879 send_wr->opcode = IB_WR_RDMA_WRITE;
2880 send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
2881 send_wr->wr.rdma.rkey = isert_cmd->read_stag;
2882 send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
2883 0 : IB_SEND_SIGNALED;
2885 send_wr->opcode = IB_WR_RDMA_READ;
2886 send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
2887 send_wr->wr.rdma.rkey = isert_cmd->write_stag;
2888 send_wr->send_flags = IB_SEND_SIGNALED;
2895 spin_lock_irqsave(&isert_conn->pool_lock, flags);
2896 list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
2897 spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
2899 isert_unmap_data_buf(isert_conn, &wr->data);
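/*
 * isert_put_datain() - queue Data-In for a READ: prepare the RDMA_WRITE
 * chain and, when PI offload is not in use, chain the SCSI response PDU
 * behind it so a single ib_post_send() pushes both.
 */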
2905 isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2907 struct se_cmd *se_cmd = &cmd->se_cmd;
2908 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2909 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2910 struct isert_conn *isert_conn = conn->context;
2911 struct isert_device *device = isert_conn->device;
2912 struct ib_send_wr *wr_failed;
2915 isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
2916 isert_cmd, se_cmd->data_length);
2918 wr->iser_ib_op = ISER_IB_RDMA_WRITE;
2919 rc = device->reg_rdma_mem(conn, cmd, wr);
2921 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2925 if (!isert_prot_cmd(isert_conn, se_cmd)) {
2927 * Build isert_cmd->tx_desc for the iSCSI response PDU and chain it after the last RDMA_WRITE WR.
2929 isert_create_send_desc(isert_conn, isert_cmd,
2930 &isert_cmd->tx_desc);
2931 iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
2932 &isert_cmd->tx_desc.iscsi_header);
2933 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2934 isert_init_send_wr(isert_conn, isert_cmd,
2935 &isert_cmd->tx_desc.send_wr);
2936 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
2937 wr->send_wr_num += 1;
2940 rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
2942 isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
2944 if (!isert_prot_cmd(isert_conn, se_cmd))
2945 isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
2946 "READ\n", isert_cmd);
2948 isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
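/*
 * isert_get_dataout() - queue an RDMA_READ to pull WRITE data from the
 * initiator, starting at write_data_done when called for Data-Out recovery.
 */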
2955 isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2957 struct se_cmd *se_cmd = &cmd->se_cmd;
2958 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2959 struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
2960 struct isert_conn *isert_conn = conn->context;
2961 struct isert_device *device = isert_conn->device;
2962 struct ib_send_wr *wr_failed;
2965 isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
2966 isert_cmd, se_cmd->data_length, cmd->write_data_done);
2967 wr->iser_ib_op = ISER_IB_RDMA_READ;
2968 rc = device->reg_rdma_mem(conn, cmd, wr);
2970 isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
2974 rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
2976 isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
2978 isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
2979 isert_cmd);
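/*
 * isert_immediate_queue() - iscsit immediate-queue callback, dispatching on
 * the command state (e.g. posting a NOP-In that wants a response).
 */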
2985 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2990 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2991 ret = isert_put_nopin(cmd, conn, false);
2994 isert_err("Unknown immediate state: 0x%02x\n", state);
3003 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
3005 struct isert_conn *isert_conn = conn->context;
3009 case ISTATE_SEND_LOGOUTRSP:
3010 ret = isert_put_logout_rsp(cmd, conn);
3012 isert_conn->logout_posted = true;
3014 case ISTATE_SEND_NOPIN:
3015 ret = isert_put_nopin(cmd, conn, true);
3017 case ISTATE_SEND_TASKMGTRSP:
3018 ret = isert_put_tm_rsp(cmd, conn);
3020 case ISTATE_SEND_REJECT:
3021 ret = isert_put_reject(cmd, conn);
3023 case ISTATE_SEND_TEXTRSP:
3024 ret = isert_put_text_rsp(cmd, conn);
3026 case ISTATE_SEND_STATUS:
3028 * Special case for sending non-GOOD SCSI status from TX thread
3029 * context during pre se_cmd execution failure.
3031 ret = isert_put_response(conn, cmd);
3034 isert_err("Unknown response state: 0x%02x\n", state);
3043 isert_setup_id(struct isert_np *isert_np)
3045 struct iscsi_np *np = isert_np->np;
3046 struct rdma_cm_id *id;
3047 struct sockaddr *sa;
3050 sa = (struct sockaddr *)&np->np_sockaddr;
3051 isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
3053 id = rdma_create_id(isert_cma_handler, isert_np,
3054 RDMA_PS_TCP, IB_QPT_RC);
3056 isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
3060 isert_dbg("id %p context %p\n", id, id->context);
3062 ret = rdma_bind_addr(id, sa);
3064 isert_err("rdma_bind_addr() failed: %d\n", ret);
3068 ret = rdma_listen(id, 0);
3070 isert_err("rdma_listen() failed: %d\n", ret);
3076 rdma_destroy_id(id);
3078 return ERR_PTR(ret);
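/*
 * isert_setup_np() - allocate the per-portal isert_np context, initialize
 * its accept list and synchronization primitives, and set up the RDMA CM
 * listener.
 */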
3082 isert_setup_np(struct iscsi_np *np,
3083 struct __kernel_sockaddr_storage *ksockaddr)
3085 struct isert_np *isert_np;
3086 struct rdma_cm_id *isert_lid;
3089 isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
3091 isert_err("Unable to allocate struct isert_np\n");
3094 sema_init(&isert_np->np_sem, 0);
3095 mutex_init(&isert_np->np_accept_mutex);
3096 INIT_LIST_HEAD(&isert_np->np_accept_list);
3097 init_completion(&isert_np->np_login_comp);
3101 * Set up np->np_sockaddr from the sockaddr passed in from the
3102 * iscsi_target_configfs.c code.
3104 memcpy(&np->np_sockaddr, ksockaddr,
3105 sizeof(struct __kernel_sockaddr_storage));
3107 isert_lid = isert_setup_id(isert_np);
3108 if (IS_ERR(isert_lid)) {
3109 ret = PTR_ERR(isert_lid);
3113 isert_np->np_cm_id = isert_lid;
3114 np->np_context = isert_np;
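/*
 * isert_rdma_accept() - accept the queued RDMA CM connect request, using
 * the initiator depth negotiated at connect time and an RNR retry count of
 * 7 (retry indefinitely).
 */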
3125 isert_rdma_accept(struct isert_conn *isert_conn)
3127 struct rdma_cm_id *cm_id = isert_conn->cm_id;
3128 struct rdma_conn_param cp;
3131 memset(&cp, 0, sizeof(struct rdma_conn_param));
3132 cp.initiator_depth = isert_conn->initiator_depth;
3134 cp.rnr_retry_count = 7;
3136 ret = rdma_accept(cm_id, &cp);
3138 isert_err("rdma_accept() failed with: %d\n", ret);
3146 isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
3148 struct isert_conn *isert_conn = conn->context;
3151 isert_info("before login_req comp conn: %p\n", isert_conn);
3152 ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
3154 isert_err("isert_conn %p interrupted before got login req\n",
3158 reinit_completion(&isert_conn->login_req_comp);
3161 * For login requests after the first PDU, isert_rx_login_req() will
3162 * kick schedule_delayed_work(&conn->login_work) as the packet is
3163 * received, which turns this callback from iscsi_target_do_login_rx()
3164 * into a NOP.
3166 if (!login->first_request)
3169 isert_rx_login_req(isert_conn);
3171 isert_info("before login_comp conn: %p\n", conn);
3172 ret = wait_for_completion_interruptible(&isert_conn->login_comp);
3176 isert_info("processing login->req: %p\n", login->req);
3182 isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
3183 struct isert_conn *isert_conn)
3185 struct rdma_cm_id *cm_id = isert_conn->cm_id;
3186 struct rdma_route *cm_route = &cm_id->route;
3187 struct sockaddr_in *sock_in;
3188 struct sockaddr_in6 *sock_in6;
3190 conn->login_family = np->np_sockaddr.ss_family;
3192 if (np->np_sockaddr.ss_family == AF_INET6) {
3193 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
3194 snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
3195 &sock_in6->sin6_addr.in6_u);
3196 conn->login_port = ntohs(sock_in6->sin6_port);
3198 sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
3199 snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
3200 &sock_in6->sin6_addr.in6_u);
3201 conn->local_port = ntohs(sock_in6->sin6_port);
3203 sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
3204 sprintf(conn->login_ip, "%pI4",
3205 &sock_in->sin_addr.s_addr);
3206 conn->login_port = ntohs(sock_in->sin_port);
3208 sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
3209 sprintf(conn->local_ip, "%pI4",
3210 &sock_in->sin_addr.s_addr);
3211 conn->local_port = ntohs(sock_in->sin_port);
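/*
 * isert_accept_np() - block until a new iSER connection is queued on the
 * portal's accept list (or the np thread is being reset), then bind it to
 * the iscsi_conn and fill in the login/local address information.
 */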
3216 isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
3218 struct isert_np *isert_np = np->np_context;
3219 struct isert_conn *isert_conn;
3223 ret = down_interruptible(&isert_np->np_sem);
3227 spin_lock_bh(&np->np_thread_lock);
3228 if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
3229 spin_unlock_bh(&np->np_thread_lock);
3230 isert_dbg("np_thread_state %d\n",
3231 np->np_thread_state);
3233 * No point in stalling here when np_thread
3234 * is in state RESET/SHUTDOWN/EXIT - bail
3238 spin_unlock_bh(&np->np_thread_lock);
3240 mutex_lock(&isert_np->np_accept_mutex);
3241 if (list_empty(&isert_np->np_accept_list)) {
3242 mutex_unlock(&isert_np->np_accept_mutex);
3245 isert_conn = list_first_entry(&isert_np->np_accept_list,
3246 struct isert_conn, accept_node);
3247 list_del_init(&isert_conn->accept_node);
3248 mutex_unlock(&isert_np->np_accept_mutex);
3250 conn->context = isert_conn;
3251 isert_conn->conn = conn;
3253 isert_set_conn_info(np, conn, isert_conn);
3255 isert_dbg("Processing isert_conn: %p\n", isert_conn);
3261 isert_free_np(struct iscsi_np *np)
3263 struct isert_np *isert_np = np->np_context;
3264 struct isert_conn *isert_conn, *n;
3266 if (isert_np->np_cm_id)
3267 rdma_destroy_id(isert_np->np_cm_id);
3270 * FIXME: At this point we don't have a good way to ensure that there
3271 * are no hanging connections that completed RDMA establishment but
3272 * never started the iSCSI login process. Work around this by cleaning
3273 * up whatever piled up in np_accept_list.
3276 mutex_lock(&isert_np->np_accept_mutex);
3277 if (!list_empty(&isert_np->np_accept_list)) {
3278 isert_info("Still have isert connections, cleaning up...\n");
3279 list_for_each_entry_safe(isert_conn, n,
3280 &isert_np->np_accept_list,
3282 isert_info("cleaning isert_conn %p state (%d)\n",
3283 isert_conn, isert_conn->state);
3284 isert_connect_release(isert_conn);
3287 mutex_unlock(&isert_np->np_accept_mutex);
3289 np->np_context = NULL;
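/*
 * isert_release_work() - final connection teardown: wait for CM teardown to
 * complete, mark the connection ISER_CONN_DOWN, and drop the last reference
 * via isert_put_conn().
 */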
3293 static void isert_release_work(struct work_struct *work)
3295 struct isert_conn *isert_conn = container_of(work,
3296 struct isert_conn,
3297 release_work);
3299 isert_info("Starting release conn %p\n", isert_conn);
3301 wait_for_completion(&isert_conn->wait);
3303 mutex_lock(&isert_conn->mutex);
3304 isert_conn->state = ISER_CONN_DOWN;
3305 mutex_unlock(&isert_conn->mutex);
3307 isert_info("Destroying conn %p\n", isert_conn);
3308 isert_put_conn(isert_conn);
3312 isert_wait4logout(struct isert_conn *isert_conn)
3314 struct iscsi_conn *conn = isert_conn->conn;
3316 isert_info("conn %p\n", isert_conn);
3318 if (isert_conn->logout_posted) {
3319 isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
3320 wait_for_completion_timeout(&conn->conn_logout_comp,
3321 SECONDS_FOR_LOGOUT_COMP * HZ);
3326 isert_wait4cmds(struct iscsi_conn *conn)
3328 isert_info("iscsi_conn %p\n", conn);
3331 target_sess_cmd_list_set_waiting(conn->sess->se_sess);
3332 target_wait_for_sess_cmds(conn->sess->se_sess);
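/*
 * isert_wait4flush() - post the beacon receive WR and wait until its
 * completion is reported, which guarantees that all earlier flush errors on
 * the QP have been consumed.
 */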
3337 isert_wait4flush(struct isert_conn *isert_conn)
3339 struct ib_recv_wr *bad_wr;
3341 isert_info("conn %p\n", isert_conn);
3343 init_completion(&isert_conn->wait_comp_err);
3344 isert_conn->beacon.wr_id = ISER_BEACON_WRID;
3345 /* post an indication that all flush errors were consumed */
3346 if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
3347 isert_err("conn %p failed to post beacon", isert_conn);
3351 wait_for_completion(&isert_conn->wait_comp_err);
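/*
 * isert_wait_conn() - begin orderly connection shutdown: terminate the CM
 * connection, wait for outstanding commands, flush errors and the logout
 * response, then hand final teardown to isert_release_work().
 */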
3354 static void isert_wait_conn(struct iscsi_conn *conn)
3356 struct isert_conn *isert_conn = conn->context;
3358 isert_info("Starting conn %p\n", isert_conn);
3360 mutex_lock(&isert_conn->mutex);
3362 * Only wait for wait_comp_err if the isert_conn made it
3363 * into full feature phase.
3365 if (isert_conn->state == ISER_CONN_INIT) {
3366 mutex_unlock(&isert_conn->mutex);
3369 isert_conn_terminate(isert_conn);
3370 mutex_unlock(&isert_conn->mutex);
3372 isert_wait4cmds(conn);
3373 isert_wait4flush(isert_conn);
3374 isert_wait4logout(isert_conn);
3376 INIT_WORK(&isert_conn->release_work, isert_release_work);
3377 queue_work(isert_release_wq, &isert_conn->release_work);
3380 static void isert_free_conn(struct iscsi_conn *conn)
3382 struct isert_conn *isert_conn = conn->context;
3384 isert_put_conn(isert_conn);
3387 static struct iscsit_transport iser_target_transport = {
3389 .transport_type = ISCSI_INFINIBAND,
3390 .priv_size = sizeof(struct isert_cmd),
3391 .owner = THIS_MODULE,
3392 .iscsit_setup_np = isert_setup_np,
3393 .iscsit_accept_np = isert_accept_np,
3394 .iscsit_free_np = isert_free_np,
3395 .iscsit_wait_conn = isert_wait_conn,
3396 .iscsit_free_conn = isert_free_conn,
3397 .iscsit_get_login_rx = isert_get_login_rx,
3398 .iscsit_put_login_tx = isert_put_login_tx,
3399 .iscsit_immediate_queue = isert_immediate_queue,
3400 .iscsit_response_queue = isert_response_queue,
3401 .iscsit_get_dataout = isert_get_dataout,
3402 .iscsit_queue_data_in = isert_put_datain,
3403 .iscsit_queue_status = isert_put_response,
3404 .iscsit_aborted_task = isert_aborted_task,
3405 .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
3406 };
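/*
 * isert_init() - module init: create the completion and release workqueues
 * and register the iSER transport with the iSCSI target core.
 */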
3408 static int __init isert_init(void)
3412 isert_comp_wq = alloc_workqueue("isert_comp_wq",
3413 WQ_UNBOUND | WQ_HIGHPRI, 0);
3414 if (!isert_comp_wq) {
3415 isert_err("Unable to allocate isert_comp_wq\n");
3420 isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
3421 WQ_UNBOUND_MAX_ACTIVE);
3422 if (!isert_release_wq) {
3423 isert_err("Unable to allocate isert_release_wq\n");
3425 goto destroy_comp_wq;
3428 iscsit_register_transport(&iser_target_transport);
3429 isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
3434 destroy_workqueue(isert_comp_wq);
3439 static void __exit isert_exit(void)
3441 flush_scheduled_work();
3442 destroy_workqueue(isert_release_wq);
3443 destroy_workqueue(isert_comp_wq);
3444 iscsit_unregister_transport(&iser_target_transport);
3445 isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
3448 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
3449 MODULE_VERSION("1.0");
3450 MODULE_AUTHOR("nab@Linux-iSCSI.org");
3451 MODULE_LICENSE("GPL");
3453 module_init(isert_init);
3454 module_exit(isert_exit);