/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN)
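
/*
 * Budget for a single CQ polling loop: iser_cq_tasklet_fn() handles at
 * most this many completions per tasklet run before re-arming the CQ,
 * so one busy CQ cannot monopolize the soft-IRQ context.
 */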
static int iser_cq_poll_limit = 512;

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("cq event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("qp event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	iser_err("async event %s (%d) on device %s port %d\n",
		 ib_event_msg(event->event), event->event,
		 event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device_attr *dev_attr = &device->dev_attr;
	int ret, i, max_cqe;

	ret = ib_query_device(device->ib_device, dev_attr);
	if (ret) {
		pr_warn("Query device failed for %s\n", device->ib_device->name);
		return ret;
	}

	/* Assign function handles - based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
		device->iser_free_rdma_reg_res = iser_free_fmr_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
	} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool;
		device->iser_free_rdma_reg_res = iser_free_fastreg_pool;
		device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg;
		device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg;
	} else {
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
		return -1;
	}
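
	/*
	 * Use one completion context per completion vector, capped at the
	 * number of online CPUs; each context gets its own CQ and polling
	 * tasklet.
	 */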
	device->comps_used = min_t(int, num_online_cpus(),
				   device->ib_device->num_comp_vectors);

	device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
				GFP_KERNEL);
	if (!device->comps)
		goto comps_err;

	max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);

	iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
		  device->comps_used, device->ib_device->name,
		  device->ib_device->num_comp_vectors, max_cqe);

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		comp->device = device;
		comp->cq = ib_create_cq(device->ib_device,
					iser_cq_callback,
					iser_cq_event_callback,
					(void *)comp,
					max_cqe, i);
		if (IS_ERR(comp->cq)) {
			comp->cq = NULL;
			goto cq_err;
		}

		if (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP))
			goto cq_err;

		tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
			     (unsigned long)comp);
	}

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
			      iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	for (i = 0; i < device->comps_used; i++)
		tasklet_kill(&device->comps[i].tasklet);
cq_err:
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		if (comp->cq)
			ib_destroy_cq(comp->cq);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->comps);
comps_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;

	BUG_ON(device->mr == NULL);

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		tasklet_kill(&comp->tasklet);
		ib_destroy_cq(comp->cq);
		comp->cq = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_dealloc_pd(device->pd);

	kfree(device->comps);
	device->comps = NULL;

	device->mr = NULL;
	device->pd = NULL;
}

/**
 * iser_create_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct ib_fmr_pool_param params;
	int ret = -ENOMEM;

	ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) +
					(sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1)),
					GFP_KERNEL);
	if (!ib_conn->fmr.page_vec)
		return ret;

	ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1);

	params.page_shift = SHIFT_4K;
	/* when the first/last SG element are not start/end *
	 * page aligned, the map would be of N+1 pages      */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50%  */
	params.pool_size = cmds_max * 2;
	params.dirty_watermark = cmds_max;
	params.cache = 0;
	params.flush_function = NULL;
	params.access = (IB_ACCESS_LOCAL_WRITE  |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_READ);

	ib_conn->fmr.pool = ib_create_fmr_pool(device->pd, &params);
	if (!IS_ERR(ib_conn->fmr.pool))
		return 0;

	/* no FMR => no need for page_vec */
	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;

	ret = PTR_ERR(ib_conn->fmr.pool);
	ib_conn->fmr.pool = NULL;
	if (ret != -ENOSYS) {
		iser_err("FMR allocation failed, err %d\n", ret);
		return ret;
	} else {
		iser_warn("FMRs are not supported, using unaligned mode\n");
		return 0;
	}
}

/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, ib_conn->fmr.pool);

	if (ib_conn->fmr.pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr.pool);

	ib_conn->fmr.pool = NULL;

	kfree(ib_conn->fmr.page_vec);
	ib_conn->fmr.page_vec = NULL;
}
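
/**
 * iser_alloc_pi_ctx - allocate the T10-PI (protection information)
 * resources of a fast registration descriptor: a page list and a
 * fast_reg MR for the protection buffer, plus a signature-enabled MR
 * covering both the data and protection domains.
 */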
static int
iser_alloc_pi_ctx(struct ib_device *ib_device, struct ib_pd *pd,
		  struct fast_reg_descriptor *desc)
{
	struct iser_pi_context *pi_ctx = NULL;
	struct ib_mr_init_attr mr_init_attr = {.max_reg_descriptors = 2,
					       .flags = IB_MR_SIGNATURE_EN};
	int ret = 0;

	desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!desc->pi_ctx)
		return -ENOMEM;

	pi_ctx = desc->pi_ctx;

	pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
							ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_frpl)) {
		ret = PTR_ERR(pi_ctx->prot_frpl);
		goto prot_frpl_failure;
	}

	pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd,
					       ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(pi_ctx->prot_mr)) {
		ret = PTR_ERR(pi_ctx->prot_mr);
		goto prot_mr_failure;
	}
	desc->reg_indicators |= ISER_PROT_KEY_VALID;

	pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
	if (IS_ERR(pi_ctx->sig_mr)) {
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto sig_mr_failure;
	}
	desc->reg_indicators |= ISER_SIG_KEY_VALID;
	desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;

	return 0;

sig_mr_failure:
	ib_dereg_mr(desc->pi_ctx->prot_mr);
prot_mr_failure:
	ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
prot_frpl_failure:
	kfree(desc->pi_ctx);

	return ret;
}

static void
iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
{
	ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
	ib_dereg_mr(pi_ctx->prot_mr);
	ib_destroy_mr(pi_ctx->sig_mr);
	kfree(pi_ctx);
}
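
/**
 * iser_create_fastreg_desc - allocate the fast registration resources of
 * a single descriptor: a page list and a fast_reg MR for the data and,
 * when pi_enable is set, the additional protection context as well.
 */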
static int
iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd,
			 bool pi_enable, struct fast_reg_descriptor *desc)
{
	int ret;

	desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
						      ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_frpl)) {
		ret = PTR_ERR(desc->data_frpl);
		iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n",
			 ret);
		return ret;
	}

	desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1);
	if (IS_ERR(desc->data_mr)) {
		ret = PTR_ERR(desc->data_mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		goto fast_reg_mr_failure;
	}
	desc->reg_indicators |= ISER_DATA_KEY_VALID;

	if (pi_enable) {
		ret = iser_alloc_pi_ctx(ib_device, pd, desc);
		if (ret)
			goto pi_ctx_alloc_failure;
	}

	return 0;

pi_ctx_alloc_failure:
	ib_dereg_mr(desc->data_mr);
fast_reg_mr_failure:
	ib_free_fast_reg_page_list(desc->data_frpl);

	return ret;
}

/**
 * iser_create_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 * returns 0 on success, or errno code on failure
 */
int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max)
{
	struct iser_device *device = ib_conn->device;
	struct fast_reg_descriptor *desc;
	int i, ret;

	INIT_LIST_HEAD(&ib_conn->fastreg.pool);
	ib_conn->fastreg.pool_size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			iser_err("Failed to allocate a new fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = iser_create_fastreg_desc(device->ib_device, device->pd,
					       ib_conn->pi_support, desc);
		if (ret) {
			iser_err("Failed to create fastreg descriptor err=%d\n",
				 ret);
			kfree(desc);
			goto err;
		}

		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		ib_conn->fastreg.pool_size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}

/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct fast_reg_descriptor *desc, *tmp;
	int i = 0;

	if (list_empty(&ib_conn->fastreg.pool))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) {
		list_del(&desc->list);
		ib_free_fast_reg_page_list(desc->data_frpl);
		ib_dereg_mr(desc->data_mr);
		if (desc->pi_ctx)
			iser_free_pi_ctx(desc->pi_ctx);
		kfree(desc);
		++i;
	}

	if (i < ib_conn->fastreg.pool_size)
		iser_warn("pool still has %d regions registered\n",
			  ib_conn->fastreg.pool_size - i);
}

/**
 * iser_create_ib_conn_res - creates the Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);
	struct iser_device *device;
	struct ib_device_attr *dev_attr;
	struct ib_qp_init_attr init_attr;
	int ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;
	dev_attr = &device->dev_attr;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->comps_used; index++) {
		if (device->comps[index].active_qps <
		    device->comps[min_index].active_qps)
			min_index = index;
	}
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context = (void *)ib_conn;
	init_attr.send_cq = ib_conn->comp->cq;
	init_attr.recv_cq = ib_conn->comp->cq;
	init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type = IB_QPT_RC;
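
	/*
	 * Size the send queue for the maximum number of transmit DTOs,
	 * plus one extra WR for the beacon posted at teardown time to
	 * flush the send queue (see iser_conn_terminate).
	 */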
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
		iser_conn->max_cmds =
			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
	} else {
		if (dev_attr->max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
			init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
		} else {
			init_attr.cap.max_send_wr = dev_attr->max_qp_wr;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(dev_attr->max_qp_wr);
			iser_dbg("device %s supports max_send_wr %d\n",
				 device->ib_device->name, dev_attr->max_qp_wr);
		}
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	mutex_lock(&ig.connlist_mutex);
	ib_conn->comp->active_qps--;
	mutex_unlock(&ig.connlist_mutex);
	iser_err("unable to alloc mem or create resource, err %d\n", ret);

	return ret;
}

/**
 * Based on the resolved device node GUID, see if there is an already
 * allocated iser device for this IB device. If there is none, create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign the IB device to the newly allocated iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

/**
 * Called with state mutex held
 **/
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}

void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}

/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy: indicator if we need to try to release the
 *     iser device and memory regions pool (only iscsi
 *     shutdown and DEVICE_REMOVAL will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	if (ib_conn->qp != NULL) {
		ib_conn->comp->active_qps--;
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

	if (destroy) {
		if (iser_conn->rx_descs)
			iser_free_rx_descriptors(iser_conn);

		if (device != NULL) {
			iser_device_try_release(device);
			ib_conn->device = NULL;
		}
	}
}

/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	/* In case we end up here without ep_disconnect being invoked. */
	if (iser_conn->state != ISER_CONN_DOWN) {
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
		iscsi_destroy_endpoint(iser_conn->ep);
		iser_conn->state = ISER_CONN_DOWN;
	}
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}

/**
 * Triggers the start of the disconnect procedures and waits for them
 * to be done.
 * Called with state mutex held
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_send_wr *bad_wr;
	int err = 0;

	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

		/* post an indication that all flush errors were consumed */
		err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
		if (err) {
			iser_err("conn %p failed to post beacon", ib_conn);
			return 1;
		}

		wait_for_completion(&ib_conn->flush_comp);
	}

	return 1;
}

/**
 * Called with state mutex held
 **/
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;

	iser_conn->state = ISER_CONN_TERMINATING;
}

/**
 * Called with state mutex held
 **/
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn *iser_conn;
	struct ib_conn *ib_conn;
	int ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->dev_attr.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}

/**
 * Called with state mutex held
 **/
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
	conn_param.initiator_depth = 1;
	conn_param.retry_count = 7;
	conn_param.rnr_retry_count = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
			 ISER_SEND_W_INV_NOT_SUPPORTED);
	conn_param.private_data = (void *)&req_hdr;
	conn_param.private_data_len = sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}
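
/**
 * iser_connected_handler - RDMA_CM_EVENT_ESTABLISHED handler; queries
 * the QP (only to log the remote QP number), moves the connection to
 * ISER_CONN_UP and wakes up the waiter in iser_connect().
 *
 * Called with state mutex held
 **/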
static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n",
		  attr.dest_qp_num, cma_id->qp->qp_num);

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}

static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}

static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
	 */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy);
	complete(&iser_conn->ib_completion);
}
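
/**
 * iser_cma_handler - dispatch RDMA CM events to the handlers above.
 *
 * Returns non-zero only for DEVICE_REMOVAL on a connection that is not
 * already DOWN; the RDMA CM then destroys the cma_id itself, which is
 * why our reference is NULLed first.
 */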
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("%s (%d): status %d conn %p id %p\n",
		  rdma_event_msg(event->event), event->event,
		  event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * we *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * also if we are not in state DOWN implicitly destroy
		 * the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		if (iser_conn->state != ISER_CONN_DOWN) {
			iser_conn->ib_conn.cma_id = NULL;
			ret = 1;
		}
		break;
	default:
		iser_err("Unexpected RDMA CM event: %s (%d)\n",
			 rdma_event_msg(event->event), event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);

	return ret;
}

void iser_conn_init(struct iser_conn *iser_conn)
{
	iser_conn->state = ISER_CONN_INIT;
	iser_conn->ib_conn.post_recv_buf_count = 0;
	init_completion(&iser_conn->ib_conn.flush_comp);
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	spin_lock_init(&iser_conn->ib_conn.lock);
	mutex_init(&iser_conn->state_mutex);
}

/**
 * Starts the process of connecting to the target.
 * Sleeps until the connection is established or rejected,
 * unless non_blocking is set.
 */
int iser_connect(struct iser_conn *iser_conn,
		 struct sockaddr *src_addr,
		 struct sockaddr *dst_addr,
		 int non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->beacon.wr_id = ISER_BEACON_WRID;
	ib_conn->beacon.opcode = IB_WR_SEND;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}
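
/**
 * iser_post_recvl - post a single receive work request for the login
 * response. The login phase uses a dedicated buffer (login_resp_buf)
 * rather than an entry of the rx_descs ring.
 */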
int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct ib_sge sge;
	int ib_ret;

	sge.addr = iser_conn->login_resp_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = ib_conn->device->mr->lkey;

	rx_wr.wr_id = (uintptr_t)iser_conn->login_resp_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}

	return ib_ret;
}
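
/**
 * iser_post_recvm - post @count receive buffers, chained into a single
 * work request list so that one ib_post_recv() call hands them all to
 * the HCA. rx_desc_head is a ring index; the masking with
 * qp_max_recv_dtos_mask assumes the ring size is a power of two.
 */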
int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &iser_conn->rx_descs[my_rx_head];
		rx_wr->wr_id = (uintptr_t)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else {
		iser_conn->rx_desc_head = my_rx_head;
	}

	return ib_ret;
}

/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal)
{
	int ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	send_wr.next = NULL;
	send_wr.wr_id = (uintptr_t)tx_desc;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = signal ? IB_SEND_SIGNALED : 0;

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret)
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);

	return ib_ret;
}

/**
 * is_iser_tx_desc - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @iser_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_iser_tx_desc(struct iser_conn *iser_conn, void *wr_id)
{
	void *start = iser_conn->rx_descs;
	int len = iser_conn->num_rx_descs * sizeof(*iser_conn->rx_descs);

	if (wr_id >= start && wr_id < start + len)
		return false;

	return true;
}

/**
 * iser_handle_comp_error() - Handle error completion
 * @ib_conn:   connection RDMA resources
 * @wc:        work completion
 *
 * Notes: We may handle a FLUSH error completion and in this case
 *        we only cleanup in case TX type was DATAOUT. For non-FLUSH
 *        error completion we should also notify iscsi layer that
 *        connection is failed (in case we passed bind stage).
 */
static void
iser_handle_comp_error(struct ib_conn *ib_conn,
		       struct ib_wc *wc)
{
	void *wr_id = (void *)(uintptr_t)wc->wr_id;
	struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
						   ib_conn);

	if (wc->status != IB_WC_WR_FLUSH_ERR)
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

	if (wc->wr_id == ISER_FASTREG_LI_WRID)
		return;

	if (is_iser_tx_desc(iser_conn, wr_id)) {
		struct iser_tx_desc *desc = wr_id;

		if (desc->type == ISCSI_TX_DATAOUT)
			kmem_cache_free(ig.desc_cache, desc);
	} else {
		ib_conn->post_recv_buf_count--;
	}
}

/**
 * iser_handle_wc - handle a single work completion
 * @wc: work completion
 *
 * Soft-IRQ context, work completion can be either
 * SEND or RECV, and can turn out successful or
 * with error (or flush error).
 */
static void iser_handle_wc(struct ib_wc *wc)
{
	struct ib_conn *ib_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	ib_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			iser_rcv_completion(rx_desc, wc->byte_len,
					    ib_conn);
		} else if (wc->opcode == IB_WC_SEND) {
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			iser_snd_completion(tx_desc, ib_conn);
		} else {
			iser_err("Unknown wc opcode %d\n", wc->opcode);
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			iser_err("%s (%d): wr id %llx vend_err %x\n",
				 ib_wc_status_msg(wc->status), wc->status,
				 wc->wr_id, wc->vendor_err);
		else
			iser_dbg("%s (%d): wr id %llx\n",
				 ib_wc_status_msg(wc->status), wc->status,
				 wc->wr_id);

		if (wc->wr_id == ISER_BEACON_WRID)
			/* all flush errors were consumed */
			complete(&ib_conn->flush_comp);
		else
			iser_handle_comp_error(ib_conn, wc);
	}
}

/**
 * iser_cq_tasklet_fn - iSER completion polling loop
 * @data: iSER completion context
 *
 * Soft-IRQ context, polling connection CQ until
 * either CQ was empty or we exhausted polling budget
 */
static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_comp *comp = (struct iser_comp *)data;
	struct ib_cq *cq = comp->cq;
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			iser_handle_wc(&wcs[i]);

		completed += n;
		if (completed >= iser_cq_poll_limit)
			break;
	}

	/*
	 * It is assumed here that arming CQ only once its empty
	 * would not cause interrupts to be missed.
	 */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	iser_dbg("got %d completions\n", completed);
}

static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_comp *comp = cq_context;

	tasklet_schedule(&comp->tasklet);
}
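
/**
 * iser_check_task_pi_status - check the signature MR status of a
 * protected task and translate a T10-PI error to the sector where it
 * was detected. The wire format interleaves an 8-byte DIF tuple after
 * every sector, hence the division by (sector_size + 8).
 *
 * Returns 0x1 (bad guard), 0x2 (bad application tag), 0x3 (bad
 * reference tag) or 0 when no error was detected.
 */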
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	struct fast_reg_descriptor *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) {
		desc->reg_indicators &= ~ISER_FASTREG_PROTECTED;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			do_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}