2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35 #include <linux/module.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/err.h>
39 #include <linux/string.h>
40 #include <linux/parser.h>
41 #include <linux/random.h>
42 #include <linux/jiffies.h>
43 #include <rdma/ib_cache.h>
45 #include <linux/atomic.h>
47 #include <scsi/scsi.h>
48 #include <scsi/scsi_device.h>
49 #include <scsi/scsi_dbg.h>
50 #include <scsi/scsi_tcq.h>
52 #include <scsi/scsi_transport_srp.h>
56 #define DRV_NAME "ib_srp"
57 #define PFX DRV_NAME ": "
58 #define DRV_VERSION "1.0"
59 #define DRV_RELDATE "July 1, 2013"
61 MODULE_AUTHOR("Roland Dreier");
62 MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
63 MODULE_LICENSE("Dual BSD/GPL");
64 MODULE_VERSION(DRV_VERSION);
65 MODULE_INFO(release_date, DRV_RELDATE);
67 static unsigned int srp_sg_tablesize;
68 static unsigned int cmd_sg_entries;
69 static unsigned int indirect_sg_entries;
70 static bool allow_ext_sg;
71 static bool prefer_fr;
72 static bool register_always;
73 static int topspin_workarounds = 1;
75 module_param(srp_sg_tablesize, uint, 0444);
76 MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
78 module_param(cmd_sg_entries, uint, 0444);
79 MODULE_PARM_DESC(cmd_sg_entries,
80 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
82 module_param(indirect_sg_entries, uint, 0444);
83 MODULE_PARM_DESC(indirect_sg_entries,
84 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SCSI_MAX_SG_CHAIN_SEGMENTS) ")");
86 module_param(allow_ext_sg, bool, 0444);
87 MODULE_PARM_DESC(allow_ext_sg,
88 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
90 module_param(topspin_workarounds, int, 0444);
91 MODULE_PARM_DESC(topspin_workarounds,
92 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
94 module_param(prefer_fr, bool, 0444);
95 MODULE_PARM_DESC(prefer_fr,
96 "Whether to use fast registration if both FMR and fast registration are supported");
98 module_param(register_always, bool, 0444);
99 MODULE_PARM_DESC(register_always,
100 "Use memory registration even for contiguous memory regions");
102 static struct kernel_param_ops srp_tmo_ops;
104 static int srp_reconnect_delay = 10;
105 module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
107 MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
109 static int srp_fast_io_fail_tmo = 15;
110 module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
112 MODULE_PARM_DESC(fast_io_fail_tmo,
113 "Number of seconds between the observation of a transport"
114 " layer error and failing all I/O. \"off\" means that this"
115 " functionality is disabled.");
117 static int srp_dev_loss_tmo = 600;
118 module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
120 MODULE_PARM_DESC(dev_loss_tmo,
121 "Maximum number of seconds that the SRP transport should"
122 " insulate transport layer errors. After this time has been"
123 " exceeded the SCSI host is removed. Should be"
124 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
125 " if fast_io_fail_tmo has not been set. \"off\" means that"
126 " this functionality is disabled.");
128 static unsigned ch_count;
129 module_param(ch_count, uint, 0444);
130 MODULE_PARM_DESC(ch_count,
131 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
133 static void srp_add_one(struct ib_device *device);
134 static void srp_remove_one(struct ib_device *device);
135 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr);
136 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr);
137 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
139 static struct scsi_transport_template *ib_srp_transport_template;
140 static struct workqueue_struct *srp_remove_wq;
142 static struct ib_client srp_client = {
145 .remove = srp_remove_one
148 static struct ib_sa_client srp_sa_client;
150 static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
152 int tmo = *(int *)kp->arg;
155 return sprintf(buffer, "%d", tmo);
157 return sprintf(buffer, "off");
160 static int srp_tmo_set(const char *val, const struct kernel_param *kp)
164 if (strncmp(val, "off", 3) != 0) {
165 res = kstrtoint(val, 0, &tmo);
171 if (kp->arg == &srp_reconnect_delay)
172 res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
174 else if (kp->arg == &srp_fast_io_fail_tmo)
175 res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
177 res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
181 *(int *)kp->arg = tmo;
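/*
 * srp_tmo_get() and srp_tmo_set() above back the reconnect_delay,
 * fast_io_fail_tmo and dev_loss_tmo parameters; srp_tmo_set() passes the new
 * value through srp_tmo_valid() together with the other two timeouts so that
 * the three values stay mutually consistent.
 */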
187 static struct kernel_param_ops srp_tmo_ops = {
192 static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
194 return (struct srp_target_port *) host->hostdata;
197 static const char *srp_target_info(struct Scsi_Host *host)
199 return host_to_target(host)->target_name;
202 static int srp_target_is_topspin(struct srp_target_port *target)
204 static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
205 static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
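/* The vendor OUI occupies the three most significant bytes of the IOC GUID. */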
207 return topspin_workarounds &&
208 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
209 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
212 static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
214 enum dma_data_direction direction)
218 iu = kmalloc(sizeof *iu, gfp_mask);
222 iu->buf = kzalloc(size, gfp_mask);
226 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
228 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
232 iu->direction = direction;
244 static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
249 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
255 static void srp_qp_event(struct ib_event *event, void *context)
257 pr_debug("QP event %s (%d)\n",
258 ib_event_msg(event->event), event->event);
261 static int srp_init_qp(struct srp_target_port *target,
264 struct ib_qp_attr *attr;
267 attr = kmalloc(sizeof *attr, GFP_KERNEL);
271 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
272 target->srp_host->port,
273 be16_to_cpu(target->pkey),
278 attr->qp_state = IB_QPS_INIT;
279 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
280 IB_ACCESS_REMOTE_WRITE);
281 attr->port_num = target->srp_host->port;
283 ret = ib_modify_qp(qp, attr,
294 static int srp_new_cm_id(struct srp_rdma_ch *ch)
296 struct srp_target_port *target = ch->target;
297 struct ib_cm_id *new_cm_id;
299 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
301 if (IS_ERR(new_cm_id))
302 return PTR_ERR(new_cm_id);
305 ib_destroy_cm_id(ch->cm_id);
306 ch->cm_id = new_cm_id;
307 ch->path.sgid = target->sgid;
308 ch->path.dgid = target->orig_dgid;
309 ch->path.pkey = target->pkey;
310 ch->path.service_id = target->service_id;
315 static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
317 struct srp_device *dev = target->srp_host->srp_dev;
318 struct ib_fmr_pool_param fmr_param;
320 memset(&fmr_param, 0, sizeof(fmr_param));
321 fmr_param.pool_size = target->scsi_host->can_queue;
322 fmr_param.dirty_watermark = fmr_param.pool_size / 4;
324 fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
325 fmr_param.page_shift = ilog2(dev->mr_page_size);
326 fmr_param.access = (IB_ACCESS_LOCAL_WRITE |
327 IB_ACCESS_REMOTE_WRITE |
328 IB_ACCESS_REMOTE_READ);
330 return ib_create_fmr_pool(dev->pd, &fmr_param);
334 * srp_destroy_fr_pool() - free the resources owned by a pool
335 * @pool: Fast registration pool to be destroyed.
337 static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
340 struct srp_fr_desc *d;
345 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
347 ib_free_fast_reg_page_list(d->frpl);
355 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
356 * @device: IB device to allocate fast registration descriptors for.
357 * @pd: Protection domain associated with the FR descriptors.
358 * @pool_size: Number of descriptors to allocate.
359 * @max_page_list_len: Maximum fast registration work request page list length.
361 static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
362 struct ib_pd *pd, int pool_size,
363 int max_page_list_len)
365 struct srp_fr_pool *pool;
366 struct srp_fr_desc *d;
368 struct ib_fast_reg_page_list *frpl;
369 int i, ret = -EINVAL;
374 pool = kzalloc(sizeof(struct srp_fr_pool) +
375 pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
378 pool->size = pool_size;
379 pool->max_page_list_len = max_page_list_len;
380 spin_lock_init(&pool->lock);
381 INIT_LIST_HEAD(&pool->free_list);
383 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
384 mr = ib_alloc_fast_reg_mr(pd, max_page_list_len);
390 frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
396 list_add_tail(&d->entry, &pool->free_list);
403 srp_destroy_fr_pool(pool);
411 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
412 * @pool: Pool to obtain descriptor from.
414 static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
416 struct srp_fr_desc *d = NULL;
419 spin_lock_irqsave(&pool->lock, flags);
420 if (!list_empty(&pool->free_list)) {
421 d = list_first_entry(&pool->free_list, typeof(*d), entry);
424 spin_unlock_irqrestore(&pool->lock, flags);
430 * srp_fr_pool_put() - put an FR descriptor back in the free list
431 * @pool: Pool the descriptor was allocated from.
432 * @desc: Pointer to an array of fast registration descriptor pointers.
433 * @n: Number of descriptors to put back.
435 * Note: The caller must already have queued an invalidation request for
436 * desc->mr->rkey before calling this function.
438 static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
444 spin_lock_irqsave(&pool->lock, flags);
445 for (i = 0; i < n; i++)
446 list_add(&desc[i]->entry, &pool->free_list);
447 spin_unlock_irqrestore(&pool->lock, flags);
450 static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
452 struct srp_device *dev = target->srp_host->srp_dev;
454 return srp_create_fr_pool(dev->dev, dev->pd,
455 target->scsi_host->can_queue,
456 dev->max_pages_per_mr);
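/*
 * Note that both srp_alloc_fmr_pool() and srp_alloc_fr_pool() size their
 * pools to scsi_host->can_queue, the SCSI host queue depth.
 */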
460 * srp_destroy_qp() - destroy an RDMA queue pair
461 * @ch: SRP RDMA channel.
463 * Change a queue pair into the error state and wait until all receive
464 * completions have been processed before destroying it. This prevents the
465 * receive completion handler from accessing the queue pair while it is being destroyed.
468 static void srp_destroy_qp(struct srp_rdma_ch *ch)
470 static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
471 static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
472 struct ib_recv_wr *bad_wr;
475 /* Destroying a QP and reusing ch->done is only safe if not connected */
476 WARN_ON_ONCE(ch->connected);
478 ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
479 WARN_ONCE(ret, "ib_modify_qp() returned %d\n", ret);
483 init_completion(&ch->done);
484 ret = ib_post_recv(ch->qp, &wr, &bad_wr);
485 WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
487 wait_for_completion(&ch->done);
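/*
 * At this point srp_handle_qp_err() has seen the flushed SRP_LAST_WR_ID
 * receive completion and completed ch->done, so no completion handler can
 * still be touching the queue pair.
 */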
490 ib_destroy_qp(ch->qp);
493 static int srp_create_ch_ib(struct srp_rdma_ch *ch)
495 struct srp_target_port *target = ch->target;
496 struct srp_device *dev = target->srp_host->srp_dev;
497 struct ib_qp_init_attr *init_attr;
498 struct ib_cq *recv_cq, *send_cq;
500 struct ib_fmr_pool *fmr_pool = NULL;
501 struct srp_fr_pool *fr_pool = NULL;
502 const int m = 1 + dev->use_fast_reg;
503 struct ib_cq_init_attr cq_attr = {};
506 init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
510 /* + 1 for SRP_LAST_WR_ID */
511 cq_attr.cqe = target->queue_size + 1;
512 cq_attr.comp_vector = ch->comp_vector;
513 recv_cq = ib_create_cq(dev->dev, srp_recv_completion, NULL, ch,
515 if (IS_ERR(recv_cq)) {
516 ret = PTR_ERR(recv_cq);
520 cq_attr.cqe = m * target->queue_size;
521 cq_attr.comp_vector = ch->comp_vector;
522 send_cq = ib_create_cq(dev->dev, srp_send_completion, NULL, ch,
524 if (IS_ERR(send_cq)) {
525 ret = PTR_ERR(send_cq);
529 ib_req_notify_cq(recv_cq, IB_CQ_NEXT_COMP);
531 init_attr->event_handler = srp_qp_event;
532 init_attr->cap.max_send_wr = m * target->queue_size;
533 init_attr->cap.max_recv_wr = target->queue_size + 1;
534 init_attr->cap.max_recv_sge = 1;
535 init_attr->cap.max_send_sge = 1;
536 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
537 init_attr->qp_type = IB_QPT_RC;
538 init_attr->send_cq = send_cq;
539 init_attr->recv_cq = recv_cq;
541 qp = ib_create_qp(dev->pd, init_attr);
547 ret = srp_init_qp(target, qp);
551 if (dev->use_fast_reg && dev->has_fr) {
552 fr_pool = srp_alloc_fr_pool(target);
553 if (IS_ERR(fr_pool)) {
554 ret = PTR_ERR(fr_pool);
555 shost_printk(KERN_WARNING, target->scsi_host, PFX
556 "FR pool allocation failed (%d)\n", ret);
560 srp_destroy_fr_pool(ch->fr_pool);
561 ch->fr_pool = fr_pool;
562 } else if (!dev->use_fast_reg && dev->has_fmr) {
563 fmr_pool = srp_alloc_fmr_pool(target);
564 if (IS_ERR(fmr_pool)) {
565 ret = PTR_ERR(fmr_pool);
566 shost_printk(KERN_WARNING, target->scsi_host, PFX
567 "FMR pool allocation failed (%d)\n", ret);
571 ib_destroy_fmr_pool(ch->fmr_pool);
572 ch->fmr_pool = fmr_pool;
578 ib_destroy_cq(ch->recv_cq);
580 ib_destroy_cq(ch->send_cq);
583 ch->recv_cq = recv_cq;
584 ch->send_cq = send_cq;
593 ib_destroy_cq(send_cq);
596 ib_destroy_cq(recv_cq);
604 * Note: this function may be called without srp_alloc_iu_bufs() having been
605 * invoked. Hence the ch->[rt]x_ring checks.
607 static void srp_free_ch_ib(struct srp_target_port *target,
608 struct srp_rdma_ch *ch)
610 struct srp_device *dev = target->srp_host->srp_dev;
617 ib_destroy_cm_id(ch->cm_id);
621 /* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
625 if (dev->use_fast_reg) {
627 srp_destroy_fr_pool(ch->fr_pool);
630 ib_destroy_fmr_pool(ch->fmr_pool);
633 ib_destroy_cq(ch->send_cq);
634 ib_destroy_cq(ch->recv_cq);
637 * Prevent the SCSI error handler from using this channel after it has
638 * been freed. The SCSI error handler may continue trying to perform
639 * recovery actions even after scsi_remove_host() has returned.
645 ch->send_cq = ch->recv_cq = NULL;
648 for (i = 0; i < target->queue_size; ++i)
649 srp_free_iu(target->srp_host, ch->rx_ring[i]);
654 for (i = 0; i < target->queue_size; ++i)
655 srp_free_iu(target->srp_host, ch->tx_ring[i]);
661 static void srp_path_rec_completion(int status,
662 struct ib_sa_path_rec *pathrec,
665 struct srp_rdma_ch *ch = ch_ptr;
666 struct srp_target_port *target = ch->target;
670 shost_printk(KERN_ERR, target->scsi_host,
671 PFX "Got failed path rec status %d\n", status);
677 static int srp_lookup_path(struct srp_rdma_ch *ch)
679 struct srp_target_port *target = ch->target;
682 ch->path.numb_path = 1;
684 init_completion(&ch->done);
686 ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
687 target->srp_host->srp_dev->dev,
688 target->srp_host->port,
690 IB_SA_PATH_REC_SERVICE_ID |
691 IB_SA_PATH_REC_DGID |
692 IB_SA_PATH_REC_SGID |
693 IB_SA_PATH_REC_NUMB_PATH |
695 SRP_PATH_REC_TIMEOUT_MS,
697 srp_path_rec_completion,
698 ch, &ch->path_query);
699 if (ch->path_query_id < 0)
700 return ch->path_query_id;
702 ret = wait_for_completion_interruptible(&ch->done);
707 shost_printk(KERN_WARNING, target->scsi_host,
708 PFX "Path record query failed\n");
713 static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
715 struct srp_target_port *target = ch->target;
717 struct ib_cm_req_param param;
718 struct srp_login_req priv;
722 req = kzalloc(sizeof *req, GFP_KERNEL);
726 req->param.primary_path = &ch->path;
727 req->param.alternate_path = NULL;
728 req->param.service_id = target->service_id;
729 req->param.qp_num = ch->qp->qp_num;
730 req->param.qp_type = ch->qp->qp_type;
731 req->param.private_data = &req->priv;
732 req->param.private_data_len = sizeof req->priv;
733 req->param.flow_control = 1;
735 get_random_bytes(&req->param.starting_psn, 4);
736 req->param.starting_psn &= 0xffffff;
739 * Pick some arbitrary defaults here; we could make these
740 * module parameters if anyone cared about setting them.
742 req->param.responder_resources = 4;
743 req->param.remote_cm_response_timeout = 20;
744 req->param.local_cm_response_timeout = 20;
745 req->param.retry_count = target->tl_retry_count;
746 req->param.rnr_retry_count = 7;
747 req->param.max_cm_retries = 15;
749 req->priv.opcode = SRP_LOGIN_REQ;
751 req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
752 req->priv.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
753 SRP_BUF_FORMAT_INDIRECT);
754 req->priv.req_flags = (multich ? SRP_MULTICHAN_MULTI :
755 SRP_MULTICHAN_SINGLE);
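/*
 * The MULTICHANNEL ACTION field tells the target whether to keep any RDMA
 * channels that already exist for this initiator/target pair
 * (SRP_MULTICHAN_MULTI, used when logging in additional channels) or to
 * terminate them (SRP_MULTICHAN_SINGLE).
 */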
757 * In the published SRP specification (draft rev. 16a), the
758 * port identifier format is 8 bytes of ID extension followed
759 * by 8 bytes of GUID. Older drafts put the two halves in the
760 * opposite order, so that the GUID comes first.
762 * Targets conforming to these obsolete drafts can be
763 * recognized by the I/O Class they report.
765 if (target->io_class == SRP_REV10_IB_IO_CLASS) {
766 memcpy(req->priv.initiator_port_id,
767 &target->sgid.global.interface_id, 8);
768 memcpy(req->priv.initiator_port_id + 8,
769 &target->initiator_ext, 8);
770 memcpy(req->priv.target_port_id, &target->ioc_guid, 8);
771 memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
773 memcpy(req->priv.initiator_port_id,
774 &target->initiator_ext, 8);
775 memcpy(req->priv.initiator_port_id + 8,
776 &target->sgid.global.interface_id, 8);
777 memcpy(req->priv.target_port_id, &target->id_ext, 8);
778 memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
782 * Topspin/Cisco SRP targets will reject our login unless we
783 * zero out the first 8 bytes of our initiator port ID and set
784 * the second 8 bytes to the local node GUID.
786 if (srp_target_is_topspin(target)) {
787 shost_printk(KERN_DEBUG, target->scsi_host,
788 PFX "Topspin/Cisco initiator port ID workaround "
789 "activated for target GUID %016llx\n",
790 be64_to_cpu(target->ioc_guid));
791 memset(req->priv.initiator_port_id, 0, 8);
792 memcpy(req->priv.initiator_port_id + 8,
793 &target->srp_host->srp_dev->dev->node_guid, 8);
796 status = ib_send_cm_req(ch->cm_id, &req->param);
803 static bool srp_queue_remove_work(struct srp_target_port *target)
805 bool changed = false;
807 spin_lock_irq(&target->lock);
808 if (target->state != SRP_TARGET_REMOVED) {
809 target->state = SRP_TARGET_REMOVED;
812 spin_unlock_irq(&target->lock);
815 queue_work(srp_remove_wq, &target->remove_work);
820 static void srp_disconnect_target(struct srp_target_port *target)
822 struct srp_rdma_ch *ch;
825 /* XXX should send SRP_I_LOGOUT request */
827 for (i = 0; i < target->ch_count; i++) {
829 ch->connected = false;
830 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
831 shost_printk(KERN_DEBUG, target->scsi_host,
832 PFX "Sending CM DREQ failed\n");
837 static void srp_free_req_data(struct srp_target_port *target,
838 struct srp_rdma_ch *ch)
840 struct srp_device *dev = target->srp_host->srp_dev;
841 struct ib_device *ibdev = dev->dev;
842 struct srp_request *req;
848 for (i = 0; i < target->req_ring_size; ++i) {
849 req = &ch->req_ring[i];
850 if (dev->use_fast_reg)
853 kfree(req->fmr_list);
854 kfree(req->map_page);
855 if (req->indirect_dma_addr) {
856 ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
857 target->indirect_size,
860 kfree(req->indirect_desc);
867 static int srp_alloc_req_data(struct srp_rdma_ch *ch)
869 struct srp_target_port *target = ch->target;
870 struct srp_device *srp_dev = target->srp_host->srp_dev;
871 struct ib_device *ibdev = srp_dev->dev;
872 struct srp_request *req;
875 int i, ret = -ENOMEM;
877 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
882 for (i = 0; i < target->req_ring_size; ++i) {
883 req = &ch->req_ring[i];
884 mr_list = kmalloc(target->cmd_sg_cnt * sizeof(void *),
888 if (srp_dev->use_fast_reg)
889 req->fr_list = mr_list;
891 req->fmr_list = mr_list;
892 req->map_page = kmalloc(srp_dev->max_pages_per_mr *
893 sizeof(void *), GFP_KERNEL);
896 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
897 if (!req->indirect_desc)
900 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
901 target->indirect_size,
903 if (ib_dma_mapping_error(ibdev, dma_addr))
906 req->indirect_dma_addr = dma_addr;
915 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
916 * @shost: SCSI host whose attributes to remove from sysfs.
918 * Note: Any attributes defined in the host template that did not exist
919 * before this function was invoked will be ignored.
921 static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
923 struct device_attribute **attr;
925 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
926 device_remove_file(&shost->shost_dev, *attr);
929 static void srp_remove_target(struct srp_target_port *target)
931 struct srp_rdma_ch *ch;
934 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
936 srp_del_scsi_host_attr(target->scsi_host);
937 srp_rport_get(target->rport);
938 srp_remove_host(target->scsi_host);
939 scsi_remove_host(target->scsi_host);
940 srp_stop_rport_timers(target->rport);
941 srp_disconnect_target(target);
942 for (i = 0; i < target->ch_count; i++) {
944 srp_free_ch_ib(target, ch);
946 cancel_work_sync(&target->tl_err_work);
947 srp_rport_put(target->rport);
948 for (i = 0; i < target->ch_count; i++) {
950 srp_free_req_data(target, ch);
955 spin_lock(&target->srp_host->target_lock);
956 list_del(&target->list);
957 spin_unlock(&target->srp_host->target_lock);
959 scsi_host_put(target->scsi_host);
962 static void srp_remove_work(struct work_struct *work)
964 struct srp_target_port *target =
965 container_of(work, struct srp_target_port, remove_work);
967 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
969 srp_remove_target(target);
972 static void srp_rport_delete(struct srp_rport *rport)
974 struct srp_target_port *target = rport->lld_data;
976 srp_queue_remove_work(target);
980 * srp_connected_ch() - number of connected channels
981 * @target: SRP target port.
983 static int srp_connected_ch(struct srp_target_port *target)
987 for (i = 0; i < target->ch_count; i++)
988 c += target->ch[i].connected;
993 static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
995 struct srp_target_port *target = ch->target;
998 WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
1000 ret = srp_lookup_path(ch);
1005 init_completion(&ch->done);
1006 ret = srp_send_req(ch, multich);
1009 ret = wait_for_completion_interruptible(&ch->done);
1014 * The CM event handling code will set status to
1015 * SRP_PORT_REDIRECT if we get a port redirect REJ
1016 * back, or SRP_DLID_REDIRECT if we get a lid/qp
1017 * redirect REJ back.
1019 switch (ch->status) {
1021 ch->connected = true;
1024 case SRP_PORT_REDIRECT:
1025 ret = srp_lookup_path(ch);
1030 case SRP_DLID_REDIRECT:
1033 case SRP_STALE_CONN:
1034 shost_printk(KERN_ERR, target->scsi_host, PFX
1035 "giving up on stale connection\n");
1036 ch->status = -ECONNRESET;
1045 static int srp_inv_rkey(struct srp_rdma_ch *ch, u32 rkey)
1047 struct ib_send_wr *bad_wr;
1048 struct ib_send_wr wr = {
1049 .opcode = IB_WR_LOCAL_INV,
1050 .wr_id = LOCAL_INV_WR_ID_MASK,
1054 .ex.invalidate_rkey = rkey,
1057 return ib_post_send(ch->qp, &wr, &bad_wr);
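/*
 * srp_inv_rkey() above posts an IB_WR_LOCAL_INV work request that invalidates
 * the rkey of a fast-registration MR; the LOCAL_INV_WR_ID_MASK bit in wr_id
 * lets srp_handle_qp_err() report failures of these requests separately.
 */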
1060 static void srp_unmap_data(struct scsi_cmnd *scmnd,
1061 struct srp_rdma_ch *ch,
1062 struct srp_request *req)
1064 struct srp_target_port *target = ch->target;
1065 struct srp_device *dev = target->srp_host->srp_dev;
1066 struct ib_device *ibdev = dev->dev;
1069 if (!scsi_sglist(scmnd) ||
1070 (scmnd->sc_data_direction != DMA_TO_DEVICE &&
1071 scmnd->sc_data_direction != DMA_FROM_DEVICE))
1074 if (dev->use_fast_reg) {
1075 struct srp_fr_desc **pfr;
1077 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
1078 res = srp_inv_rkey(ch, (*pfr)->mr->rkey);
1080 shost_printk(KERN_ERR, target->scsi_host, PFX
1081 "Queueing INV WR for rkey %#x failed (%d)\n",
1082 (*pfr)->mr->rkey, res);
1083 queue_work(system_long_wq,
1084 &target->tl_err_work);
1088 srp_fr_pool_put(ch->fr_pool, req->fr_list,
1091 struct ib_pool_fmr **pfmr;
1093 for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
1094 ib_fmr_pool_unmap(*pfmr);
1097 ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
1098 scmnd->sc_data_direction);
1102 * srp_claim_req - Take ownership of the scmnd associated with a request.
1103 * @ch: SRP RDMA channel.
1104 * @req: SRP request.
1105 * @sdev: If not NULL, only take ownership for this SCSI device.
1106 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1107 * ownership of @req->scmnd if it equals @scmnd.
1110 * Either NULL or a pointer to the SCSI command the caller became owner of.
1112 static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
1113 struct srp_request *req,
1114 struct scsi_device *sdev,
1115 struct scsi_cmnd *scmnd)
1117 unsigned long flags;
1119 spin_lock_irqsave(&ch->lock, flags);
1121 (!sdev || req->scmnd->device == sdev) &&
1122 (!scmnd || req->scmnd == scmnd)) {
1128 spin_unlock_irqrestore(&ch->lock, flags);
1134 * srp_free_req() - Unmap data and add request to the free request list.
1135 * @ch: SRP RDMA channel.
1136 * @req: Request to be freed.
1137 * @scmnd: SCSI command associated with @req.
1138 * @req_lim_delta: Amount to be added to @target->req_lim.
1140 static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
1141 struct scsi_cmnd *scmnd, s32 req_lim_delta)
1143 unsigned long flags;
1145 srp_unmap_data(scmnd, ch, req);
1147 spin_lock_irqsave(&ch->lock, flags);
1148 ch->req_lim += req_lim_delta;
1149 spin_unlock_irqrestore(&ch->lock, flags);
1152 static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
1153 struct scsi_device *sdev, int result)
1155 struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
1158 srp_free_req(ch, req, scmnd, 0);
1159 scmnd->result = result;
1160 scmnd->scsi_done(scmnd);
1164 static void srp_terminate_io(struct srp_rport *rport)
1166 struct srp_target_port *target = rport->lld_data;
1167 struct srp_rdma_ch *ch;
1168 struct Scsi_Host *shost = target->scsi_host;
1169 struct scsi_device *sdev;
1173 * Invoking srp_terminate_io() while srp_queuecommand() is running
1174 * is not safe. Hence the warning statement below.
1176 shost_for_each_device(sdev, shost)
1177 WARN_ON_ONCE(sdev->request_queue->request_fn_active);
1179 for (i = 0; i < target->ch_count; i++) {
1180 ch = &target->ch[i];
1182 for (j = 0; j < target->req_ring_size; ++j) {
1183 struct srp_request *req = &ch->req_ring[j];
1185 srp_finish_req(ch, req, NULL,
1186 DID_TRANSPORT_FAILFAST << 16);
1192 * It is up to the caller to ensure that srp_rport_reconnect() calls are
1193 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
1194 * srp_reset_device() or srp_reset_host() calls will occur while this function
1195 * is in progress. One way to realize that is not to call this function
1196 * directly but to call srp_reconnect_rport() instead since that last function
1197 * serializes calls of this function via rport->mutex and also blocks
1198 * srp_queuecommand() calls before invoking this function.
1200 static int srp_rport_reconnect(struct srp_rport *rport)
1202 struct srp_target_port *target = rport->lld_data;
1203 struct srp_rdma_ch *ch;
1205 bool multich = false;
1207 srp_disconnect_target(target);
1209 if (target->state == SRP_TARGET_SCANNING)
1213 * Now get a new local CM ID so that we avoid confusing the target in
1214 * case things are really fouled up. Doing so also ensures that all CM
1215 * callbacks will have finished before a new QP is allocated.
1217 for (i = 0; i < target->ch_count; i++) {
1218 ch = &target->ch[i];
1219 ret += srp_new_cm_id(ch);
1221 for (i = 0; i < target->ch_count; i++) {
1222 ch = &target->ch[i];
1223 for (j = 0; j < target->req_ring_size; ++j) {
1224 struct srp_request *req = &ch->req_ring[j];
1226 srp_finish_req(ch, req, NULL, DID_RESET << 16);
1229 for (i = 0; i < target->ch_count; i++) {
1230 ch = &target->ch[i];
1232 * Whether or not creating a new CM ID succeeded, create a new
1233 * QP. This guarantees that all completion callback function
1234 * invocations have finished before request resetting starts.
1236 ret += srp_create_ch_ib(ch);
1238 INIT_LIST_HEAD(&ch->free_tx);
1239 for (j = 0; j < target->queue_size; ++j)
1240 list_add(&ch->tx_ring[j]->list, &ch->free_tx);
1243 target->qp_in_error = false;
1245 for (i = 0; i < target->ch_count; i++) {
1246 ch = &target->ch[i];
1249 ret = srp_connect_ch(ch, multich);
1254 shost_printk(KERN_INFO, target->scsi_host,
1255 PFX "reconnect succeeded\n");
1260 static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
1261 unsigned int dma_len, u32 rkey)
1263 struct srp_direct_buf *desc = state->desc;
1265 desc->va = cpu_to_be64(dma_addr);
1266 desc->key = cpu_to_be32(rkey);
1267 desc->len = cpu_to_be32(dma_len);
1269 state->total_len += dma_len;
1274 static int srp_map_finish_fmr(struct srp_map_state *state,
1275 struct srp_rdma_ch *ch)
1277 struct ib_pool_fmr *fmr;
1280 fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
1281 state->npages, io_addr);
1283 return PTR_ERR(fmr);
1285 *state->next_fmr++ = fmr;
1288 srp_map_desc(state, 0, state->dma_len, fmr->fmr->rkey);
1293 static int srp_map_finish_fr(struct srp_map_state *state,
1294 struct srp_rdma_ch *ch)
1296 struct srp_target_port *target = ch->target;
1297 struct srp_device *dev = target->srp_host->srp_dev;
1298 struct ib_send_wr *bad_wr;
1299 struct ib_send_wr wr;
1300 struct srp_fr_desc *desc;
1303 desc = srp_fr_pool_get(ch->fr_pool);
1307 rkey = ib_inc_rkey(desc->mr->rkey);
1308 ib_update_fast_reg_key(desc->mr, rkey);
1310 memcpy(desc->frpl->page_list, state->pages,
1311 sizeof(state->pages[0]) * state->npages);
1313 memset(&wr, 0, sizeof(wr));
1314 wr.opcode = IB_WR_FAST_REG_MR;
1315 wr.wr_id = FAST_REG_WR_ID_MASK;
1316 wr.wr.fast_reg.iova_start = state->base_dma_addr;
1317 wr.wr.fast_reg.page_list = desc->frpl;
1318 wr.wr.fast_reg.page_list_len = state->npages;
1319 wr.wr.fast_reg.page_shift = ilog2(dev->mr_page_size);
1320 wr.wr.fast_reg.length = state->dma_len;
1321 wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
1322 IB_ACCESS_REMOTE_READ |
1323 IB_ACCESS_REMOTE_WRITE);
1324 wr.wr.fast_reg.rkey = desc->mr->lkey;
1326 *state->next_fr++ = desc;
1329 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1332 return ib_post_send(ch->qp, &wr, &bad_wr);
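/*
 * Note on srp_map_finish_fr() above: ib_inc_rkey()/ib_update_fast_reg_key()
 * generate a fresh key for the descriptor's MR, srp_map_desc() records a
 * descriptor that references it, and the IB_WR_FAST_REG_MR work request is
 * posted on ch->qp, the same send queue that later carries the SRP_CMD
 * referring to the registered memory.
 */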
1335 static int srp_finish_mapping(struct srp_map_state *state,
1336 struct srp_rdma_ch *ch)
1338 struct srp_target_port *target = ch->target;
1341 if (state->npages == 0)
1344 if (state->npages == 1 && !register_always)
1345 srp_map_desc(state, state->base_dma_addr, state->dma_len,
1348 ret = target->srp_host->srp_dev->use_fast_reg ?
1349 srp_map_finish_fr(state, ch) :
1350 srp_map_finish_fmr(state, ch);
1360 static void srp_map_update_start(struct srp_map_state *state,
1361 struct scatterlist *sg, int sg_index,
1362 dma_addr_t dma_addr)
1364 state->unmapped_sg = sg;
1365 state->unmapped_index = sg_index;
1366 state->unmapped_addr = dma_addr;
1369 static int srp_map_sg_entry(struct srp_map_state *state,
1370 struct srp_rdma_ch *ch,
1371 struct scatterlist *sg, int sg_index,
1374 struct srp_target_port *target = ch->target;
1375 struct srp_device *dev = target->srp_host->srp_dev;
1376 struct ib_device *ibdev = dev->dev;
1377 dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
1378 unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
1387 * Once we're in direct map mode for a request, we don't
1388 * go back to FMR or FR mode, so no need to update anything
1389 * other than the descriptor.
1391 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1396 * Since not all RDMA HW drivers support non-zero page offsets for
1397 * FMR, if we start at an offset into a page, don't merge into the
1398 * current FMR mapping. Finish it out, and use the kernel's MR for this sg entry.
1401 if ((!dev->use_fast_reg && dma_addr & ~dev->mr_page_mask) ||
1402 dma_len > dev->mr_max_size) {
1403 ret = srp_finish_mapping(state, ch);
1407 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1408 srp_map_update_start(state, NULL, 0, 0);
1413 * If this is the first sg that will be mapped via FMR or via FR, save
1414 * our position. We need to know the first unmapped entry, its index,
1415 * and the first unmapped address within that entry to be able to
1416 * restart mapping after an error.
1418 if (!state->unmapped_sg)
1419 srp_map_update_start(state, sg, sg_index, dma_addr);
1422 unsigned offset = dma_addr & ~dev->mr_page_mask;
1423 if (state->npages == dev->max_pages_per_mr || offset != 0) {
1424 ret = srp_finish_mapping(state, ch);
1428 srp_map_update_start(state, sg, sg_index, dma_addr);
1431 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
1434 state->base_dma_addr = dma_addr;
1435 state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
1436 state->dma_len += len;
1442 * If the last entry of the MR wasn't a full page, then we need to
1443 * close it out and start a new one -- we can only merge at page boundaries.
1447 if (len != dev->mr_page_size) {
1448 ret = srp_finish_mapping(state, ch);
1450 srp_map_update_start(state, NULL, 0, 0);
1455 static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
1456 struct srp_request *req, struct scatterlist *scat,
1459 struct srp_target_port *target = ch->target;
1460 struct srp_device *dev = target->srp_host->srp_dev;
1461 struct ib_device *ibdev = dev->dev;
1462 struct scatterlist *sg;
1466 state->desc = req->indirect_desc;
1467 state->pages = req->map_page;
1468 if (dev->use_fast_reg) {
1469 state->next_fr = req->fr_list;
1470 use_mr = !!ch->fr_pool;
1472 state->next_fmr = req->fmr_list;
1473 use_mr = !!ch->fmr_pool;
1476 for_each_sg(scat, sg, count, i) {
1477 if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
1479 * Memory registration failed, so backtrack to the
1480 * first unmapped entry and continue on without using
1481 * memory registration.
1483 dma_addr_t dma_addr;
1484 unsigned int dma_len;
1487 sg = state->unmapped_sg;
1488 i = state->unmapped_index;
1490 dma_addr = ib_sg_dma_address(ibdev, sg);
1491 dma_len = ib_sg_dma_len(ibdev, sg);
1492 dma_len -= (state->unmapped_addr - dma_addr);
1493 dma_addr = state->unmapped_addr;
1495 srp_map_desc(state, dma_addr, dma_len, target->rkey);
1499 if (use_mr && srp_finish_mapping(state, ch))
1502 req->nmdesc = state->nmdesc;
1507 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
1508 struct srp_request *req)
1510 struct srp_target_port *target = ch->target;
1511 struct scatterlist *scat;
1512 struct srp_cmd *cmd = req->cmd->buf;
1513 int len, nents, count;
1514 struct srp_device *dev;
1515 struct ib_device *ibdev;
1516 struct srp_map_state state;
1517 struct srp_indirect_buf *indirect_hdr;
1521 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
1522 return sizeof (struct srp_cmd);
1524 if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
1525 scmnd->sc_data_direction != DMA_TO_DEVICE) {
1526 shost_printk(KERN_WARNING, target->scsi_host,
1527 PFX "Unhandled data direction %d\n",
1528 scmnd->sc_data_direction);
1532 nents = scsi_sg_count(scmnd);
1533 scat = scsi_sglist(scmnd);
1535 dev = target->srp_host->srp_dev;
1538 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
1539 if (unlikely(count == 0))
1542 fmt = SRP_DATA_DESC_DIRECT;
1543 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);
1545 if (count == 1 && !register_always) {
1547 * The midlayer only generated a single gather/scatter
1548 * entry, or DMA mapping coalesced everything to a
1549 * single entry. So a direct descriptor along with
1550 * the DMA MR suffices.
1552 struct srp_direct_buf *buf = (void *) cmd->add_data;
1554 buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
1555 buf->key = cpu_to_be32(target->rkey);
1556 buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
1563 * We have more than one scatter/gather entry, so build our indirect
1564 * descriptor table, trying to merge as many entries as we can.
1566 indirect_hdr = (void *) cmd->add_data;
1568 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
1569 target->indirect_size, DMA_TO_DEVICE);
1571 memset(&state, 0, sizeof(state));
1572 srp_map_sg(&state, ch, req, scat, count);
1574 /* We've mapped the request, now pull as much of the indirect
1575 * descriptor table as we can into the command buffer. If this
1576 * target is not using an external indirect table, we are
1577 * guaranteed to fit into the command, as the SCSI layer won't
1578 * give us more S/G entries than we allow.
1580 if (state.ndesc == 1) {
1582 * Memory registration collapsed the sg-list into one entry,
1583 * so use a direct descriptor.
1585 struct srp_direct_buf *buf = (void *) cmd->add_data;
1587 *buf = req->indirect_desc[0];
1591 if (unlikely(target->cmd_sg_cnt < state.ndesc &&
1592 !target->allow_ext_sg)) {
1593 shost_printk(KERN_ERR, target->scsi_host,
1594 "Could not fit S/G list into SRP_CMD\n");
1598 count = min(state.ndesc, target->cmd_sg_cnt);
1599 table_len = state.ndesc * sizeof (struct srp_direct_buf);
1601 fmt = SRP_DATA_DESC_INDIRECT;
1602 len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
1603 len += count * sizeof (struct srp_direct_buf);
1605 memcpy(indirect_hdr->desc_list, req->indirect_desc,
1606 count * sizeof (struct srp_direct_buf));
1608 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
1609 indirect_hdr->table_desc.key = cpu_to_be32(target->rkey);
1610 indirect_hdr->table_desc.len = cpu_to_be32(table_len);
1611 indirect_hdr->len = cpu_to_be32(state.total_len);
1613 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1614 cmd->data_out_desc_cnt = count;
1616 cmd->data_in_desc_cnt = count;
1618 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
1622 if (scmnd->sc_data_direction == DMA_TO_DEVICE)
1623 cmd->buf_fmt = fmt << 4;
1631 * Return an IU and possible credit to the free pool
1633 static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
1634 enum srp_iu_type iu_type)
1636 unsigned long flags;
1638 spin_lock_irqsave(&ch->lock, flags);
1639 list_add(&iu->list, &ch->free_tx);
1640 if (iu_type != SRP_IU_RSP)
1642 spin_unlock_irqrestore(&ch->lock, flags);
1646 * Must be called with ch->lock held to protect req_lim and free_tx.
1647 * If IU is not sent, it must be returned using srp_put_tx_iu().
1650 * An upper limit for the number of allocated information units for each request type is:
1652 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1653 * more than Scsi_Host.can_queue requests.
1654 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1655 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1656 * one unanswered SRP request to an initiator.
1658 static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
1659 enum srp_iu_type iu_type)
1661 struct srp_target_port *target = ch->target;
1662 s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
1665 srp_send_completion(ch->send_cq, ch);
1667 if (list_empty(&ch->free_tx))
1670 /* Initiator responses to target requests do not consume credits */
1671 if (iu_type != SRP_IU_RSP) {
1672 if (ch->req_lim <= rsv) {
1673 ++target->zero_req_lim;
1680 iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
1681 list_del(&iu->list);
1685 static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
1687 struct srp_target_port *target = ch->target;
1689 struct ib_send_wr wr, *bad_wr;
1691 list.addr = iu->dma;
1693 list.lkey = target->lkey;
1696 wr.wr_id = (uintptr_t) iu;
1699 wr.opcode = IB_WR_SEND;
1700 wr.send_flags = IB_SEND_SIGNALED;
1702 return ib_post_send(ch->qp, &wr, &bad_wr);
1705 static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
1707 struct srp_target_port *target = ch->target;
1708 struct ib_recv_wr wr, *bad_wr;
1711 list.addr = iu->dma;
1712 list.length = iu->size;
1713 list.lkey = target->lkey;
1716 wr.wr_id = (uintptr_t) iu;
1720 return ib_post_recv(ch->qp, &wr, &bad_wr);
1723 static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
1725 struct srp_target_port *target = ch->target;
1726 struct srp_request *req;
1727 struct scsi_cmnd *scmnd;
1728 unsigned long flags;
1730 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
1731 spin_lock_irqsave(&ch->lock, flags);
1732 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1733 spin_unlock_irqrestore(&ch->lock, flags);
1735 ch->tsk_mgmt_status = -1;
1736 if (be32_to_cpu(rsp->resp_data_len) >= 4)
1737 ch->tsk_mgmt_status = rsp->data[3];
1738 complete(&ch->tsk_mgmt_done);
1740 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
1742 req = (void *)scmnd->host_scribble;
1743 scmnd = srp_claim_req(ch, req, NULL, scmnd);
1746 shost_printk(KERN_ERR, target->scsi_host,
1747 "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
1748 rsp->tag, ch - target->ch, ch->qp->qp_num);
1750 spin_lock_irqsave(&ch->lock, flags);
1751 ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
1752 spin_unlock_irqrestore(&ch->lock, flags);
1756 scmnd->result = rsp->status;
1758 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
1759 memcpy(scmnd->sense_buffer, rsp->data +
1760 be32_to_cpu(rsp->resp_data_len),
1761 min_t(int, be32_to_cpu(rsp->sense_data_len),
1762 SCSI_SENSE_BUFFERSIZE));
1765 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
1766 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
1767 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
1768 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
1769 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
1770 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
1771 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
1772 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
1774 srp_free_req(ch, req, scmnd,
1775 be32_to_cpu(rsp->req_lim_delta));
1777 scmnd->host_scribble = NULL;
1778 scmnd->scsi_done(scmnd);
1782 static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
1785 struct srp_target_port *target = ch->target;
1786 struct ib_device *dev = target->srp_host->srp_dev->dev;
1787 unsigned long flags;
1791 spin_lock_irqsave(&ch->lock, flags);
1792 ch->req_lim += req_delta;
1793 iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
1794 spin_unlock_irqrestore(&ch->lock, flags);
1797 shost_printk(KERN_ERR, target->scsi_host, PFX
1798 "no IU available to send response\n");
1802 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
1803 memcpy(iu->buf, rsp, len);
1804 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
1806 err = srp_post_send(ch, iu, len);
1808 shost_printk(KERN_ERR, target->scsi_host, PFX
1809 "unable to post response: %d\n", err);
1810 srp_put_tx_iu(ch, iu, SRP_IU_RSP);
1816 static void srp_process_cred_req(struct srp_rdma_ch *ch,
1817 struct srp_cred_req *req)
1819 struct srp_cred_rsp rsp = {
1820 .opcode = SRP_CRED_RSP,
1823 s32 delta = be32_to_cpu(req->req_lim_delta);
1825 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1826 shost_printk(KERN_ERR, ch->target->scsi_host, PFX
1827 "problems processing SRP_CRED_REQ\n");
1830 static void srp_process_aer_req(struct srp_rdma_ch *ch,
1831 struct srp_aer_req *req)
1833 struct srp_target_port *target = ch->target;
1834 struct srp_aer_rsp rsp = {
1835 .opcode = SRP_AER_RSP,
1838 s32 delta = be32_to_cpu(req->req_lim_delta);
1840 shost_printk(KERN_ERR, target->scsi_host, PFX
1841 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
1843 if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
1844 shost_printk(KERN_ERR, target->scsi_host, PFX
1845 "problems processing SRP_AER_REQ\n");
1848 static void srp_handle_recv(struct srp_rdma_ch *ch, struct ib_wc *wc)
1850 struct srp_target_port *target = ch->target;
1851 struct ib_device *dev = target->srp_host->srp_dev->dev;
1852 struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
1856 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
1859 opcode = *(u8 *) iu->buf;
1862 shost_printk(KERN_ERR, target->scsi_host,
1863 PFX "recv completion, opcode 0x%02x\n", opcode);
1864 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
1865 iu->buf, wc->byte_len, true);
1870 srp_process_rsp(ch, iu->buf);
1874 srp_process_cred_req(ch, iu->buf);
1878 srp_process_aer_req(ch, iu->buf);
1882 /* XXX Handle target logout */
1883 shost_printk(KERN_WARNING, target->scsi_host,
1884 PFX "Got target logout request\n");
1888 shost_printk(KERN_WARNING, target->scsi_host,
1889 PFX "Unhandled SRP opcode 0x%02x\n", opcode);
1893 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
1896 res = srp_post_recv(ch, iu);
1898 shost_printk(KERN_ERR, target->scsi_host,
1899 PFX "Recv failed with error code %d\n", res);
1903 * srp_tl_err_work() - handle a transport layer error
1904 * @work: Work structure embedded in an SRP target port.
1906 * Note: This function may get invoked before the rport has been created,
1907 * hence the target->rport test.
1909 static void srp_tl_err_work(struct work_struct *work)
1911 struct srp_target_port *target;
1913 target = container_of(work, struct srp_target_port, tl_err_work);
1915 srp_start_tl_fail_timers(target->rport);
1918 static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
1919 bool send_err, struct srp_rdma_ch *ch)
1921 struct srp_target_port *target = ch->target;
1923 if (wr_id == SRP_LAST_WR_ID) {
1924 complete(&ch->done);
1928 if (ch->connected && !target->qp_in_error) {
1929 if (wr_id & LOCAL_INV_WR_ID_MASK) {
1930 shost_printk(KERN_ERR, target->scsi_host, PFX
1931 "LOCAL_INV failed with status %s (%d)\n",
1932 ib_wc_status_msg(wc_status), wc_status);
1933 } else if (wr_id & FAST_REG_WR_ID_MASK) {
1934 shost_printk(KERN_ERR, target->scsi_host, PFX
1935 "FAST_REG_MR failed status %s (%d)\n",
1936 ib_wc_status_msg(wc_status), wc_status);
1938 shost_printk(KERN_ERR, target->scsi_host,
1939 PFX "failed %s status %s (%d) for iu %p\n",
1940 send_err ? "send" : "receive",
1941 ib_wc_status_msg(wc_status), wc_status,
1942 (void *)(uintptr_t)wr_id);
1944 queue_work(system_long_wq, &target->tl_err_work);
1946 target->qp_in_error = true;
1949 static void srp_recv_completion(struct ib_cq *cq, void *ch_ptr)
1951 struct srp_rdma_ch *ch = ch_ptr;
1954 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
1955 while (ib_poll_cq(cq, 1, &wc) > 0) {
1956 if (likely(wc.status == IB_WC_SUCCESS)) {
1957 srp_handle_recv(ch, &wc);
1959 srp_handle_qp_err(wc.wr_id, wc.status, false, ch);
1964 static void srp_send_completion(struct ib_cq *cq, void *ch_ptr)
1966 struct srp_rdma_ch *ch = ch_ptr;
1970 while (ib_poll_cq(cq, 1, &wc) > 0) {
1971 if (likely(wc.status == IB_WC_SUCCESS)) {
1972 iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
1973 list_add(&iu->list, &ch->free_tx);
1975 srp_handle_qp_err(wc.wr_id, wc.status, true, ch);
1980 static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
1982 struct srp_target_port *target = host_to_target(shost);
1983 struct srp_rport *rport = target->rport;
1984 struct srp_rdma_ch *ch;
1985 struct srp_request *req;
1987 struct srp_cmd *cmd;
1988 struct ib_device *dev;
1989 unsigned long flags;
1993 const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
1996 * The SCSI EH thread is the only context from which srp_queuecommand()
1997 * can get invoked for blocked devices (SDEV_BLOCK /
1998 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
1999 * locking the rport mutex if invoked from inside the SCSI EH.
2002 mutex_lock(&rport->mutex);
2004 scmnd->result = srp_chkready(target->rport);
2005 if (unlikely(scmnd->result))
2008 WARN_ON_ONCE(scmnd->request->tag < 0);
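/*
 * blk_mq_unique_tag() encodes the blk-mq hardware queue index in the upper
 * 16 bits and the per-queue tag in the lower 16 bits; the former selects the
 * RDMA channel and the latter indexes the channel's request ring.
 */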
2009 tag = blk_mq_unique_tag(scmnd->request);
2010 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
2011 idx = blk_mq_unique_tag_to_tag(tag);
2012 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
2013 dev_name(&shost->shost_gendev), tag, idx,
2014 target->req_ring_size);
2016 spin_lock_irqsave(&ch->lock, flags);
2017 iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
2018 spin_unlock_irqrestore(&ch->lock, flags);
2023 req = &ch->req_ring[idx];
2024 dev = target->srp_host->srp_dev->dev;
2025 ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
2028 scmnd->host_scribble = (void *) req;
2031 memset(cmd, 0, sizeof *cmd);
2033 cmd->opcode = SRP_CMD;
2034 int_to_scsilun(scmnd->device->lun, &cmd->lun);
2036 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
2041 len = srp_map_data(scmnd, ch, req);
2043 shost_printk(KERN_ERR, target->scsi_host,
2044 PFX "Failed to map data (%d)\n", len);
2046 * If we ran out of memory descriptors (-ENOMEM) because an
2047 * application is queuing many requests with more than
2048 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
2049 * to reduce queue depth temporarily.
2051 scmnd->result = len == -ENOMEM ?
2052 DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
2056 ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
2059 if (srp_post_send(ch, iu, len)) {
2060 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
2068 mutex_unlock(&rport->mutex);
2073 srp_unmap_data(scmnd, ch, req);
2076 srp_put_tx_iu(ch, iu, SRP_IU_CMD);
2079 * Make sure that the loops that iterate over the request ring cannot
2080 * encounter a dangling SCSI command pointer.
2085 if (scmnd->result) {
2086 scmnd->scsi_done(scmnd);
2089 ret = SCSI_MLQUEUE_HOST_BUSY;
2096 * Note: the resources allocated in this function are freed in srp_free_ch_ib().
2099 static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
2101 struct srp_target_port *target = ch->target;
2104 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
2108 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
2113 for (i = 0; i < target->queue_size; ++i) {
2114 ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
2116 GFP_KERNEL, DMA_FROM_DEVICE);
2117 if (!ch->rx_ring[i])
2121 for (i = 0; i < target->queue_size; ++i) {
2122 ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
2124 GFP_KERNEL, DMA_TO_DEVICE);
2125 if (!ch->tx_ring[i])
2128 list_add(&ch->tx_ring[i]->list, &ch->free_tx);
2134 for (i = 0; i < target->queue_size; ++i) {
2135 srp_free_iu(target->srp_host, ch->rx_ring[i]);
2136 srp_free_iu(target->srp_host, ch->tx_ring[i]);
2149 static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
2151 uint64_t T_tr_ns, max_compl_time_ms;
2152 uint32_t rq_tmo_jiffies;
2155 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
2156 * table 91), both the QP timeout and the retry count have to be set
2157 * for RC QPs during the RTR to RTS transition.
2159 WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
2160 (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
2163 * Set target->rq_tmo_jiffies to one second more than the largest time
2164 * it can take before an error completion is generated. See also
2165 * C9-140..142 in the IBTA spec for more information about how to
2166 * convert the QP Local ACK Timeout value to nanoseconds.
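 *
 * For example, with a local ACK timeout of 19 and retry_cnt = 7 this gives
 * T_tr = 4096 * 2^19 ns ~= 2.15 s and a worst-case completion time of
 * 7 * 4 * 2.15 s ~= 60 s, so rq_tmo_jiffies corresponds to roughly 61 seconds.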
2168 T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
2169 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
2170 do_div(max_compl_time_ms, NSEC_PER_MSEC);
2171 rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
2173 return rq_tmo_jiffies;
2176 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2177 struct srp_login_rsp *lrsp,
2178 struct srp_rdma_ch *ch)
2180 struct srp_target_port *target = ch->target;
2181 struct ib_qp_attr *qp_attr = NULL;
2186 if (lrsp->opcode == SRP_LOGIN_RSP) {
2187 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2188 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2191 * Reserve credits for task management so we don't
2192 * bounce requests back to the SCSI mid-layer.
2194 target->scsi_host->can_queue
2195 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2196 target->scsi_host->can_queue);
2197 target->scsi_host->cmd_per_lun
2198 = min_t(int, target->scsi_host->can_queue,
2199 target->scsi_host->cmd_per_lun);
2201 shost_printk(KERN_WARNING, target->scsi_host,
2202 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2208 ret = srp_alloc_iu_bufs(ch);
2214 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2218 qp_attr->qp_state = IB_QPS_RTR;
2219 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2223 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2227 for (i = 0; i < target->queue_size; i++) {
2228 struct srp_iu *iu = ch->rx_ring[i];
2230 ret = srp_post_recv(ch, iu);
2235 qp_attr->qp_state = IB_QPS_RTS;
2236 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2240 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
2242 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2246 ret = ib_send_cm_rtu(cm_id, NULL, 0);
2255 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2256 struct ib_cm_event *event,
2257 struct srp_rdma_ch *ch)
2259 struct srp_target_port *target = ch->target;
2260 struct Scsi_Host *shost = target->scsi_host;
2261 struct ib_class_port_info *cpi;
2264 switch (event->param.rej_rcvd.reason) {
2265 case IB_CM_REJ_PORT_CM_REDIRECT:
2266 cpi = event->param.rej_rcvd.ari;
2267 ch->path.dlid = cpi->redirect_lid;
2268 ch->path.pkey = cpi->redirect_pkey;
2269 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2270 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2272 ch->status = ch->path.dlid ?
2273 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2276 case IB_CM_REJ_PORT_REDIRECT:
2277 if (srp_target_is_topspin(target)) {
2279 * Topspin/Cisco SRP gateways incorrectly send
2280 * reject reason code 25 when they mean 24 (port redirect).
2283 memcpy(ch->path.dgid.raw,
2284 event->param.rej_rcvd.ari, 16);
2286 shost_printk(KERN_DEBUG, shost,
2287 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2288 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2289 be64_to_cpu(ch->path.dgid.global.interface_id));
2291 ch->status = SRP_PORT_REDIRECT;
2293 shost_printk(KERN_WARNING, shost,
2294 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2295 ch->status = -ECONNRESET;
2299 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2300 shost_printk(KERN_WARNING, shost,
2301 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2302 ch->status = -ECONNRESET;
2305 case IB_CM_REJ_CONSUMER_DEFINED:
2306 opcode = *(u8 *) event->private_data;
2307 if (opcode == SRP_LOGIN_REJ) {
2308 struct srp_login_rej *rej = event->private_data;
2309 u32 reason = be32_to_cpu(rej->reason);
2311 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2312 shost_printk(KERN_WARNING, shost,
2313 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2315 shost_printk(KERN_WARNING, shost, PFX
2316 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2318 target->orig_dgid.raw, reason);
2320 shost_printk(KERN_WARNING, shost,
2321 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2322 " opcode 0x%02x\n", opcode);
2323 ch->status = -ECONNRESET;
2326 case IB_CM_REJ_STALE_CONN:
2327 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2328 ch->status = SRP_STALE_CONN;
2332 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2333 event->param.rej_rcvd.reason);
2334 ch->status = -ECONNRESET;
2338 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2340 struct srp_rdma_ch *ch = cm_id->context;
2341 struct srp_target_port *target = ch->target;
2344 switch (event->event) {
2345 case IB_CM_REQ_ERROR:
2346 shost_printk(KERN_DEBUG, target->scsi_host,
2347 PFX "Sending CM REQ failed\n");
2349 ch->status = -ECONNRESET;
2352 case IB_CM_REP_RECEIVED:
2354 srp_cm_rep_handler(cm_id, event->private_data, ch);
2357 case IB_CM_REJ_RECEIVED:
2358 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2361 srp_cm_rej_handler(cm_id, event, ch);
2364 case IB_CM_DREQ_RECEIVED:
2365 shost_printk(KERN_WARNING, target->scsi_host,
2366 PFX "DREQ received - connection closed\n");
2367 ch->connected = false;
2368 if (ib_send_cm_drep(cm_id, NULL, 0))
2369 shost_printk(KERN_ERR, target->scsi_host,
2370 PFX "Sending CM DREP failed\n");
2371 queue_work(system_long_wq, &target->tl_err_work);
2374 case IB_CM_TIMEWAIT_EXIT:
2375 shost_printk(KERN_ERR, target->scsi_host,
2376 PFX "connection closed\n");
2382 case IB_CM_MRA_RECEIVED:
2383 case IB_CM_DREQ_ERROR:
2384 case IB_CM_DREP_RECEIVED:
2388 shost_printk(KERN_WARNING, target->scsi_host,
2389 PFX "Unhandled CM event %d\n", event->event);
2394 complete(&ch->done);
2400 * srp_change_queue_depth - set the queue depth of a SCSI device
2401 * @sdev: SCSI device struct
2402 * @qdepth: requested queue depth
2404 * Returns the new queue depth.
2407 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2409 if (!sdev->tagged_supported)
qdepth = 1;
2411 return scsi_change_queue_depth(sdev, qdepth);
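/*
 * Build an SRP_TSK_MGMT information unit for task management function
 * @func on @lun, post it on @ch and wait up to SRP_ABORT_TIMEOUT_MS for
 * the target's response.
 */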
2414 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2417 struct srp_target_port *target = ch->target;
2418 struct srp_rport *rport = target->rport;
2419 struct ib_device *dev = target->srp_host->srp_dev->dev;
2421 struct srp_tsk_mgmt *tsk_mgmt;
2423 if (!ch->connected || target->qp_in_error)
2426 init_completion(&ch->tsk_mgmt_done);
2429 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2430 * invoked while a task management function is being sent.
2432 mutex_lock(&rport->mutex);
2433 spin_lock_irq(&ch->lock);
2434 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2435 spin_unlock_irq(&ch->lock);
2438 mutex_unlock(&rport->mutex);
2443 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2446 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2448 tsk_mgmt->opcode = SRP_TSK_MGMT;
2449 int_to_scsilun(lun, &tsk_mgmt->lun);
2450 tsk_mgmt->tag = req_tag | SRP_TAG_TSK_MGMT;
2451 tsk_mgmt->tsk_mgmt_func = func;
2452 tsk_mgmt->task_tag = req_tag;
2454 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2456 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2457 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2458 mutex_unlock(&rport->mutex);
2462 mutex_unlock(&rport->mutex);
2464 if (!wait_for_completion_timeout(&ch->tsk_mgmt_done,
2465 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
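/*
 * SCSI EH abort handler: derive the channel from the block layer tag,
 * try to claim the request, send SRP_TSK_ABORT_TASK and, if the request
 * was claimed, complete the command with DID_ABORT.
 */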
2471 static int srp_abort(struct scsi_cmnd *scmnd)
2473 struct srp_target_port *target = host_to_target(scmnd->device->host);
2474 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2477 struct srp_rdma_ch *ch;
2480 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2484 tag = blk_mq_unique_tag(scmnd->request);
2485 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2486 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2488 ch = &target->ch[ch_idx];
2489 if (!srp_claim_req(ch, req, NULL, scmnd))
2491 shost_printk(KERN_ERR, target->scsi_host,
2492 "Sending SRP abort for tag %#x\n", tag);
2493 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2494 SRP_TSK_ABORT_TASK) == 0)
2496 else if (target->rport->state == SRP_RPORT_LOST)
2500 srp_free_req(ch, req, scmnd, 0);
2501 scmnd->result = DID_ABORT << 16;
2502 scmnd->scsi_done(scmnd);
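/*
 * SCSI EH device reset handler: send a LUN reset on channel 0 and then
 * finish every outstanding request on all channels with DID_RESET.
 */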
2507 static int srp_reset_device(struct scsi_cmnd *scmnd)
2509 struct srp_target_port *target = host_to_target(scmnd->device->host);
2510 struct srp_rdma_ch *ch;
2513 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2515 ch = &target->ch[0];
2516 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2519 if (ch->tsk_mgmt_status)
/* Walk the channels via 'ch' so that 'i' is not reused by both loops. */
2522 for (ch = &target->ch[0]; ch < &target->ch[target->ch_count]; ch++) {
2524 for (i = 0; i < target->req_ring_size; ++i) {
2525 struct srp_request *req = &ch->req_ring[i];
2527 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2534 static int srp_reset_host(struct scsi_cmnd *scmnd)
2536 struct srp_target_port *target = host_to_target(scmnd->device->host);
2538 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2540 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
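/*
 * For disk devices, raise the block layer request timeout to at least
 * the RC retry timeout computed by srp_compute_rq_tmo() so that
 * commands are not timed out before the transport has stopped retrying.
 */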
2543 static int srp_slave_configure(struct scsi_device *sdev)
2545 struct Scsi_Host *shost = sdev->host;
2546 struct srp_target_port *target = host_to_target(shost);
2547 struct request_queue *q = sdev->request_queue;
2548 unsigned long timeout;
2550 if (sdev->type == TYPE_DISK) {
2551 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2552 blk_queue_rq_timeout(q, timeout);
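/*
 * Read-only sysfs attributes of the SCSI host.  Each show_*() function
 * below is exported through DEVICE_ATTR() and collected in
 * srp_host_attrs[].
 */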
2558 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2561 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2563 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2566 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2569 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2571 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2574 static ssize_t show_service_id(struct device *dev,
2575 struct device_attribute *attr, char *buf)
2577 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2579 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2582 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2585 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2587 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2590 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2593 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2595 return sprintf(buf, "%pI6\n", target->sgid.raw);
2598 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2601 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2602 struct srp_rdma_ch *ch = &target->ch[0];
2604 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2607 static ssize_t show_orig_dgid(struct device *dev,
2608 struct device_attribute *attr, char *buf)
2610 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2612 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2615 static ssize_t show_req_lim(struct device *dev,
2616 struct device_attribute *attr, char *buf)
2618 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2619 struct srp_rdma_ch *ch;
2620 int i, req_lim = INT_MAX;
2622 for (i = 0; i < target->ch_count; i++) {
2623 ch = &target->ch[i];
2624 req_lim = min(req_lim, ch->req_lim);
2626 return sprintf(buf, "%d\n", req_lim);
2629 static ssize_t show_zero_req_lim(struct device *dev,
2630 struct device_attribute *attr, char *buf)
2632 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2634 return sprintf(buf, "%d\n", target->zero_req_lim);
2637 static ssize_t show_local_ib_port(struct device *dev,
2638 struct device_attribute *attr, char *buf)
2640 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2642 return sprintf(buf, "%d\n", target->srp_host->port);
2645 static ssize_t show_local_ib_device(struct device *dev,
2646 struct device_attribute *attr, char *buf)
2648 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2650 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2653 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2656 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2658 return sprintf(buf, "%d\n", target->ch_count);
2661 static ssize_t show_comp_vector(struct device *dev,
2662 struct device_attribute *attr, char *buf)
2664 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2666 return sprintf(buf, "%d\n", target->comp_vector);
2669 static ssize_t show_tl_retry_count(struct device *dev,
2670 struct device_attribute *attr, char *buf)
2672 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2674 return sprintf(buf, "%d\n", target->tl_retry_count);
2677 static ssize_t show_cmd_sg_entries(struct device *dev,
2678 struct device_attribute *attr, char *buf)
2680 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2682 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2685 static ssize_t show_allow_ext_sg(struct device *dev,
2686 struct device_attribute *attr, char *buf)
2688 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2690 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2693 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2694 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2695 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2696 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2697 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2698 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2699 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2700 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2701 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2702 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2703 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2704 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2705 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2706 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2707 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2708 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
2710 static struct device_attribute *srp_host_attrs[] = {
2713 &dev_attr_service_id,
2717 &dev_attr_orig_dgid,
2719 &dev_attr_zero_req_lim,
2720 &dev_attr_local_ib_port,
2721 &dev_attr_local_ib_device,
2723 &dev_attr_comp_vector,
2724 &dev_attr_tl_retry_count,
2725 &dev_attr_cmd_sg_entries,
2726 &dev_attr_allow_ext_sg,
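/*
 * SCSI host template: hooks the queuecommand, queue depth, error
 * handling and sysfs entry points defined above into the SCSI midlayer.
 */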
2730 static struct scsi_host_template srp_template = {
2731 .module = THIS_MODULE,
2732 .name = "InfiniBand SRP initiator",
2733 .proc_name = DRV_NAME,
2734 .slave_configure = srp_slave_configure,
2735 .info = srp_target_info,
2736 .queuecommand = srp_queuecommand,
2737 .change_queue_depth = srp_change_queue_depth,
2738 .eh_abort_handler = srp_abort,
2739 .eh_device_reset_handler = srp_reset_device,
2740 .eh_host_reset_handler = srp_reset_host,
2741 .skip_settle_delay = true,
2742 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
2743 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
2745 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2746 .use_clustering = ENABLE_CLUSTERING,
2747 .shost_attrs = srp_host_attrs,
2749 .track_queue_depth = 1,
2752 static int srp_sdev_count(struct Scsi_Host *host)
2754 struct scsi_device *sdev;
2757 shost_for_each_device(sdev, host)
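/*
 * Register the Scsi_Host and its SRP remote port, scan for LUNs and
 * mark the target SRP_TARGET_LIVE only if it is still in the SCANNING
 * state once the scan completes; if the scan ran while no channel was
 * connected the target is queued for removal instead.
 */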
2763 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2765 struct srp_rport_identifiers ids;
2766 struct srp_rport *rport;
2768 target->state = SRP_TARGET_SCANNING;
2769 sprintf(target->target_name, "SRP.T10:%016llX",
2770 be64_to_cpu(target->id_ext));
2772 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2775 memcpy(ids.port_id, &target->id_ext, 8);
2776 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2777 ids.roles = SRP_RPORT_ROLE_TARGET;
2778 rport = srp_rport_add(target->scsi_host, &ids);
2779 if (IS_ERR(rport)) {
2780 scsi_remove_host(target->scsi_host);
2781 return PTR_ERR(rport);
2784 rport->lld_data = target;
2785 target->rport = rport;
2787 spin_lock(&host->target_lock);
2788 list_add_tail(&target->list, &host->target_list);
2789 spin_unlock(&host->target_lock);
2791 scsi_scan_target(&target->scsi_host->shost_gendev,
2792 0, target->scsi_id, SCAN_WILD_CARD, 0);
2794 if (srp_connected_ch(target) < target->ch_count ||
2795 target->qp_in_error) {
2796 shost_printk(KERN_INFO, target->scsi_host,
2797 PFX "SCSI scan failed - removing SCSI host\n");
2798 srp_queue_remove_work(target);
2802 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
2803 dev_name(&target->scsi_host->shost_gendev),
2804 srp_sdev_count(target->scsi_host));
2806 spin_lock_irq(&target->lock);
2807 if (target->state == SRP_TARGET_SCANNING)
2808 target->state = SRP_TARGET_LIVE;
2809 spin_unlock_irq(&target->lock);
2815 static void srp_release_dev(struct device *dev)
2817 struct srp_host *host =
2818 container_of(dev, struct srp_host, dev);
2820 complete(&host->released);
2823 static struct class srp_class = {
2824 .name = "infiniband_srp",
2825 .dev_release = srp_release_dev
2829 * srp_conn_unique() - check whether the connection to a target is unique
 * @host: SRP host.
2831 * @target: SRP target port.
2833 static bool srp_conn_unique(struct srp_host *host,
2834 struct srp_target_port *target)
2836 struct srp_target_port *t;
2839 if (target->state == SRP_TARGET_REMOVED)
2844 spin_lock(&host->target_lock);
2845 list_for_each_entry(t, &host->target_list, list) {
2847 target->id_ext == t->id_ext &&
2848 target->ioc_guid == t->ioc_guid &&
2849 target->initiator_ext == t->initiator_ext) {
2854 spin_unlock(&host->target_lock);
2861 * Target ports are added by writing
2863 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
2864 * pkey=<P_Key>,service_id=<service ID>
2866 * to the add_target sysfs attribute.
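 *
 * An illustrative example (the HCA name mlx4_0 and all identifiers are
 * placeholders; the directory name is created by srp_add_port() as
 * srp-<ibdev>-<port>):
 *
 *   echo id_ext=200400a0b8114abc,ioc_guid=00066a0098000123,\
 *        dgid=fe800000000000000002c903000e8acd,pkey=ffff,\
 *        service_id=0000494353535250 \
 *        > /sys/class/infiniband_srp/srp-mlx4_0-1/add_target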
2870 SRP_OPT_ID_EXT = 1 << 0,
2871 SRP_OPT_IOC_GUID = 1 << 1,
2872 SRP_OPT_DGID = 1 << 2,
2873 SRP_OPT_PKEY = 1 << 3,
2874 SRP_OPT_SERVICE_ID = 1 << 4,
2875 SRP_OPT_MAX_SECT = 1 << 5,
2876 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
2877 SRP_OPT_IO_CLASS = 1 << 7,
2878 SRP_OPT_INITIATOR_EXT = 1 << 8,
2879 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
2880 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
2881 SRP_OPT_SG_TABLESIZE = 1 << 11,
2882 SRP_OPT_COMP_VECTOR = 1 << 12,
2883 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
2884 SRP_OPT_QUEUE_SIZE = 1 << 14,
2885 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
2889 SRP_OPT_SERVICE_ID),
2892 static const match_table_t srp_opt_tokens = {
2893 { SRP_OPT_ID_EXT, "id_ext=%s" },
2894 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
2895 { SRP_OPT_DGID, "dgid=%s" },
2896 { SRP_OPT_PKEY, "pkey=%x" },
2897 { SRP_OPT_SERVICE_ID, "service_id=%s" },
2898 { SRP_OPT_MAX_SECT, "max_sect=%d" },
2899 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
2900 { SRP_OPT_IO_CLASS, "io_class=%x" },
2901 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
2902 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
2903 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
2904 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
2905 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
2906 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
2907 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
2908 { SRP_OPT_ERR, NULL }
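/*
 * Parse the comma-separated option string written to add_target and
 * fill in *target; every option in SRP_OPT_ALL is mandatory and a
 * warning is printed for each one that is missing.
 */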
2911 static int srp_parse_options(const char *buf, struct srp_target_port *target)
2913 char *options, *sep_opt;
2916 substring_t args[MAX_OPT_ARGS];
2922 options = kstrdup(buf, GFP_KERNEL);
2927 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
2931 token = match_token(p, srp_opt_tokens, args);
2935 case SRP_OPT_ID_EXT:
2936 p = match_strdup(args);
2941 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
2945 case SRP_OPT_IOC_GUID:
2946 p = match_strdup(args);
2951 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
2956 p = match_strdup(args);
2961 if (strlen(p) != 32) {
2962 pr_warn("bad dest GID parameter '%s'\n", p);
2967 for (i = 0; i < 16; ++i) {
2968 strlcpy(dgid, p + i * 2, sizeof(dgid));
2969 if (sscanf(dgid, "%hhx",
2970 &target->orig_dgid.raw[i]) < 1) {
2980 if (match_hex(args, &token)) {
2981 pr_warn("bad P_Key parameter '%s'\n", p);
2984 target->pkey = cpu_to_be16(token);
2987 case SRP_OPT_SERVICE_ID:
2988 p = match_strdup(args);
2993 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
2997 case SRP_OPT_MAX_SECT:
2998 if (match_int(args, &token)) {
2999 pr_warn("bad max sect parameter '%s'\n", p);
3002 target->scsi_host->max_sectors = token;
3005 case SRP_OPT_QUEUE_SIZE:
3006 if (match_int(args, &token) || token < 1) {
3007 pr_warn("bad queue_size parameter '%s'\n", p);
3010 target->scsi_host->can_queue = token;
3011 target->queue_size = token + SRP_RSP_SQ_SIZE +
3012 SRP_TSK_MGMT_SQ_SIZE;
3013 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3014 target->scsi_host->cmd_per_lun = token;
3017 case SRP_OPT_MAX_CMD_PER_LUN:
3018 if (match_int(args, &token) || token < 1) {
3019 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3023 target->scsi_host->cmd_per_lun = token;
3026 case SRP_OPT_IO_CLASS:
3027 if (match_hex(args, &token)) {
3028 pr_warn("bad IO class parameter '%s'\n", p);
3031 if (token != SRP_REV10_IB_IO_CLASS &&
3032 token != SRP_REV16A_IB_IO_CLASS) {
3033 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3034 token, SRP_REV10_IB_IO_CLASS,
3035 SRP_REV16A_IB_IO_CLASS);
3038 target->io_class = token;
3041 case SRP_OPT_INITIATOR_EXT:
3042 p = match_strdup(args);
3047 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3051 case SRP_OPT_CMD_SG_ENTRIES:
3052 if (match_int(args, &token) || token < 1 || token > 255) {
3053 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3057 target->cmd_sg_cnt = token;
3060 case SRP_OPT_ALLOW_EXT_SG:
3061 if (match_int(args, &token)) {
3062 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3065 target->allow_ext_sg = !!token;
3068 case SRP_OPT_SG_TABLESIZE:
3069 if (match_int(args, &token) || token < 1 ||
3070 token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
3071 pr_warn("bad max sg_tablesize parameter '%s'\n",
3075 target->sg_tablesize = token;
3078 case SRP_OPT_COMP_VECTOR:
3079 if (match_int(args, &token) || token < 0) {
3080 pr_warn("bad comp_vector parameter '%s'\n", p);
3083 target->comp_vector = token;
3086 case SRP_OPT_TL_RETRY_COUNT:
3087 if (match_int(args, &token) || token < 2 || token > 7) {
3088 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3092 target->tl_retry_count = token;
3096 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3102 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3105 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3106 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3107 !(srp_opt_tokens[i].token & opt_mask))
3108 pr_warn("target creation request is missing parameter '%s'\n",
3109 srp_opt_tokens[i].pattern);
3111 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3112 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3113 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3114 target->scsi_host->cmd_per_lun,
3115 target->scsi_host->can_queue);
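/*
 * add_target store method: allocate and set up the Scsi_Host and target
 * port, parse the options, create and connect the RDMA channels, and
 * finally register the target via srp_add_target().
 */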
3122 static ssize_t srp_create_target(struct device *dev,
3123 struct device_attribute *attr,
3124 const char *buf, size_t count)
3126 struct srp_host *host =
3127 container_of(dev, struct srp_host, dev);
3128 struct Scsi_Host *target_host;
3129 struct srp_target_port *target;
3130 struct srp_rdma_ch *ch;
3131 struct srp_device *srp_dev = host->srp_dev;
3132 struct ib_device *ibdev = srp_dev->dev;
3133 int ret, node_idx, node, cpu, i;
3134 bool multich = false;
3136 target_host = scsi_host_alloc(&srp_template,
3137 sizeof (struct srp_target_port));
3141 target_host->transportt = ib_srp_transport_template;
3142 target_host->max_channel = 0;
3143 target_host->max_id = 1;
3144 target_host->max_lun = -1LL;
3145 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3147 target = host_to_target(target_host);
3149 target->io_class = SRP_REV16A_IB_IO_CLASS;
3150 target->scsi_host = target_host;
3151 target->srp_host = host;
3152 target->lkey = host->srp_dev->mr->lkey;
3153 target->rkey = host->srp_dev->mr->rkey;
3154 target->cmd_sg_cnt = cmd_sg_entries;
3155 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3156 target->allow_ext_sg = allow_ext_sg;
3157 target->tl_retry_count = 7;
3158 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3161 * Prevent the SCSI host from being removed by srp_remove_target()
3162 * before this function returns.
3164 scsi_host_get(target->scsi_host);
3166 mutex_lock(&host->add_target_mutex);
3168 ret = srp_parse_options(buf, target);
3172 ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
3176 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3178 if (!srp_conn_unique(target->srp_host, target)) {
3179 shost_printk(KERN_INFO, target->scsi_host,
3180 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3181 be64_to_cpu(target->id_ext),
3182 be64_to_cpu(target->ioc_guid),
3183 be64_to_cpu(target->initiator_ext));
3188 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3189 target->cmd_sg_cnt < target->sg_tablesize) {
3190 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3191 target->sg_tablesize = target->cmd_sg_cnt;
3194 target_host->sg_tablesize = target->sg_tablesize;
3195 target->indirect_size = target->sg_tablesize *
3196 sizeof (struct srp_direct_buf);
3197 target->max_iu_len = sizeof (struct srp_cmd) +
3198 sizeof (struct srp_indirect_buf) +
3199 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3201 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3202 INIT_WORK(&target->remove_work, srp_remove_work);
3203 spin_lock_init(&target->lock);
3204 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
3209 target->ch_count = max_t(unsigned, num_online_nodes(),
3211 min(4 * num_online_nodes(),
3212 ibdev->num_comp_vectors),
3213 num_online_cpus()));
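/*
 * Allocate target->ch[] and distribute the channels evenly over the
 * online NUMA nodes: each node gets the slice [ch_start, ch_end) of the
 * channel array and a matching slice of the device's completion
 * vectors, and within a node one channel is set up per online CPU until
 * the node's share is exhausted.
 */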
3214 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3220 for_each_online_node(node) {
3221 const int ch_start = (node_idx * target->ch_count /
3222 num_online_nodes());
3223 const int ch_end = ((node_idx + 1) * target->ch_count /
3224 num_online_nodes());
3225 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3226 num_online_nodes() + target->comp_vector)
3227 % ibdev->num_comp_vectors;
3228 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3229 num_online_nodes() + target->comp_vector)
3230 % ibdev->num_comp_vectors;
3233 for_each_online_cpu(cpu) {
3234 if (cpu_to_node(cpu) != node)
3236 if (ch_start + cpu_idx >= ch_end)
3238 ch = &target->ch[ch_start + cpu_idx];
3239 ch->target = target;
3240 ch->comp_vector = cv_start == cv_end ? cv_start :
3241 cv_start + cpu_idx % (cv_end - cv_start);
3242 spin_lock_init(&ch->lock);
3243 INIT_LIST_HEAD(&ch->free_tx);
3244 ret = srp_new_cm_id(ch);
3246 goto err_disconnect;
3248 ret = srp_create_ch_ib(ch);
3250 goto err_disconnect;
3252 ret = srp_alloc_req_data(ch);
3254 goto err_disconnect;
3256 ret = srp_connect_ch(ch, multich);
3258 shost_printk(KERN_ERR, target->scsi_host,
3259 PFX "Connection %d/%d failed\n",
3262 if (node_idx == 0 && cpu_idx == 0) {
3263 goto err_disconnect;
3265 srp_free_ch_ib(target, ch);
3266 srp_free_req_data(target, ch);
3267 target->ch_count = ch - target->ch;
3278 target->scsi_host->nr_hw_queues = target->ch_count;
3280 ret = srp_add_target(host, target);
3282 goto err_disconnect;
3284 if (target->state != SRP_TARGET_REMOVED) {
3285 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3286 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3287 be64_to_cpu(target->id_ext),
3288 be64_to_cpu(target->ioc_guid),
3289 be16_to_cpu(target->pkey),
3290 be64_to_cpu(target->service_id),
3291 target->sgid.raw, target->orig_dgid.raw);
3297 mutex_unlock(&host->add_target_mutex);
3299 scsi_host_put(target->scsi_host);
3304 srp_disconnect_target(target);
3306 for (i = 0; i < target->ch_count; i++) {
3307 ch = &target->ch[i];
3308 srp_free_ch_ib(target, ch);
3309 srp_free_req_data(target, ch);
3316 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3318 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3321 struct srp_host *host = container_of(dev, struct srp_host, dev);
3323 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3326 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3328 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3331 struct srp_host *host = container_of(dev, struct srp_host, dev);
3333 return sprintf(buf, "%d\n", host->port);
3336 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
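/*
 * Allocate a struct srp_host for one (HCA, port) pair, register it as
 * srp-<ibdev>-<port> in the infiniband_srp class and create its
 * add_target, ibdev and port attributes.
 */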
3338 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3340 struct srp_host *host;
3342 host = kzalloc(sizeof *host, GFP_KERNEL);
3346 INIT_LIST_HEAD(&host->target_list);
3347 spin_lock_init(&host->target_lock);
3348 init_completion(&host->released);
3349 mutex_init(&host->add_target_mutex);
3350 host->srp_dev = device;
3353 host->dev.class = &srp_class;
3354 host->dev.parent = device->dev->dma_device;
3355 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3357 if (device_register(&host->dev))
3359 if (device_create_file(&host->dev, &dev_attr_add_target))
3361 if (device_create_file(&host->dev, &dev_attr_ibdev))
3363 if (device_create_file(&host->dev, &dev_attr_port))
3369 device_unregister(&host->dev);
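/*
 * Per-HCA initialization: query the device attributes, detect FMR and
 * fast registration support, derive the MR page size and the maximum
 * number of pages per MR, allocate a PD and a DMA MR, and register one
 * srp_host per physical port.
 */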
3377 static void srp_add_one(struct ib_device *device)
3379 struct srp_device *srp_dev;
3380 struct ib_device_attr *dev_attr;
3381 struct srp_host *host;
3382 int mr_page_shift, s, e, p;
3383 u64 max_pages_per_mr;
3385 dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
3389 if (ib_query_device(device, dev_attr)) {
3390 pr_warn("Query device failed for %s\n", device->name);
3394 srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
3398 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3399 device->map_phys_fmr && device->unmap_fmr);
3400 srp_dev->has_fr = (dev_attr->device_cap_flags &
3401 IB_DEVICE_MEM_MGT_EXTENSIONS);
3402 if (!srp_dev->has_fmr && !srp_dev->has_fr)
3403 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3405 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3406 (!srp_dev->has_fmr || prefer_fr));
3409 * Use the smallest page size supported by the HCA, down to a
3410 * minimum of 4096 bytes. We're unlikely to build large sglists
3411 * out of smaller entries.
3413 mr_page_shift = max(12, ffs(dev_attr->page_size_cap) - 1);
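/*
 * Worked example: a page_size_cap of 0x001ff000 (4 KiB .. 1 MiB) gives
 * ffs() == 13, so mr_page_shift == max(12, 12) == 12 and
 * mr_page_size == 4096.
 */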
3414 srp_dev->mr_page_size = 1 << mr_page_shift;
3415 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3416 max_pages_per_mr = dev_attr->max_mr_size;
3417 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3418 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3420 if (srp_dev->use_fast_reg) {
3421 srp_dev->max_pages_per_mr =
3422 min_t(u32, srp_dev->max_pages_per_mr,
3423 dev_attr->max_fast_reg_page_list_len);
3425 srp_dev->mr_max_size = srp_dev->mr_page_size *
3426 srp_dev->max_pages_per_mr;
3427 pr_debug("%s: mr_page_shift = %d, dev_attr->max_mr_size = %#llx, dev_attr->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3428 device->name, mr_page_shift, dev_attr->max_mr_size,
3429 dev_attr->max_fast_reg_page_list_len,
3430 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3432 INIT_LIST_HEAD(&srp_dev->dev_list);
3434 srp_dev->dev = device;
3435 srp_dev->pd = ib_alloc_pd(device);
3436 if (IS_ERR(srp_dev->pd))
3439 srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
3440 IB_ACCESS_LOCAL_WRITE |
3441 IB_ACCESS_REMOTE_READ |
3442 IB_ACCESS_REMOTE_WRITE);
3443 if (IS_ERR(srp_dev->mr))
3446 if (device->node_type == RDMA_NODE_IB_SWITCH) {
3451 e = device->phys_port_cnt;
3454 for (p = s; p <= e; ++p) {
3455 host = srp_add_port(srp_dev, p);
3457 list_add_tail(&host->list, &srp_dev->dev_list);
3460 ib_set_client_data(device, &srp_client, srp_dev);
3465 ib_dealloc_pd(srp_dev->pd);
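/*
 * Per-HCA teardown: unregister every srp_host (which also blocks new
 * add_target writes), queue removal of all target ports, flush the work
 * queues that carry the removal and transport error work, then release
 * the MR and PD.
 */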
3474 static void srp_remove_one(struct ib_device *device)
3476 struct srp_device *srp_dev;
3477 struct srp_host *host, *tmp_host;
3478 struct srp_target_port *target;
3480 srp_dev = ib_get_client_data(device, &srp_client);
3484 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3485 device_unregister(&host->dev);
3487 * Wait for the sysfs entry to go away, so that no new
3488 * target ports can be created.
3490 wait_for_completion(&host->released);
3493 * Remove all target ports.
3495 spin_lock(&host->target_lock);
3496 list_for_each_entry(target, &host->target_list, list)
3497 srp_queue_remove_work(target);
3498 spin_unlock(&host->target_lock);
3501 * Wait for tl_err and target port removal tasks.
3503 flush_workqueue(system_long_wq);
3504 flush_workqueue(srp_remove_wq);
3509 ib_dereg_mr(srp_dev->mr);
3510 ib_dealloc_pd(srp_dev->pd);
3515 static struct srp_function_template ib_srp_transport_functions = {
3516 .has_rport_state = true,
3517 .reset_timer_if_blocked = true,
3518 .reconnect_delay = &srp_reconnect_delay,
3519 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3520 .dev_loss_tmo = &srp_dev_loss_tmo,
3521 .reconnect = srp_rport_reconnect,
3522 .rport_delete = srp_rport_delete,
3523 .terminate_rport_io = srp_terminate_io,
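/*
 * Module initialization: validate and clamp the scatter/gather module
 * parameters, create the srp_remove workqueue, attach the SRP transport
 * template, register the infiniband_srp class and finally register with
 * the SA and IB cores.
 */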
3526 static int __init srp_init_module(void)
3530 BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
3532 if (srp_sg_tablesize) {
3533 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3534 if (!cmd_sg_entries)
3535 cmd_sg_entries = srp_sg_tablesize;
3538 if (!cmd_sg_entries)
3539 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3541 if (cmd_sg_entries > 255) {
3542 pr_warn("Clamping cmd_sg_entries to 255\n");
3543 cmd_sg_entries = 255;
3546 if (!indirect_sg_entries)
3547 indirect_sg_entries = cmd_sg_entries;
3548 else if (indirect_sg_entries < cmd_sg_entries) {
3549 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3551 indirect_sg_entries = cmd_sg_entries;
3554 srp_remove_wq = create_workqueue("srp_remove");
3555 if (!srp_remove_wq) {
3561 ib_srp_transport_template =
3562 srp_attach_transport(&ib_srp_transport_functions);
3563 if (!ib_srp_transport_template)
3566 ret = class_register(&srp_class);
3568 pr_err("couldn't register class infiniband_srp\n");
3572 ib_sa_register_client(&srp_sa_client);
3574 ret = ib_register_client(&srp_client);
3576 pr_err("couldn't register IB client\n");
3584 ib_sa_unregister_client(&srp_sa_client);
3585 class_unregister(&srp_class);
3588 srp_release_transport(ib_srp_transport_template);
3591 destroy_workqueue(srp_remove_wq);
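/* Module exit: undo srp_init_module() in reverse order. */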
3595 static void __exit srp_cleanup_module(void)
3597 ib_unregister_client(&srp_client);
3598 ib_sa_unregister_client(&srp_sa_client);
3599 class_unregister(&srp_class);
3600 srp_release_transport(ib_srp_transport_template);
3601 destroy_workqueue(srp_remove_wq);
3604 module_init(srp_init_module);
3605 module_exit(srp_cleanup_module);