2 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the BSD-type
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
14 * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
17 * Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
22 * Neither the name of the Network Appliance, Inc. nor the names of
23 * its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 * Encapsulates the major functions managing:
50 #include <linux/interrupt.h>
51 #include <linux/pci.h> /* for Tavor hack below */
52 #include <linux/slab.h>
54 #include "xprt_rdma.h"
61 # define RPCDBG_FACILITY RPCDBG_TRANS
69 * Handle replies in tasklet context, using a single, global list.
70 * The tasklet function simply turns around and calls the registered
71 * function for each reply on the list.
74 static DEFINE_SPINLOCK(rpcrdma_tk_lock_g);
75 static LIST_HEAD(rpcrdma_tasklets_g);
78 rpcrdma_run_tasklet(unsigned long data)
80 struct rpcrdma_rep *rep;
81 void (*func)(struct rpcrdma_rep *);
85 spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
86 while (!list_empty(&rpcrdma_tasklets_g)) {
87 rep = list_entry(rpcrdma_tasklets_g.next,
88 struct rpcrdma_rep, rr_list);
89 list_del(&rep->rr_list);
92 spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
97 rpcrdma_recv_buffer_put(rep);
99 spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
101 spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
104 static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);
107 rpcrdma_schedule_tasklet(struct rpcrdma_rep *rep)
111 spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
112 list_add_tail(&rep->rr_list, &rpcrdma_tasklets_g);
113 spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
114 tasklet_schedule(&rpcrdma_tasklet_g);
118 rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
120 struct rpcrdma_ep *ep = context;
122 dprintk("RPC: %s: QP error %X on device %s ep %p\n",
123 __func__, event->event, event->device->name, context);
124 if (ep->rep_connected == 1) {
125 ep->rep_connected = -EIO;
127 wake_up_all(&ep->rep_connect_wait);
132 rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
134 struct rpcrdma_ep *ep = context;
136 dprintk("RPC: %s: CQ error %X on device %s ep %p\n",
137 __func__, event->event, event->device->name, context);
138 if (ep->rep_connected == 1) {
139 ep->rep_connected = -EIO;
141 wake_up_all(&ep->rep_connect_wait);
146 void rpcrdma_event_process(struct ib_wc *wc)
148 struct rpcrdma_mw *frmr;
149 struct rpcrdma_rep *rep =
150 (struct rpcrdma_rep *)(unsigned long) wc->wr_id;
152 dprintk("RPC: %s: event rep %p status %X opcode %X length %u\n",
153 __func__, rep, wc->status, wc->opcode, wc->byte_len);
155 if (!rep) /* send or bind completion that we don't care about */
158 if (IB_WC_SUCCESS != wc->status) {
159 dprintk("RPC: %s: WC opcode %d status %X, connection lost\n",
160 __func__, wc->opcode, wc->status);
162 if (wc->opcode != IB_WC_FAST_REG_MR && wc->opcode != IB_WC_LOCAL_INV)
163 rpcrdma_schedule_tasklet(rep);
167 switch (wc->opcode) {
168 case IB_WC_FAST_REG_MR:
169 frmr = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
170 frmr->r.frmr.state = FRMR_IS_VALID;
172 case IB_WC_LOCAL_INV:
173 frmr = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
174 frmr->r.frmr.state = FRMR_IS_INVALID;
177 rep->rr_len = wc->byte_len;
178 ib_dma_sync_single_for_cpu(
179 rdmab_to_ia(rep->rr_buffer)->ri_id->device,
180 rep->rr_iov.addr, rep->rr_len, DMA_FROM_DEVICE);
181 * Keep (only) the most recent credits, after checking validity
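/* (rm_credit is the third 32-bit word of the RPC/RDMA header -- xid,
 * vers, credits, type -- so a reply shorter than 16 bytes cannot carry
 * a valid credit value and is ignored for credit purposes.)
 */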
182 if (rep->rr_len >= 16) {
183 struct rpcrdma_msg *p =
184 (struct rpcrdma_msg *) rep->rr_base;
185 unsigned int credits = ntohl(p->rm_credit);
187 dprintk("RPC: %s: server"
188 " dropped credits to 0!\n", __func__);
191 } else if (credits > rep->rr_buffer->rb_max_requests) {
192 dprintk("RPC: %s: server"
193 " over-crediting: %d (%d)\n",
195 rep->rr_buffer->rb_max_requests);
196 credits = rep->rr_buffer->rb_max_requests;
198 atomic_set(&rep->rr_buffer->rb_credits, credits);
202 rpcrdma_schedule_tasklet(rep);
205 dprintk("RPC: %s: unexpected WC event %X\n",
206 __func__, wc->opcode);
212 rpcrdma_cq_poll(struct ib_cq *cq)
218 rc = ib_poll_cq(cq, 1, &wc);
220 dprintk("RPC: %s: ib_poll_cq failed %i\n",
227 rpcrdma_event_process(&wc);
234 * rpcrdma_cq_event_upcall
236 * This upcall handles recv, send, bind and unbind events.
237 * It is reentrant but processes events one at a time, in order to
238 * preserve the ordering of receives on which server credit accounting depends.
240 * It is the responsibility of the scheduled tasklet to return
241 * recv buffers to the pool. NOTE: this affects synchronization of
242 * connection shutdown. That is, the structures required for
243 * the completion of the reply handler must remain intact until
244 * all memory has been reclaimed.
246 * Note that send events are suppressed and do not result in an upcall.
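 * A completion that arrives after the CQ has been drained but before
 * ib_req_notify_cq() re-arms it raises no new interrupt; the handler
 * therefore polls the CQ once more after re-arming so such events are
 * not stranded until the next upcall.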
249 rpcrdma_cq_event_upcall(struct ib_cq *cq, void *context)
253 rc = rpcrdma_cq_poll(cq);
257 rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
259 dprintk("RPC: %s: ib_req_notify_cq failed %i\n",
268 static const char * const conn[] = {
285 rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
287 struct rpcrdma_xprt *xprt = id->context;
288 struct rpcrdma_ia *ia = &xprt->rx_ia;
289 struct rpcrdma_ep *ep = &xprt->rx_ep;
291 struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr;
293 struct ib_qp_attr attr;
294 struct ib_qp_init_attr iattr;
297 switch (event->event) {
298 case RDMA_CM_EVENT_ADDR_RESOLVED:
299 case RDMA_CM_EVENT_ROUTE_RESOLVED:
301 complete(&ia->ri_done);
303 case RDMA_CM_EVENT_ADDR_ERROR:
304 ia->ri_async_rc = -EHOSTUNREACH;
305 dprintk("RPC: %s: CM address resolution error, ep 0x%p\n",
307 complete(&ia->ri_done);
309 case RDMA_CM_EVENT_ROUTE_ERROR:
310 ia->ri_async_rc = -ENETUNREACH;
311 dprintk("RPC: %s: CM route resolution error, ep 0x%p\n",
313 complete(&ia->ri_done);
315 case RDMA_CM_EVENT_ESTABLISHED:
317 ib_query_qp(ia->ri_id->qp, &attr,
318 IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
320 dprintk("RPC: %s: %d responder resources"
322 __func__, attr.max_dest_rd_atomic, attr.max_rd_atomic);
324 case RDMA_CM_EVENT_CONNECT_ERROR:
325 connstate = -ENOTCONN;
327 case RDMA_CM_EVENT_UNREACHABLE:
328 connstate = -ENETDOWN;
330 case RDMA_CM_EVENT_REJECTED:
331 connstate = -ECONNREFUSED;
333 case RDMA_CM_EVENT_DISCONNECTED:
334 connstate = -ECONNABORTED;
336 case RDMA_CM_EVENT_DEVICE_REMOVAL:
339 dprintk("RPC: %s: %s: %pI4:%u (ep 0x%p event 0x%x)\n",
341 (event->event <= 11) ? conn[event->event] :
342 "unknown connection error",
343 &addr->sin_addr.s_addr,
344 ntohs(addr->sin_port),
346 atomic_set(&rpcx_to_rdmax(ep->rep_xprt)->rx_buf.rb_credits, 1);
347 dprintk("RPC: %s: %sconnected\n",
348 __func__, connstate > 0 ? "" : "dis");
349 ep->rep_connected = connstate;
351 wake_up_all(&ep->rep_connect_wait);
354 dprintk("RPC: %s: unexpected CM event %d\n",
355 __func__, event->event);
360 if (connstate == 1) {
361 int ird = attr.max_dest_rd_atomic;
362 int tird = ep->rep_remote_cma.responder_resources;
363 printk(KERN_INFO "rpcrdma: connection to %pI4:%u "
364 "on %s, memreg %d slots %d ird %d%s\n",
365 &addr->sin_addr.s_addr,
366 ntohs(addr->sin_port),
367 ia->ri_id->device->name,
368 ia->ri_memreg_strategy,
369 xprt->rx_buf.rb_max_requests,
370 ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
371 } else if (connstate < 0) {
372 printk(KERN_INFO "rpcrdma: connection to %pI4:%u closed (%d)\n",
373 &addr->sin_addr.s_addr,
374 ntohs(addr->sin_port),
382 static struct rdma_cm_id *
383 rpcrdma_create_id(struct rpcrdma_xprt *xprt,
384 struct rpcrdma_ia *ia, struct sockaddr *addr)
386 struct rdma_cm_id *id;
389 init_completion(&ia->ri_done);
391 id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
394 dprintk("RPC: %s: rdma_create_id() failed %i\n",
399 ia->ri_async_rc = -ETIMEDOUT;
400 rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
402 dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
406 wait_for_completion_interruptible_timeout(&ia->ri_done,
407 msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
408 rc = ia->ri_async_rc;
412 ia->ri_async_rc = -ETIMEDOUT;
413 rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
415 dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
419 wait_for_completion_interruptible_timeout(&ia->ri_done,
420 msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
421 rc = ia->ri_async_rc;
433 * Drain any cq, prior to teardown.
436 rpcrdma_clean_cq(struct ib_cq *cq)
441 while (1 == ib_poll_cq(cq, 1, &wc))
445 dprintk("RPC: %s: flushed %d events (last 0x%x)\n",
446 __func__, count, wc.opcode);
450 * Exported functions.
454 * Open and initialize an Interface Adapter.
455 * o initializes fields of struct rpcrdma_ia, including
456 * interface and provider attributes and protection zone.
459 rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
462 struct ib_device_attr devattr;
463 struct rpcrdma_ia *ia = &xprt->rx_ia;
465 ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
466 if (IS_ERR(ia->ri_id)) {
467 rc = PTR_ERR(ia->ri_id);
471 ia->ri_pd = ib_alloc_pd(ia->ri_id->device);
472 if (IS_ERR(ia->ri_pd)) {
473 rc = PTR_ERR(ia->ri_pd);
474 dprintk("RPC: %s: ib_alloc_pd() failed %i\n",
480 * Query the device to determine if the requested memory
481 * registration strategy is supported. If it isn't, set the
482 * strategy to a globally supported model.
484 rc = ib_query_device(ia->ri_id->device, &devattr);
486 dprintk("RPC: %s: ib_query_device failed %d\n",
491 if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
492 ia->ri_have_dma_lkey = 1;
493 ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
497 case RPCRDMA_MEMWINDOWS:
498 case RPCRDMA_MEMWINDOWS_ASYNC:
499 if (!(devattr.device_cap_flags & IB_DEVICE_MEM_WINDOW)) {
500 dprintk("RPC: %s: MEMWINDOWS registration "
501 "specified but not supported by adapter, "
502 "using slower RPCRDMA_REGISTER\n",
504 memreg = RPCRDMA_REGISTER;
507 case RPCRDMA_MTHCAFMR:
508 if (!ia->ri_id->device->alloc_fmr) {
509 #if RPCRDMA_PERSISTENT_REGISTRATION
510 dprintk("RPC: %s: MTHCAFMR registration "
511 "specified but not supported by adapter, "
512 "using riskier RPCRDMA_ALLPHYSICAL\n",
514 memreg = RPCRDMA_ALLPHYSICAL;
516 dprintk("RPC: %s: MTHCAFMR registration "
517 "specified but not supported by adapter, "
518 "using slower RPCRDMA_REGISTER\n",
520 memreg = RPCRDMA_REGISTER;
525 /* Requires both frmr reg and local dma lkey */
526 if ((devattr.device_cap_flags &
527 (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
528 (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) {
529 #if RPCRDMA_PERSISTENT_REGISTRATION
530 dprintk("RPC: %s: FRMR registration "
531 "specified but not supported by adapter, "
532 "using riskier RPCRDMA_ALLPHYSICAL\n",
534 memreg = RPCRDMA_ALLPHYSICAL;
536 dprintk("RPC: %s: FRMR registration "
537 "specified but not supported by adapter, "
538 "using slower RPCRDMA_REGISTER\n",
540 memreg = RPCRDMA_REGISTER;
543 /* Mind the ia limit on FRMR page list depth */
544 ia->ri_max_frmr_depth = min_t(unsigned int,
545 RPCRDMA_MAX_DATA_SEGS,
546 devattr.max_fast_reg_page_list_len);
552 * Optionally obtain an underlying physical identity mapping in
553 * order to do a memory window-based bind. This base registration
554 * is protected from remote access - that is enabled only by binding
555 * for the specific bytes targeted during each RPC operation, and
556 * revoked after the corresponding completion, similar to a storage adapter.
560 case RPCRDMA_BOUNCEBUFFERS:
561 case RPCRDMA_REGISTER:
564 #if RPCRDMA_PERSISTENT_REGISTRATION
565 case RPCRDMA_ALLPHYSICAL:
566 mem_priv = IB_ACCESS_LOCAL_WRITE |
567 IB_ACCESS_REMOTE_WRITE |
568 IB_ACCESS_REMOTE_READ;
571 case RPCRDMA_MEMWINDOWS_ASYNC:
572 case RPCRDMA_MEMWINDOWS:
573 mem_priv = IB_ACCESS_LOCAL_WRITE |
576 case RPCRDMA_MTHCAFMR:
577 if (ia->ri_have_dma_lkey)
579 mem_priv = IB_ACCESS_LOCAL_WRITE;
581 ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
582 if (IS_ERR(ia->ri_bind_mem)) {
583 printk(KERN_ALERT "%s: ib_get_dma_mr for "
584 "phys register failed with %lX\n\t"
585 "Will continue with degraded performance\n",
586 __func__, PTR_ERR(ia->ri_bind_mem));
587 memreg = RPCRDMA_REGISTER;
588 ia->ri_bind_mem = NULL;
592 printk(KERN_ERR "%s: invalid memory registration mode %d\n",
597 dprintk("RPC: %s: memory registration strategy is %d\n",
600 /* Else will do memory reg/dereg for each chunk */
601 ia->ri_memreg_strategy = memreg;
605 rdma_destroy_id(ia->ri_id);
612 * Clean up/close an IA.
613 * o if event handles and PD have been initialized, free them.
617 rpcrdma_ia_close(struct rpcrdma_ia *ia)
621 dprintk("RPC: %s: entering\n", __func__);
622 if (ia->ri_bind_mem != NULL) {
623 rc = ib_dereg_mr(ia->ri_bind_mem);
624 dprintk("RPC: %s: ib_dereg_mr returned %i\n",
627 if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
629 rdma_destroy_qp(ia->ri_id);
630 rdma_destroy_id(ia->ri_id);
633 if (ia->ri_pd != NULL && !IS_ERR(ia->ri_pd)) {
634 rc = ib_dealloc_pd(ia->ri_pd);
635 dprintk("RPC: %s: ib_dealloc_pd returned %i\n",
641 * Create unconnected endpoint.
644 rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
645 struct rpcrdma_create_data_internal *cdata)
647 struct ib_device_attr devattr;
650 rc = ib_query_device(ia->ri_id->device, &devattr);
652 dprintk("RPC: %s: ib_query_device failed %d\n",
657 /* check provider's send/recv wr limits */
658 if (cdata->max_requests > devattr.max_qp_wr)
659 cdata->max_requests = devattr.max_qp_wr;
661 ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
662 ep->rep_attr.qp_context = ep;
663 /* send_cq and recv_cq initialized below */
664 ep->rep_attr.srq = NULL;
665 ep->rep_attr.cap.max_send_wr = cdata->max_requests;
666 switch (ia->ri_memreg_strategy) {
670 /* Add room for frmr register and invalidate WRs.
671 * 1. FRMR reg WR for head
672 * 2. FRMR invalidate WR for head
673 * 3. N FRMR reg WRs for pagelist
674 * 4. N FRMR invalidate WRs for pagelist
675 * 5. FRMR reg WR for tail
676 * 6. FRMR invalidate WR for tail
677 * 7. The RDMA_SEND WR
680 /* Calculate N if the device max FRMR depth is smaller than
681 * RPCRDMA_MAX_DATA_SEGS.
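 * Illustrative arithmetic (values assumed for the example only): with
 * RPCRDMA_MAX_DATA_SEGS = 64 and a device FRMR depth of 16, delta
 * starts at 48 and the loop below runs three times, so depth becomes
 * 7 + (3 * 2) = 13 send WRs reserved per request.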
683 if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
684 int delta = RPCRDMA_MAX_DATA_SEGS -
685 ia->ri_max_frmr_depth;
688 depth += 2; /* FRMR reg + invalidate */
689 delta -= ia->ri_max_frmr_depth;
693 ep->rep_attr.cap.max_send_wr *= depth;
694 if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) {
695 cdata->max_requests = devattr.max_qp_wr / depth;
696 if (!cdata->max_requests)
698 ep->rep_attr.cap.max_send_wr = cdata->max_requests *
703 case RPCRDMA_MEMWINDOWS_ASYNC:
704 case RPCRDMA_MEMWINDOWS:
705 /* Add room for mw_binds+unbinds - overkill! */
706 ep->rep_attr.cap.max_send_wr++;
707 ep->rep_attr.cap.max_send_wr *= (2 * RPCRDMA_MAX_SEGS);
708 if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr)
714 ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
715 ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
716 ep->rep_attr.cap.max_recv_sge = 1;
717 ep->rep_attr.cap.max_inline_data = 0;
718 ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
719 ep->rep_attr.qp_type = IB_QPT_RC;
720 ep->rep_attr.port_num = ~0;
722 dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
723 "iovs: send %d recv %d\n",
725 ep->rep_attr.cap.max_send_wr,
726 ep->rep_attr.cap.max_recv_wr,
727 ep->rep_attr.cap.max_send_sge,
728 ep->rep_attr.cap.max_recv_sge);
730 /* set trigger for requesting send completion */
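/* Roughly one send WR in every rep_cqinit is posted signaled: in
 * rpcrdma_ep_post(), DECR_CQCOUNT() counts this value down and, when it
 * is exhausted, the WR is flagged IB_SEND_SIGNALED so the provider can
 * retire the unsignaled sends queued in between. (DECR_CQCOUNT() and
 * INIT_CQCOUNT() are assumed to be the countdown helpers from
 * xprt_rdma.h.)
 */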
731 ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 /* - 1*/;
732 switch (ia->ri_memreg_strategy) {
733 case RPCRDMA_MEMWINDOWS_ASYNC:
734 case RPCRDMA_MEMWINDOWS:
735 ep->rep_cqinit -= RPCRDMA_MAX_SEGS;
740 if (ep->rep_cqinit <= 2)
744 init_waitqueue_head(&ep->rep_connect_wait);
745 INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
748 * Create a single cq for receive dto and mw_bind (only ever
749 * care about unbind, really). Send completions are suppressed.
750 * Use single threaded tasklet upcalls to maintain ordering.
752 ep->rep_cq = ib_create_cq(ia->ri_id->device, rpcrdma_cq_event_upcall,
753 rpcrdma_cq_async_error_upcall, NULL,
754 ep->rep_attr.cap.max_recv_wr +
755 ep->rep_attr.cap.max_send_wr + 1, 0);
756 if (IS_ERR(ep->rep_cq)) {
757 rc = PTR_ERR(ep->rep_cq);
758 dprintk("RPC: %s: ib_create_cq failed: %i\n",
763 rc = ib_req_notify_cq(ep->rep_cq, IB_CQ_NEXT_COMP);
765 dprintk("RPC: %s: ib_req_notify_cq failed: %i\n",
770 ep->rep_attr.send_cq = ep->rep_cq;
771 ep->rep_attr.recv_cq = ep->rep_cq;
773 /* Initialize cma parameters */
775 /* RPC/RDMA does not use private data */
776 ep->rep_remote_cma.private_data = NULL;
777 ep->rep_remote_cma.private_data_len = 0;
779 /* Client offers RDMA Read but does not initiate */
780 ep->rep_remote_cma.initiator_depth = 0;
781 if (ia->ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS)
782 ep->rep_remote_cma.responder_resources = 0;
783 else if (devattr.max_qp_rd_atom > 32) /* arbitrary but <= 255 */
784 ep->rep_remote_cma.responder_resources = 32;
786 ep->rep_remote_cma.responder_resources = devattr.max_qp_rd_atom;
788 ep->rep_remote_cma.retry_count = 7;
789 ep->rep_remote_cma.flow_control = 0;
790 ep->rep_remote_cma.rnr_retry_count = 0;
795 err = ib_destroy_cq(ep->rep_cq);
797 dprintk("RPC: %s: ib_destroy_cq returned %i\n",
806 * Disconnect and destroy endpoint. After this, the only
807 * valid operations on the ep are to free it (if dynamically
808 * allocated) or re-create it.
810 * The caller's error handling must be sure to not leak the endpoint
811 * if this function fails.
814 rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
818 dprintk("RPC: %s: entering, connected is %d\n",
819 __func__, ep->rep_connected);
821 cancel_delayed_work_sync(&ep->rep_connect_worker);
824 rc = rpcrdma_ep_disconnect(ep, ia);
826 dprintk("RPC: %s: rpcrdma_ep_disconnect"
827 " returned %i\n", __func__, rc);
828 rdma_destroy_qp(ia->ri_id);
829 ia->ri_id->qp = NULL;
832 /* padding - could be done in rpcrdma_buffer_destroy... */
833 if (ep->rep_pad_mr) {
834 rpcrdma_deregister_internal(ia, ep->rep_pad_mr, &ep->rep_pad);
835 ep->rep_pad_mr = NULL;
838 rpcrdma_clean_cq(ep->rep_cq);
839 rc = ib_destroy_cq(ep->rep_cq);
841 dprintk("RPC: %s: ib_destroy_cq returned %i\n",
848 * Connect unconnected endpoint.
851 rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
853 struct rdma_cm_id *id;
857 if (ep->rep_connected != 0) {
858 struct rpcrdma_xprt *xprt;
860 rc = rpcrdma_ep_disconnect(ep, ia);
861 if (rc && rc != -ENOTCONN)
862 dprintk("RPC: %s: rpcrdma_ep_disconnect"
863 " status %i\n", __func__, rc);
864 rpcrdma_clean_cq(ep->rep_cq);
866 xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
867 id = rpcrdma_create_id(xprt, ia,
868 (struct sockaddr *)&xprt->rx_data.addr);
873 /* TEMP TEMP TEMP - fail if new device:
874 * Deregister/remarshal *all* requests!
875 * Close and recreate adapter, pd, etc!
876 * Re-determine all attributes still sane!
877 * More stuff I haven't thought of!
880 if (ia->ri_id->device != id->device) {
881 printk("RPC: %s: can't reconnect on "
882 "different device!\n", __func__);
888 rdma_destroy_qp(ia->ri_id);
889 rdma_destroy_id(ia->ri_id);
893 rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
895 dprintk("RPC: %s: rdma_create_qp failed %i\n",
900 /* XXX Tavor device performs badly with 2K MTU! */
901 if (strnicmp(ia->ri_id->device->dma_device->bus->name, "pci", 3) == 0) {
902 struct pci_dev *pcid = to_pci_dev(ia->ri_id->device->dma_device);
903 if (pcid->device == PCI_DEVICE_ID_MELLANOX_TAVOR &&
904 (pcid->vendor == PCI_VENDOR_ID_MELLANOX ||
905 pcid->vendor == PCI_VENDOR_ID_TOPSPIN)) {
906 struct ib_qp_attr attr = {
907 .path_mtu = IB_MTU_1024
909 rc = ib_modify_qp(ia->ri_id->qp, &attr, IB_QP_PATH_MTU);
913 ep->rep_connected = 0;
915 rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
917 dprintk("RPC: %s: rdma_connect() failed with %i\n",
922 wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
925 * Check state. A non-peer reject indicates no listener
926 * (ECONNREFUSED), which may be a transient state. All
927 * others indicate a transport condition which has already
928 * undergone a best-effort connection attempt.
930 if (ep->rep_connected == -ECONNREFUSED &&
931 ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
932 dprintk("RPC: %s: non-peer_reject, retry\n", __func__);
935 if (ep->rep_connected <= 0) {
936 /* Sometimes, the only way to reliably connect to remote
937 * CMs is to use the same nonzero values for ORD and IRD. */
938 if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
939 (ep->rep_remote_cma.responder_resources == 0 ||
940 ep->rep_remote_cma.initiator_depth !=
941 ep->rep_remote_cma.responder_resources)) {
942 if (ep->rep_remote_cma.responder_resources == 0)
943 ep->rep_remote_cma.responder_resources = 1;
944 ep->rep_remote_cma.initiator_depth =
945 ep->rep_remote_cma.responder_resources;
948 rc = ep->rep_connected;
950 dprintk("RPC: %s: connected\n", __func__);
955 ep->rep_connected = rc;
960 * rpcrdma_ep_disconnect
962 * This is separate from destroy to facilitate the ability
963 * to reconnect without recreating the endpoint.
965 * This call is not reentrant, and must not be made in parallel
966 * on the same endpoint.
969 rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
973 rpcrdma_clean_cq(ep->rep_cq);
974 rc = rdma_disconnect(ia->ri_id);
976 /* returns without wait if not connected */
977 wait_event_interruptible(ep->rep_connect_wait,
978 ep->rep_connected != 1);
979 dprintk("RPC: %s: after wait, %sconnected\n", __func__,
980 (ep->rep_connected == 1) ? "still " : "dis");
982 dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
983 ep->rep_connected = rc;
989 * Initialize buffer memory
992 rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
993 struct rpcrdma_ia *ia, struct rpcrdma_create_data_internal *cdata)
998 struct rpcrdma_mw *r;
1000 buf->rb_max_requests = cdata->max_requests;
1001 spin_lock_init(&buf->rb_lock);
1002 atomic_set(&buf->rb_credits, 1);
1004 /* Need to allocate:
1005 * 1. arrays for send and recv pointers
1006 * 2. arrays of struct rpcrdma_req to fill in pointers
1007 * 3. array of struct rpcrdma_rep for replies
1008 * 4. padding, if any
1009 * 5. mw's, fmr's or frmr's, if any
1010 * Send/recv buffers in req/rep need to be registered
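 *
 * The single allocation below is laid out, in order, as:
 *   [rb_max_requests x struct rpcrdma_req *]  send pointer array
 *   [rb_max_requests x struct rpcrdma_rep *]  recv pointer array
 *   [cdata->padding bytes]                    zeroed pad buffer, if any
 *   [struct rpcrdma_mw array]                 per memreg strategy
 * (layout inferred from the pointer arithmetic that follows)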
1013 len = buf->rb_max_requests *
1014 (sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));
1015 len += cdata->padding;
1016 switch (ia->ri_memreg_strategy) {
1018 len += buf->rb_max_requests * RPCRDMA_MAX_SEGS *
1019 sizeof(struct rpcrdma_mw);
1021 case RPCRDMA_MTHCAFMR:
1022 /* TBD we are perhaps overallocating here */
1023 len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS *
1024 sizeof(struct rpcrdma_mw);
1026 case RPCRDMA_MEMWINDOWS_ASYNC:
1027 case RPCRDMA_MEMWINDOWS:
1028 len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS *
1029 sizeof(struct rpcrdma_mw);
1035 /* allocate 1, 4 and 5 in one shot */
1036 p = kzalloc(len, GFP_KERNEL);
1038 dprintk("RPC: %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
1043 buf->rb_pool = p; /* for freeing it later */
1045 buf->rb_send_bufs = (struct rpcrdma_req **) p;
1046 p = (char *) &buf->rb_send_bufs[buf->rb_max_requests];
1047 buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
1048 p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];
1051 * Register the zeroed pad buffer, if any.
1053 if (cdata->padding) {
1054 rc = rpcrdma_register_internal(ia, p, cdata->padding,
1055 &ep->rep_pad_mr, &ep->rep_pad);
1059 p += cdata->padding;
1062 * Allocate the fmr's, or mw's for mw_bind chunk registration.
1063 * We "cycle" the mw's in order to minimize rkey reuse,
1064 * and also reduce unbind-to-bind collision.
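 * In practice this is a FIFO: rpcrdma_buffer_get() takes mw's from the
 * head of rb_mws and rpcrdma_buffer_put() returns them to the tail, so
 * a just-used rkey goes to the back of the queue before it can be
 * handed out again.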
1066 INIT_LIST_HEAD(&buf->rb_mws);
1067 r = (struct rpcrdma_mw *)p;
1068 switch (ia->ri_memreg_strategy) {
1070 for (i = buf->rb_max_requests * RPCRDMA_MAX_SEGS; i; i--) {
1071 r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
1072 ia->ri_max_frmr_depth);
1073 if (IS_ERR(r->r.frmr.fr_mr)) {
1074 rc = PTR_ERR(r->r.frmr.fr_mr);
1075 dprintk("RPC: %s: ib_alloc_fast_reg_mr"
1076 " failed %i\n", __func__, rc);
1079 r->r.frmr.fr_pgl = ib_alloc_fast_reg_page_list(
1081 ia->ri_max_frmr_depth);
1082 if (IS_ERR(r->r.frmr.fr_pgl)) {
1083 rc = PTR_ERR(r->r.frmr.fr_pgl);
1085 "ib_alloc_fast_reg_page_list "
1086 "failed %i\n", __func__, rc);
1088 ib_dereg_mr(r->r.frmr.fr_mr);
1091 list_add(&r->mw_list, &buf->rb_mws);
1095 case RPCRDMA_MTHCAFMR:
1096 /* TBD we are perhaps overallocating here */
1097 for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) {
1098 static struct ib_fmr_attr fa =
1099 { RPCRDMA_MAX_DATA_SEGS, 1, PAGE_SHIFT };
1100 r->r.fmr = ib_alloc_fmr(ia->ri_pd,
1101 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ,
1103 if (IS_ERR(r->r.fmr)) {
1104 rc = PTR_ERR(r->r.fmr);
1105 dprintk("RPC: %s: ib_alloc_fmr"
1106 " failed %i\n", __func__, rc);
1109 list_add(&r->mw_list, &buf->rb_mws);
1113 case RPCRDMA_MEMWINDOWS_ASYNC:
1114 case RPCRDMA_MEMWINDOWS:
1115 /* Allocate one extra request's worth, for full cycling */
1116 for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) {
1117 r->r.mw = ib_alloc_mw(ia->ri_pd, IB_MW_TYPE_1);
1118 if (IS_ERR(r->r.mw)) {
1119 rc = PTR_ERR(r->r.mw);
1120 dprintk("RPC: %s: ib_alloc_mw"
1121 " failed %i\n", __func__, rc);
1124 list_add(&r->mw_list, &buf->rb_mws);
1133 * Allocate/init the request/reply buffers. Doing this
1134 * using kmalloc for now -- one for each buf.
1136 for (i = 0; i < buf->rb_max_requests; i++) {
1137 struct rpcrdma_req *req;
1138 struct rpcrdma_rep *rep;
1140 len = cdata->inline_wsize + sizeof(struct rpcrdma_req);
1141 /* RPC layer requests *double* size + 1K RPC_SLACK_SPACE! */
1142 /* Typical ~2400b, so rounding up saves work later */
1145 req = kmalloc(len, GFP_KERNEL);
1147 dprintk("RPC: %s: request buffer %d alloc"
1148 " failed\n", __func__, i);
1152 memset(req, 0, sizeof(struct rpcrdma_req));
1153 buf->rb_send_bufs[i] = req;
1154 buf->rb_send_bufs[i]->rl_buffer = buf;
1156 rc = rpcrdma_register_internal(ia, req->rl_base,
1157 len - offsetof(struct rpcrdma_req, rl_base),
1158 &buf->rb_send_bufs[i]->rl_handle,
1159 &buf->rb_send_bufs[i]->rl_iov);
1163 buf->rb_send_bufs[i]->rl_size = len-sizeof(struct rpcrdma_req);
1165 len = cdata->inline_rsize + sizeof(struct rpcrdma_rep);
1166 rep = kmalloc(len, GFP_KERNEL);
1168 dprintk("RPC: %s: reply buffer %d alloc failed\n",
1173 memset(rep, 0, sizeof(struct rpcrdma_rep));
1174 buf->rb_recv_bufs[i] = rep;
1175 buf->rb_recv_bufs[i]->rr_buffer = buf;
1176 init_waitqueue_head(&rep->rr_unbind);
1178 rc = rpcrdma_register_internal(ia, rep->rr_base,
1179 len - offsetof(struct rpcrdma_rep, rr_base),
1180 &buf->rb_recv_bufs[i]->rr_handle,
1181 &buf->rb_recv_bufs[i]->rr_iov);
1186 dprintk("RPC: %s: max_requests %d\n",
1187 __func__, buf->rb_max_requests);
1191 rpcrdma_buffer_destroy(buf);
1196 * Unregister and destroy buffer memory. Need to deal with
1197 * partial initialization, so it's callable from failed create.
1198 * Must be called before destroying endpoint, as registrations reference it.
1202 rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
1205 struct rpcrdma_ia *ia = rdmab_to_ia(buf);
1206 struct rpcrdma_mw *r;
1208 /* clean up in reverse order from create
1209 * 1. recv mr memory (mr free, then kfree)
1210 * 1a. bind mw memory
1211 * 2. send mr memory (mr free, then kfree)
1212 * 3. padding (if any) [moved to rpcrdma_ep_destroy]
1215 dprintk("RPC: %s: entering\n", __func__);
1217 for (i = 0; i < buf->rb_max_requests; i++) {
1218 if (buf->rb_recv_bufs && buf->rb_recv_bufs[i]) {
1219 rpcrdma_deregister_internal(ia,
1220 buf->rb_recv_bufs[i]->rr_handle,
1221 &buf->rb_recv_bufs[i]->rr_iov);
1222 kfree(buf->rb_recv_bufs[i]);
1224 if (buf->rb_send_bufs && buf->rb_send_bufs[i]) {
1225 rpcrdma_deregister_internal(ia,
1226 buf->rb_send_bufs[i]->rl_handle,
1227 &buf->rb_send_bufs[i]->rl_iov);
1228 kfree(buf->rb_send_bufs[i]);
1232 while (!list_empty(&buf->rb_mws)) {
1233 r = list_entry(buf->rb_mws.next,
1234 struct rpcrdma_mw, mw_list);
1235 list_del(&r->mw_list);
1236 switch (ia->ri_memreg_strategy) {
1238 rc = ib_dereg_mr(r->r.frmr.fr_mr);
1244 ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
1246 case RPCRDMA_MTHCAFMR:
1247 rc = ib_dealloc_fmr(r->r.fmr);
1254 case RPCRDMA_MEMWINDOWS_ASYNC:
1255 case RPCRDMA_MEMWINDOWS:
1256 rc = ib_dealloc_mw(r->r.mw);
1268 kfree(buf->rb_pool);
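/*
 * Minimal teardown sketch, for illustration only (this helper is
 * hypothetical; the real caller lives in the transport code): buffers
 * are torn down first because their registrations reference the PD and
 * CQ owned by the endpoint and IA, which are destroyed afterwards.
 */
static inline void rpcrdma_example_teardown(struct rpcrdma_xprt *r_xprt)
{
	rpcrdma_buffer_destroy(&r_xprt->rx_buf);	/* unregister and free buffers */
	rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia); /* disconnect, QP, CQ */
	rpcrdma_ia_close(&r_xprt->rx_ia);		/* dereg MR, PD, cm_id */
}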
1272 * Get a set of request/reply buffers.
1274 * Reply buffer (if needed) is attached to send buffer upon return.
1276 * rb_send_index and rb_recv_index MUST always be pointing to the
1277 * *next* available buffer (non-NULL). They are incremented after
1278 * removing buffers, and decremented *before* returning them.
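 *
 * For example, with rb_max_requests = 2 and both slots full:
 * rpcrdma_buffer_get() hands out rb_send_bufs[0], NULLs that slot and
 * leaves rb_send_index == 1; a later rpcrdma_buffer_put() first
 * decrements rb_send_index back to 0 and then stores the req there.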
1280 struct rpcrdma_req *
1281 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
1283 struct rpcrdma_req *req;
1284 unsigned long flags;
1286 struct rpcrdma_mw *r;
1288 spin_lock_irqsave(&buffers->rb_lock, flags);
1289 if (buffers->rb_send_index == buffers->rb_max_requests) {
1290 spin_unlock_irqrestore(&buffers->rb_lock, flags);
1291 dprintk("RPC: %s: out of request buffers\n", __func__);
1292 return ((struct rpcrdma_req *)NULL);
1295 req = buffers->rb_send_bufs[buffers->rb_send_index];
1296 if (buffers->rb_send_index < buffers->rb_recv_index) {
1297 dprintk("RPC: %s: %d extra receives outstanding (ok)\n",
1299 buffers->rb_recv_index - buffers->rb_send_index);
1300 req->rl_reply = NULL;
1302 req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
1303 buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
1305 buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;
1306 if (!list_empty(&buffers->rb_mws)) {
1307 i = RPCRDMA_MAX_SEGS - 1;
1309 r = list_entry(buffers->rb_mws.next,
1310 struct rpcrdma_mw, mw_list);
1311 list_del(&r->mw_list);
1312 req->rl_segments[i].mr_chunk.rl_mw = r;
1315 spin_unlock_irqrestore(&buffers->rb_lock, flags);
1320 * Put request/reply buffers back into pool.
1321 * Pre-decrement counter/array index.
1324 rpcrdma_buffer_put(struct rpcrdma_req *req)
1326 struct rpcrdma_buffer *buffers = req->rl_buffer;
1327 struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
1329 unsigned long flags;
1331 BUG_ON(req->rl_nchunks != 0);
1332 spin_lock_irqsave(&buffers->rb_lock, flags);
1333 buffers->rb_send_bufs[--buffers->rb_send_index] = req;
1335 if (req->rl_reply) {
1336 buffers->rb_recv_bufs[--buffers->rb_recv_index] = req->rl_reply;
1337 init_waitqueue_head(&req->rl_reply->rr_unbind);
1338 req->rl_reply->rr_func = NULL;
1339 req->rl_reply = NULL;
1341 switch (ia->ri_memreg_strategy) {
1343 case RPCRDMA_MTHCAFMR:
1344 case RPCRDMA_MEMWINDOWS_ASYNC:
1345 case RPCRDMA_MEMWINDOWS:
1347 * Cycle mw's back in reverse order, and "spin" them.
1348 * This delays and scrambles reuse as much as possible.
1352 struct rpcrdma_mw **mw;
1353 mw = &req->rl_segments[i].mr_chunk.rl_mw;
1354 list_add_tail(&(*mw)->mw_list, &buffers->rb_mws);
1356 } while (++i < RPCRDMA_MAX_SEGS);
1357 list_add_tail(&req->rl_segments[0].mr_chunk.rl_mw->mw_list,
1359 req->rl_segments[0].mr_chunk.rl_mw = NULL;
1364 spin_unlock_irqrestore(&buffers->rb_lock, flags);
1368 * Recover reply buffers from pool.
1369 * This happens when recovering from error conditions.
1370 * Post-increment counter/array index.
1373 rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
1375 struct rpcrdma_buffer *buffers = req->rl_buffer;
1376 unsigned long flags;
1378 if (req->rl_iov.length == 0) /* special case xprt_rdma_allocate() */
1379 buffers = ((struct rpcrdma_req *) buffers)->rl_buffer;
1380 spin_lock_irqsave(&buffers->rb_lock, flags);
1381 if (buffers->rb_recv_index < buffers->rb_max_requests) {
1382 req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
1383 buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
1385 spin_unlock_irqrestore(&buffers->rb_lock, flags);
1389 * Put reply buffers back into pool when not attached to
1390 * request. This happens in error conditions, and when
1391 * aborting unbinds. Pre-decrement counter/array index.
1394 rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
1396 struct rpcrdma_buffer *buffers = rep->rr_buffer;
1397 unsigned long flags;
1399 rep->rr_func = NULL;
1400 spin_lock_irqsave(&buffers->rb_lock, flags);
1401 buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep;
1402 spin_unlock_irqrestore(&buffers->rb_lock, flags);
1406 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
1410 rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
1411 struct ib_mr **mrp, struct ib_sge *iov)
1413 struct ib_phys_buf ipb;
1418 * All memory passed here was kmalloc'ed, therefore phys-contiguous.
1420 iov->addr = ib_dma_map_single(ia->ri_id->device,
1421 va, len, DMA_BIDIRECTIONAL);
1424 if (ia->ri_have_dma_lkey) {
1426 iov->lkey = ia->ri_dma_lkey;
1428 } else if (ia->ri_bind_mem != NULL) {
1430 iov->lkey = ia->ri_bind_mem->lkey;
1434 ipb.addr = iov->addr;
1435 ipb.size = iov->length;
1436 mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1,
1437 IB_ACCESS_LOCAL_WRITE, &iov->addr);
1439 dprintk("RPC: %s: phys convert: 0x%llx "
1440 "registered 0x%llx length %d\n",
1441 __func__, (unsigned long long)ipb.addr,
1442 (unsigned long long)iov->addr, len);
1447 dprintk("RPC: %s: failed with %i\n", __func__, rc);
1450 iov->lkey = mr->lkey;
1458 rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
1459 struct ib_mr *mr, struct ib_sge *iov)
1463 ib_dma_unmap_single(ia->ri_id->device,
1464 iov->addr, iov->length, DMA_BIDIRECTIONAL);
1469 rc = ib_dereg_mr(mr);
1471 dprintk("RPC: %s: ib_dereg_mr failed %i\n", __func__, rc);
1476 * Wrappers for chunk registration, shared by read/write chunk code.
1480 rpcrdma_map_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg, int writing)
1482 seg->mr_dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1483 seg->mr_dmalen = seg->mr_len;
1485 seg->mr_dma = ib_dma_map_page(ia->ri_id->device,
1486 seg->mr_page, offset_in_page(seg->mr_offset),
1487 seg->mr_dmalen, seg->mr_dir);
1489 seg->mr_dma = ib_dma_map_single(ia->ri_id->device,
1491 seg->mr_dmalen, seg->mr_dir);
1492 if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) {
1493 dprintk("RPC: %s: mr_dma %llx mr_offset %p mr_dma_len %zu\n",
1495 (unsigned long long)seg->mr_dma,
1496 seg->mr_offset, seg->mr_dmalen);
1501 rpcrdma_unmap_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg)
1504 ib_dma_unmap_page(ia->ri_id->device,
1505 seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
1507 ib_dma_unmap_single(ia->ri_id->device,
1508 seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
1512 rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
1513 int *nsegs, int writing, struct rpcrdma_ia *ia,
1514 struct rpcrdma_xprt *r_xprt)
1516 struct rpcrdma_mr_seg *seg1 = seg;
1517 struct ib_send_wr invalidate_wr, frmr_wr, *bad_wr, *post_wr;
1526 pageoff = offset_in_page(seg1->mr_offset);
1527 seg1->mr_offset -= pageoff; /* start of page */
1528 seg1->mr_len += pageoff;
1530 if (*nsegs > ia->ri_max_frmr_depth)
1531 *nsegs = ia->ri_max_frmr_depth;
1532 for (page_no = i = 0; i < *nsegs;) {
1533 rpcrdma_map_one(ia, seg, writing);
1535 for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
1536 seg1->mr_chunk.rl_mw->r.frmr.fr_pgl->
1537 page_list[page_no++] = pa;
1543 /* Check for holes */
1544 if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
1545 offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
1548 dprintk("RPC: %s: Using frmr %p to map %d segments\n",
1549 __func__, seg1->mr_chunk.rl_mw, i);
1551 if (unlikely(seg1->mr_chunk.rl_mw->r.frmr.state == FRMR_IS_VALID)) {
1552 dprintk("RPC: %s: frmr %x left valid, posting invalidate.\n",
1554 seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey);
1555 /* Invalidate before using. */
1556 memset(&invalidate_wr, 0, sizeof invalidate_wr);
1557 invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
1558 invalidate_wr.next = &frmr_wr;
1559 invalidate_wr.opcode = IB_WR_LOCAL_INV;
1560 invalidate_wr.send_flags = IB_SEND_SIGNALED;
1561 invalidate_wr.ex.invalidate_rkey =
1562 seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
1563 DECR_CQCOUNT(&r_xprt->rx_ep);
1564 post_wr = &invalidate_wr;
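/* The low-order byte of an FRMR rkey is the consumer-owned key field;
 * bumping it via ib_update_fast_reg_key() before the fast-register WR
 * gives the new mapping a distinct rkey, so any stale rkey from a
 * previous registration of this MR no longer matches.
 */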
1569 key = (u8)(seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey & 0x000000FF);
1570 ib_update_fast_reg_key(seg1->mr_chunk.rl_mw->r.frmr.fr_mr, ++key);
1572 /* Prepare FRMR WR */
1573 memset(&frmr_wr, 0, sizeof frmr_wr);
1574 frmr_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
1575 frmr_wr.opcode = IB_WR_FAST_REG_MR;
1576 frmr_wr.send_flags = IB_SEND_SIGNALED;
1577 frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma;
1578 frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl;
1579 frmr_wr.wr.fast_reg.page_list_len = page_no;
1580 frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
1581 frmr_wr.wr.fast_reg.length = page_no << PAGE_SHIFT;
1582 BUG_ON(frmr_wr.wr.fast_reg.length < len);
1583 frmr_wr.wr.fast_reg.access_flags = (writing ?
1584 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
1585 IB_ACCESS_REMOTE_READ);
1586 frmr_wr.wr.fast_reg.rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
1587 DECR_CQCOUNT(&r_xprt->rx_ep);
1589 rc = ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
1592 dprintk("RPC: %s: failed ib_post_send for register,"
1593 " status %i\n", __func__, rc);
1595 rpcrdma_unmap_one(ia, --seg);
1597 seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
1598 seg1->mr_base = seg1->mr_dma + pageoff;
1607 rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg,
1608 struct rpcrdma_ia *ia, struct rpcrdma_xprt *r_xprt)
1610 struct rpcrdma_mr_seg *seg1 = seg;
1611 struct ib_send_wr invalidate_wr, *bad_wr;
1614 while (seg1->mr_nsegs--)
1615 rpcrdma_unmap_one(ia, seg++);
1617 memset(&invalidate_wr, 0, sizeof invalidate_wr);
1618 invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
1619 invalidate_wr.opcode = IB_WR_LOCAL_INV;
1620 invalidate_wr.send_flags = IB_SEND_SIGNALED;
1621 invalidate_wr.ex.invalidate_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
1622 DECR_CQCOUNT(&r_xprt->rx_ep);
1624 rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
1626 dprintk("RPC: %s: failed ib_post_send for invalidate,"
1627 " status %i\n", __func__, rc);
1632 rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg,
1633 int *nsegs, int writing, struct rpcrdma_ia *ia)
1635 struct rpcrdma_mr_seg *seg1 = seg;
1636 u64 physaddrs[RPCRDMA_MAX_DATA_SEGS];
1637 int len, pageoff, i, rc;
1639 pageoff = offset_in_page(seg1->mr_offset);
1640 seg1->mr_offset -= pageoff; /* start of page */
1641 seg1->mr_len += pageoff;
1643 if (*nsegs > RPCRDMA_MAX_DATA_SEGS)
1644 *nsegs = RPCRDMA_MAX_DATA_SEGS;
1645 for (i = 0; i < *nsegs;) {
1646 rpcrdma_map_one(ia, seg, writing);
1647 physaddrs[i] = seg->mr_dma;
1651 /* Check for holes */
1652 if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
1653 offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
1656 rc = ib_map_phys_fmr(seg1->mr_chunk.rl_mw->r.fmr,
1657 physaddrs, i, seg1->mr_dma);
1659 dprintk("RPC: %s: failed ib_map_phys_fmr "
1660 "%u@0x%llx+%i (%d)... status %i\n", __func__,
1661 len, (unsigned long long)seg1->mr_dma,
1664 rpcrdma_unmap_one(ia, --seg);
1666 seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.fmr->rkey;
1667 seg1->mr_base = seg1->mr_dma + pageoff;
1676 rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg,
1677 struct rpcrdma_ia *ia)
1679 struct rpcrdma_mr_seg *seg1 = seg;
1683 list_add(&seg1->mr_chunk.rl_mw->r.fmr->list, &l);
1684 rc = ib_unmap_fmr(&l);
1685 while (seg1->mr_nsegs--)
1686 rpcrdma_unmap_one(ia, seg++);
1688 dprintk("RPC: %s: failed ib_unmap_fmr,"
1689 " status %i\n", __func__, rc);
1694 rpcrdma_register_memwin_external(struct rpcrdma_mr_seg *seg,
1695 int *nsegs, int writing, struct rpcrdma_ia *ia,
1696 struct rpcrdma_xprt *r_xprt)
1698 int mem_priv = (writing ? IB_ACCESS_REMOTE_WRITE :
1699 IB_ACCESS_REMOTE_READ);
1700 struct ib_mw_bind param;
1704 rpcrdma_map_one(ia, seg, writing);
1705 param.bind_info.mr = ia->ri_bind_mem;
1706 param.wr_id = 0ULL; /* no send cookie */
1707 param.bind_info.addr = seg->mr_dma;
1708 param.bind_info.length = seg->mr_len;
1709 param.send_flags = 0;
1710 param.bind_info.mw_access_flags = mem_priv;
1712 DECR_CQCOUNT(&r_xprt->rx_ep);
1713 rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, &param);
1715 dprintk("RPC: %s: failed ib_bind_mw "
1716 "%u@0x%llx status %i\n",
1717 __func__, seg->mr_len,
1718 (unsigned long long)seg->mr_dma, rc);
1719 rpcrdma_unmap_one(ia, seg);
1721 seg->mr_rkey = seg->mr_chunk.rl_mw->r.mw->rkey;
1722 seg->mr_base = param.bind_info.addr;
1729 rpcrdma_deregister_memwin_external(struct rpcrdma_mr_seg *seg,
1730 struct rpcrdma_ia *ia,
1731 struct rpcrdma_xprt *r_xprt, void **r)
1733 struct ib_mw_bind param;
1737 BUG_ON(seg->mr_nsegs != 1);
1738 param.bind_info.mr = ia->ri_bind_mem;
1739 param.bind_info.addr = 0ULL; /* unbind */
1740 param.bind_info.length = 0;
1741 param.bind_info.mw_access_flags = 0;
1743 param.wr_id = (u64) (unsigned long) *r;
1744 param.send_flags = IB_SEND_SIGNALED;
1745 INIT_CQCOUNT(&r_xprt->rx_ep);
1748 param.send_flags = 0;
1749 DECR_CQCOUNT(&r_xprt->rx_ep);
1751 rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, &param);
1752 rpcrdma_unmap_one(ia, seg);
1754 dprintk("RPC: %s: failed ib_(un)bind_mw,"
1755 " status %i\n", __func__, rc);
1757 *r = NULL; /* will upcall on completion */
1762 rpcrdma_register_default_external(struct rpcrdma_mr_seg *seg,
1763 int *nsegs, int writing, struct rpcrdma_ia *ia)
1765 int mem_priv = (writing ? IB_ACCESS_REMOTE_WRITE :
1766 IB_ACCESS_REMOTE_READ);
1767 struct rpcrdma_mr_seg *seg1 = seg;
1768 struct ib_phys_buf ipb[RPCRDMA_MAX_DATA_SEGS];
1771 if (*nsegs > RPCRDMA_MAX_DATA_SEGS)
1772 *nsegs = RPCRDMA_MAX_DATA_SEGS;
1773 for (len = 0, i = 0; i < *nsegs;) {
1774 rpcrdma_map_one(ia, seg, writing);
1775 ipb[i].addr = seg->mr_dma;
1776 ipb[i].size = seg->mr_len;
1780 /* Check for holes */
1781 if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
1782 offset_in_page((seg-1)->mr_offset+(seg-1)->mr_len))
1785 seg1->mr_base = seg1->mr_dma;
1786 seg1->mr_chunk.rl_mr = ib_reg_phys_mr(ia->ri_pd,
1787 ipb, i, mem_priv, &seg1->mr_base);
1788 if (IS_ERR(seg1->mr_chunk.rl_mr)) {
1789 rc = PTR_ERR(seg1->mr_chunk.rl_mr);
1790 dprintk("RPC: %s: failed ib_reg_phys_mr "
1791 "%u@0x%llx (%d)... status %i\n",
1793 (unsigned long long)seg1->mr_dma, i, rc);
1795 rpcrdma_unmap_one(ia, --seg);
1797 seg1->mr_rkey = seg1->mr_chunk.rl_mr->rkey;
1806 rpcrdma_deregister_default_external(struct rpcrdma_mr_seg *seg,
1807 struct rpcrdma_ia *ia)
1809 struct rpcrdma_mr_seg *seg1 = seg;
1812 rc = ib_dereg_mr(seg1->mr_chunk.rl_mr);
1813 seg1->mr_chunk.rl_mr = NULL;
1814 while (seg1->mr_nsegs--)
1815 rpcrdma_unmap_one(ia, seg++);
1817 dprintk("RPC: %s: failed ib_dereg_mr,"
1818 " status %i\n", __func__, rc);
1823 rpcrdma_register_external(struct rpcrdma_mr_seg *seg,
1824 int nsegs, int writing, struct rpcrdma_xprt *r_xprt)
1826 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
1829 switch (ia->ri_memreg_strategy) {
1831 #if RPCRDMA_PERSISTENT_REGISTRATION
1832 case RPCRDMA_ALLPHYSICAL:
1833 rpcrdma_map_one(ia, seg, writing);
1834 seg->mr_rkey = ia->ri_bind_mem->rkey;
1835 seg->mr_base = seg->mr_dma;
1841 /* Registration using frmr registration */
1843 rc = rpcrdma_register_frmr_external(seg, &nsegs, writing, ia, r_xprt);
1846 /* Registration using fmr memory registration */
1847 case RPCRDMA_MTHCAFMR:
1848 rc = rpcrdma_register_fmr_external(seg, &nsegs, writing, ia);
1851 /* Registration using memory windows */
1852 case RPCRDMA_MEMWINDOWS_ASYNC:
1853 case RPCRDMA_MEMWINDOWS:
1854 rc = rpcrdma_register_memwin_external(seg, &nsegs, writing, ia, r_xprt);
1857 /* Default registration each time */
1859 rc = rpcrdma_register_default_external(seg, &nsegs, writing, ia);
1869 rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg,
1870 struct rpcrdma_xprt *r_xprt, void *r)
1872 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
1873 int nsegs = seg->mr_nsegs, rc;
1875 switch (ia->ri_memreg_strategy) {
1877 #if RPCRDMA_PERSISTENT_REGISTRATION
1878 case RPCRDMA_ALLPHYSICAL:
1880 rpcrdma_unmap_one(ia, seg);
1886 rc = rpcrdma_deregister_frmr_external(seg, ia, r_xprt);
1889 case RPCRDMA_MTHCAFMR:
1890 rc = rpcrdma_deregister_fmr_external(seg, ia);
1893 case RPCRDMA_MEMWINDOWS_ASYNC:
1894 case RPCRDMA_MEMWINDOWS:
1895 rc = rpcrdma_deregister_memwin_external(seg, ia, r_xprt, &r);
1899 rc = rpcrdma_deregister_default_external(seg, ia);
1903 struct rpcrdma_rep *rep = r;
1904 void (*func)(struct rpcrdma_rep *) = rep->rr_func;
1905 rep->rr_func = NULL;
1906 func(rep); /* dereg done, callback now */
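/*
 * Hypothetical caller sketch (for illustration; the real send path lives
 * in the transport code): a request buffer is taken from the pool with
 * its reply pre-attached, the RPC call is marshaled into rl_base and
 * rl_send_iov, and rpcrdma_ep_post() then posts the receive for the
 * reply followed by the send WR. rpcrdma_ep_post() is assumed to be
 * declared in xprt_rdma.h, so it can be referenced ahead of its
 * definition below; the error handling here is sketch-only.
 */
static inline int rpcrdma_example_send_one(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_req *req;
	int rc;

	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
	if (req == NULL)
		return -ENOMEM;

	/* ... marshal the RPC call into req->rl_base and req->rl_send_iov ... */

	rc = rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req);
	if (rc)
		rpcrdma_buffer_put(req);	/* sketch-only error handling */
	return rc;
}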
1912 * Prepost any receive buffer, then post send.
1914 * Receive buffer is donated to hardware, reclaimed upon recv completion.
1917 rpcrdma_ep_post(struct rpcrdma_ia *ia,
1918 struct rpcrdma_ep *ep,
1919 struct rpcrdma_req *req)
1921 struct ib_send_wr send_wr, *send_wr_fail;
1922 struct rpcrdma_rep *rep = req->rl_reply;
1926 rc = rpcrdma_ep_post_recv(ia, ep, rep);
1929 req->rl_reply = NULL;
1932 send_wr.next = NULL;
1933 send_wr.wr_id = 0ULL; /* no send cookie */
1934 send_wr.sg_list = req->rl_send_iov;
1935 send_wr.num_sge = req->rl_niovs;
1936 send_wr.opcode = IB_WR_SEND;
1937 if (send_wr.num_sge == 4) /* no need to sync any pad (constant) */
1938 ib_dma_sync_single_for_device(ia->ri_id->device,
1939 req->rl_send_iov[3].addr, req->rl_send_iov[3].length,
1941 ib_dma_sync_single_for_device(ia->ri_id->device,
1942 req->rl_send_iov[1].addr, req->rl_send_iov[1].length,
1944 ib_dma_sync_single_for_device(ia->ri_id->device,
1945 req->rl_send_iov[0].addr, req->rl_send_iov[0].length,
1948 if (DECR_CQCOUNT(ep) > 0)
1949 send_wr.send_flags = 0;
1950 else { /* Provider must take a send completion every now and then */
1952 send_wr.send_flags = IB_SEND_SIGNALED;
1955 rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
1957 dprintk("RPC: %s: ib_post_send returned %i\n", __func__,
1964 * (Re)post a receive buffer.
1967 rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
1968 struct rpcrdma_ep *ep,
1969 struct rpcrdma_rep *rep)
1971 struct ib_recv_wr recv_wr, *recv_wr_fail;
1974 recv_wr.next = NULL;
1975 recv_wr.wr_id = (u64) (unsigned long) rep;
1976 recv_wr.sg_list = &rep->rr_iov;
1977 recv_wr.num_sge = 1;
1979 ib_dma_sync_single_for_cpu(ia->ri_id->device,
1980 rep->rr_iov.addr, rep->rr_iov.length, DMA_BIDIRECTIONAL);
1983 rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);
1986 dprintk("RPC: %s: ib_post_recv returned %i\n", __func__,