/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void dto_tasklet_func(unsigned long data);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
static void rq_cq_reap(struct svcxprt_rdma *xprt);
static void sq_cq_reap(struct svcxprt_rdma *xprt);

static DECLARE_TASKLET(dto_tasklet, dto_tasklet_func, 0UL);
static DEFINE_SPINLOCK(dto_lock);
static LIST_HEAD(dto_xprt_q);

static struct svc_xprt_ops svc_rdma_ops = {
	.xpo_create = svc_rdma_create,
	.xpo_recvfrom = svc_rdma_recvfrom,
	.xpo_sendto = svc_rdma_sendto,
	.xpo_release_rqst = svc_rdma_release_rqst,
	.xpo_detach = svc_rdma_detach,
	.xpo_free = svc_rdma_free,
	.xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
	.xpo_has_wspace = svc_rdma_has_wspace,
	.xpo_accept = svc_rdma_accept,
	.xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_class = {
	.xcl_name = "rdma",
	.xcl_owner = THIS_MODULE,
	.xcl_ops = &svc_rdma_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
	.xcl_ident = XPRT_TRANSPORT_RDMA,
};

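/*
 * Note: svc_rdma_class is registered with the generic server transport
 * switch via svc_reg_xprt_class(), presumably from the svcrdma module
 * initialization code rather than from this file.
 */
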
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt;

	while (1) {
		ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
		if (ctxt)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	ctxt->xprt = xprt;
	INIT_LIST_HEAD(&ctxt->dto_q);
	ctxt->count = 0;
	ctxt->frmr = NULL;
	atomic_inc(&xprt->sc_ctxt_used);
	return ctxt;
}

void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *xprt = ctxt->xprt;
	int i;

	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
		/*
		 * Unmap the DMA addr in the SGE if the lkey matches
		 * the sc_dma_lkey, otherwise, ignore it since it is
		 * an FRMR lkey and will be unmapped later when the
		 * last WR that uses it completes.
		 */
		if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
			atomic_dec(&xprt->sc_dma_used);
			ib_dma_unmap_page(xprt->sc_cm_id->device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
		}
	}
}

void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
	struct svcxprt_rdma *xprt;
	int i;

	xprt = ctxt->xprt;
	if (free_pages)
		for (i = 0; i < ctxt->count; i++)
			put_page(ctxt->pages[i]);

	kmem_cache_free(svc_rdma_ctxt_cachep, ctxt);
	atomic_dec(&xprt->sc_ctxt_used);
}

/*
 * Temporary NFS req mappings are shared across all transport
 * instances. These are short lived and should be bounded by the number
 * of concurrent server threads * depth of the SQ.
 */
struct svc_rdma_req_map *svc_rdma_get_req_map(void)
{
	struct svc_rdma_req_map *map;

	while (1) {
		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
		if (map)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
	}
	map->count = 0;
	return map;
}

void svc_rdma_put_req_map(struct svc_rdma_req_map *map)
{
	kmem_cache_free(svc_rdma_map_cachep, map);
}

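/*
 * Note on the allocators above: svc_rdma_get_context() and
 * svc_rdma_get_req_map() never return NULL.  If the slab allocation
 * fails they sleep for 500ms and retry, so callers do not need an
 * out-of-memory path.
 */
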
/* ib_cq event handler */
static void cq_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;
	dprintk("svcrdma: received CQ event %s (%d), context=%p\n",
		ib_event_msg(event->event), event->event, context);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
	struct svc_xprt *xprt = context;

	switch (event->event) {
	/* These are considered benign events */
	case IB_EVENT_PATH_MIG:
	case IB_EVENT_COMM_EST:
	case IB_EVENT_SQ_DRAINED:
	case IB_EVENT_QP_LAST_WQE_REACHED:
		dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		break;
	/* These are considered fatal events */
	case IB_EVENT_PATH_MIG_ERR:
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_REQ_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
	case IB_EVENT_DEVICE_FATAL:
	default:
		dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
			"closing transport\n",
			ib_event_msg(event->event), event->event,
			event->element.qp);
		set_bit(XPT_CLOSE, &xprt->xpt_flags);
		break;
	}
}

/*
 * Data Transfer Operation Tasklet
 *
 * Walks a list of transports with I/O pending, removing entries as
 * they are added to the server's I/O pending list. Two bits indicate
 * if SQ, RQ, or both have I/O pending. The dto_lock is an irqsave
 * spinlock that serializes access to the transport list with the RQ
 * and SQ interrupt handlers.
 */
static void dto_tasklet_func(unsigned long data)
{
	struct svcxprt_rdma *xprt;
	unsigned long flags;

	spin_lock_irqsave(&dto_lock, flags);
	while (!list_empty(&dto_xprt_q)) {
		xprt = list_entry(dto_xprt_q.next,
				  struct svcxprt_rdma, sc_dto_q);
		list_del_init(&xprt->sc_dto_q);
		spin_unlock_irqrestore(&dto_lock, flags);

		rq_cq_reap(xprt);
		sq_cq_reap(xprt);

		svc_xprt_put(&xprt->sc_xprt);
		spin_lock_irqsave(&dto_lock, flags);
	}
	spin_unlock_irqrestore(&dto_lock, flags);
}

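/*
 * Note: each transport queued on dto_xprt_q carries an svc_xprt
 * reference taken by the completion handler that queued it; the
 * tasklet above drops that reference once both CQs have been reaped.
 */
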
/*
 * Receive Queue Completion Handler
 *
 * Since an RQ completion handler is called on interrupt context, we
 * need to defer the handling of the I/O to a tasklet
 */
static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an SQ
	 * completion.
	 */
	set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

/*
 * rq_cq_reap - Process the RQ CQ.
 *
 * Take all completing WC off the CQE and enqueue the associated DTO
 * context on the dto_q for the transport.
 *
 * Note that caller must hold a transport reference.
 */
static void rq_cq_reap(struct svcxprt_rdma *xprt)
{
	int ret;
	struct ib_wc wc;
	struct svc_rdma_op_ctxt *ctxt = NULL;

	if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_rq_poll);

	while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
		ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
		ctxt->wc_status = wc.status;
		ctxt->byte_len = wc.byte_len;
		svc_rdma_unmap_dma(ctxt);
		if (wc.status != IB_WC_SUCCESS) {
			/* Close the transport */
			dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			svc_rdma_put_context(ctxt, 1);
			svc_xprt_put(&xprt->sc_xprt);
			continue;
		}
		spin_lock_bh(&xprt->sc_rq_dto_lock);
		list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
		spin_unlock_bh(&xprt->sc_rq_dto_lock);
		svc_xprt_put(&xprt->sc_xprt);
	}

	if (ctxt)
		atomic_inc(&rdma_stat_rq_prod);

	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
	/*
	 * If data arrived before established event,
	 * don't enqueue. This defers RPC I/O until the
	 * RDMA connection is complete.
	 */
	if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
		svc_xprt_enqueue(&xprt->sc_xprt);
}

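/*
 * Note: each receive completion reaped above releases the svc_xprt
 * reference that svc_rdma_post_recv() took when the receive WR was
 * posted, whether the completion succeeded or flushed with an error.
 */
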
/*
 * Process a completion context
 */
static void process_context(struct svcxprt_rdma *xprt,
			    struct svc_rdma_op_ctxt *ctxt)
{
	svc_rdma_unmap_dma(ctxt);

	switch (ctxt->wr_op) {
	case IB_WR_SEND:
		if (ctxt->frmr)
			pr_err("svcrdma: SEND: ctxt->frmr != NULL\n");
		svc_rdma_put_context(ctxt, 1);
		break;

	case IB_WR_RDMA_WRITE:
		if (ctxt->frmr)
			pr_err("svcrdma: WRITE: ctxt->frmr != NULL\n");
		svc_rdma_put_context(ctxt, 0);
		break;

	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		svc_rdma_put_frmr(xprt, ctxt->frmr);
		if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
			struct svc_rdma_op_ctxt *read_hdr = ctxt->read_hdr;
			if (read_hdr) {
				spin_lock_bh(&xprt->sc_rq_dto_lock);
				set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
				list_add_tail(&read_hdr->dto_q,
					      &xprt->sc_read_complete_q);
				spin_unlock_bh(&xprt->sc_rq_dto_lock);
			} else {
				pr_err("svcrdma: ctxt->read_hdr == NULL\n");
			}
			svc_xprt_enqueue(&xprt->sc_xprt);
		}
		svc_rdma_put_context(ctxt, 0);
		break;

	default:
		printk(KERN_ERR "svcrdma: unexpected completion type, "
		       "opcode=%d\n",
		       ctxt->wr_op);
		break;
	}
}

/*
 * Send Queue Completion Handler - potentially called on interrupt context.
 *
 * Note that caller must hold a transport reference.
 */
static void sq_cq_reap(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_op_ctxt *ctxt = NULL;
	struct ib_wc wc_a[6];
	struct ib_wc *wc;
	struct ib_cq *cq = xprt->sc_sq_cq;
	int ret;

	memset(wc_a, 0, sizeof(wc_a));

	if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
		return;

	ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	atomic_inc(&rdma_stat_sq_poll);
	while ((ret = ib_poll_cq(cq, ARRAY_SIZE(wc_a), wc_a)) > 0) {
		int i;

		for (i = 0; i < ret; i++) {
			wc = &wc_a[i];
			if (wc->status != IB_WC_SUCCESS) {
				dprintk("svcrdma: sq wc err status %s (%d)\n",
					ib_wc_status_msg(wc->status),
					wc->status);

				/* Close the transport */
				set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			}

			/* Decrement used SQ WR count */
			atomic_dec(&xprt->sc_sq_count);
			wake_up(&xprt->sc_send_wait);

			ctxt = (struct svc_rdma_op_ctxt *)
				(unsigned long)wc->wr_id;
			if (ctxt)
				process_context(xprt, ctxt);

			svc_xprt_put(&xprt->sc_xprt);
		}
	}

	if (ctxt)
		atomic_inc(&rdma_stat_sq_prod);
}

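/*
 * Note: the sc_sq_count decrement and sc_send_wait wake-up above pair
 * with the accounting in svc_rdma_send(), which adds to sc_sq_count
 * before posting and sleeps on sc_send_wait when the SQ is full.
 */
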
static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
{
	struct svcxprt_rdma *xprt = cq_context;
	unsigned long flags;

	/* Guard against unconditional flush call for destroyed QP */
	if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
		return;

	/*
	 * Set the bit regardless of whether or not it's on the list
	 * because it may be on the list already due to an RQ
	 * completion.
	 */
	set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);

	/*
	 * If this transport is not already on the DTO transport queue,
	 * add it
	 */
	spin_lock_irqsave(&dto_lock, flags);
	if (list_empty(&xprt->sc_dto_q)) {
		svc_xprt_get(&xprt->sc_xprt);
		list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
	}
	spin_unlock_irqrestore(&dto_lock, flags);

	/* Tasklet does all the work to avoid irqsave locks. */
	tasklet_schedule(&dto_tasklet);
}

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
					     int listener)
{
	struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

	if (!cma_xprt)
		return NULL;
	svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
	INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
	INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
	INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
	init_waitqueue_head(&cma_xprt->sc_send_wait);

	spin_lock_init(&cma_xprt->sc_lock);
	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
	spin_lock_init(&cma_xprt->sc_frmr_q_lock);

	cma_xprt->sc_ord = svcrdma_ord;

	cma_xprt->sc_max_req_size = svcrdma_max_req_size;
	cma_xprt->sc_max_requests = svcrdma_max_requests;
	cma_xprt->sc_sq_depth = svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT;
	atomic_set(&cma_xprt->sc_sq_count, 0);
	atomic_set(&cma_xprt->sc_ctxt_used, 0);

	if (listener)
		set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

	return cma_xprt;
}

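/*
 * Sizing note: the send queue is provisioned at sc_max_requests *
 * RPCRDMA_SQ_DEPTH_MULT work requests, presumably on the assumption
 * that a single RPC reply can consume several send-side WRs (RDMA
 * Writes plus the final Send).
 */
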
struct page *svc_rdma_get_page(void)
{
	struct page *page;

	while ((page = alloc_page(GFP_KERNEL)) == NULL) {
		/* If we can't get memory, wait a bit and try again */
		printk(KERN_INFO "svcrdma: out of memory...retrying in 1s\n");
		schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
	}
	return page;
}

int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{
	struct ib_recv_wr recv_wr, *bad_recv_wr;
	struct svc_rdma_op_ctxt *ctxt;
	struct page *page;
	dma_addr_t pa;
	int sge_no;
	int buflen;
	int ret;

	ctxt = svc_rdma_get_context(xprt);
	buflen = 0;
	ctxt->direction = DMA_FROM_DEVICE;
	for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
		if (sge_no >= xprt->sc_max_sge) {
			pr_err("svcrdma: Too many sges (%d)\n", sge_no);
			goto err_put_ctxt;
		}
		page = svc_rdma_get_page();
		ctxt->pages[sge_no] = page;
		pa = ib_dma_map_page(xprt->sc_cm_id->device,
				     page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
			goto err_put_ctxt;
		atomic_inc(&xprt->sc_dma_used);
		ctxt->sge[sge_no].addr = pa;
		ctxt->sge[sge_no].length = PAGE_SIZE;
		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
		ctxt->count = sge_no + 1;
		buflen += PAGE_SIZE;
	}
	recv_wr.next = NULL;
	recv_wr.sg_list = &ctxt->sge[0];
	recv_wr.num_sge = ctxt->count;
	recv_wr.wr_id = (u64)(unsigned long)ctxt;

	svc_xprt_get(&xprt->sc_xprt);
	ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
	if (ret) {
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
		svc_xprt_put(&xprt->sc_xprt);
	}
	return ret;

 err_put_ctxt:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	return -ENOMEM;
}

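/*
 * svc_rdma_post_recv() is called sc_max_requests times from
 * svc_rdma_accept() to fill the receive queue before the connection
 * is accepted; each successfully posted buffer pins an svc_xprt
 * reference until its completion is reaped in rq_cq_reap().
 */
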
/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listen xprt. When the listen thread is kicked, it
 * will call the recvfrom method on the listen xprt which will accept the new
 * connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
{
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;
	struct sockaddr *sa;

	/* Create a new transport */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt) {
		dprintk("svcrdma: failed to create new transport\n");
		return;
	}
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;
	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
		newxprt, newxprt->sc_cm_id, listen_xprt);

	/* Save client advertised inbound read limit for use later in accept. */
	newxprt->sc_ord = client_ird;

	/* Set the local and remote addresses in the transport */
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
	svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

	/*
	 * Enqueue the new transport on the accept queue of the listening
	 * transport
	 */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events will
 * either be incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
			       struct rdma_cm_event *event)
{
	struct svcxprt_rdma *xprt = cma_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, cma_id->context,
			rdma_event_msg(event->event), event->event);
		handle_connect_req(cma_id,
				   event->param.conn.initiator_depth);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
		break;

	default:
		dprintk("svcrdma: Unexpected event on listening endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}

	return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
			    struct rdma_cm_event *event)
{
	struct svc_xprt *xprt = cma_id->context;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		/* Accept complete */
		svc_xprt_get(xprt);
		dprintk("svcrdma: Connection completed on DTO xprt=%p, "
			"cm_id=%p\n", xprt, cma_id);
		clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
		svc_xprt_enqueue(xprt);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
			xprt, cma_id);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
			svc_xprt_put(xprt);
		}
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
			"event = %s (%d)\n", cma_id, xprt,
			rdma_event_msg(event->event), event->event);
		if (xprt) {
			set_bit(XPT_CLOSE, &xprt->xpt_flags);
			svc_xprt_enqueue(xprt);
		}
		break;
	default:
		dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
			"event = %s (%d)\n", cma_id,
			rdma_event_msg(event->event), event->event);
		break;
	}

	return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
					struct net *net,
					struct sockaddr *sa, int salen,
					int flags)
{
	struct rdma_cm_id *listen_id;
	struct svcxprt_rdma *cma_xprt;
	int ret;

	dprintk("svcrdma: Creating RDMA socket\n");
	if (sa->sa_family != AF_INET) {
		dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
		return ERR_PTR(-EAFNOSUPPORT);
	}
	cma_xprt = rdma_create_xprt(serv, 1);
	if (!cma_xprt)
		return ERR_PTR(-ENOMEM);

	listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
				   IB_QPT_RC);
	if (IS_ERR(listen_id)) {
		ret = PTR_ERR(listen_id);
		dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
		goto err0;
	}

	ret = rdma_bind_addr(listen_id, sa);
	if (ret) {
		dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
		goto err1;
	}
	cma_xprt->sc_cm_id = listen_id;

	ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
	if (ret) {
		dprintk("svcrdma: rdma_listen failed = %d\n", ret);
		goto err1;
	}

	/*
	 * We need to use the address from the cm_id in case the
	 * caller specified 0 for the port number.
	 */
	sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
	svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

	return &cma_xprt->sc_xprt;

 err1:
	rdma_destroy_id(listen_id);
 err0:
	kfree(cma_xprt);
	return ERR_PTR(ret);
}

static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *pl;
	struct svc_rdma_fastreg_mr *frmr;

	frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
	if (!frmr)
		goto err;

	mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
	if (IS_ERR(mr))
		goto err_free_frmr;

	pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
					 RPCSVC_MAXPAGES);
	if (IS_ERR(pl))
		goto err_free_mr;

	frmr->mr = mr;
	frmr->page_list = pl;
	INIT_LIST_HEAD(&frmr->frmr_list);
	return frmr;

 err_free_mr:
	ib_dereg_mr(mr);
 err_free_frmr:
	kfree(frmr);
 err:
	return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
	struct svc_rdma_fastreg_mr *frmr;

	while (!list_empty(&xprt->sc_frmr_q)) {
		frmr = list_entry(xprt->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		ib_dereg_mr(frmr->mr);
		ib_free_fast_reg_page_list(frmr->page_list);
		kfree(frmr);
	}
}

struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_fastreg_mr *frmr = NULL;

	spin_lock_bh(&rdma->sc_frmr_q_lock);
	if (!list_empty(&rdma->sc_frmr_q)) {
		frmr = list_entry(rdma->sc_frmr_q.next,
				  struct svc_rdma_fastreg_mr, frmr_list);
		list_del_init(&frmr->frmr_list);
		frmr->map_len = 0;
		frmr->page_list_len = 0;
	}
	spin_unlock_bh(&rdma->sc_frmr_q_lock);
	if (frmr)
		return frmr;

	return rdma_alloc_frmr(rdma);
}

static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
			   struct svc_rdma_fastreg_mr *frmr)
{
	int page_no;

	for (page_no = 0; page_no < frmr->page_list_len; page_no++) {
		dma_addr_t addr = frmr->page_list->page_list[page_no];

		if (ib_dma_mapping_error(frmr->mr->device, addr))
			continue;
		atomic_dec(&xprt->sc_dma_used);
		ib_dma_unmap_page(frmr->mr->device, addr, PAGE_SIZE,
				  frmr->direction);
	}
}

void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
		       struct svc_rdma_fastreg_mr *frmr)
{
	if (frmr) {
		frmr_unmap_dma(rdma, frmr);
		spin_lock_bh(&rdma->sc_frmr_q_lock);
		WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
		list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
		spin_unlock_bh(&rdma->sc_frmr_q_lock);
	}
}

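/*
 * FRMR pool summary: svc_rdma_get_frmr() hands out a fast-registration
 * MR from sc_frmr_q, allocating a fresh one when the queue is empty;
 * svc_rdma_put_frmr() unmaps any pages still mapped through it and
 * returns it to the queue; rdma_dealloc_frmr_q() tears the pool down
 * when the transport is freed.
 */
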
/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *listen_rdma;
	struct svcxprt_rdma *newxprt = NULL;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct ib_device_attr devattr;
	int uninitialized_var(dma_mr_acc);
	int need_dma_mr = 0;
	int ret;
	int i;

	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
	clear_bit(XPT_CONN, &xprt->xpt_flags);
	/* Get the next entry off the accept list */
	spin_lock_bh(&listen_rdma->sc_lock);
	if (!list_empty(&listen_rdma->sc_accept_q)) {
		newxprt = list_entry(listen_rdma->sc_accept_q.next,
				     struct svcxprt_rdma, sc_accept_q);
		list_del_init(&newxprt->sc_accept_q);
	}
	if (!list_empty(&listen_rdma->sc_accept_q))
		set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
	spin_unlock_bh(&listen_rdma->sc_lock);
	if (!newxprt)
		return NULL;

	dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
		newxprt, newxprt->sc_cm_id);

	ret = ib_query_device(newxprt->sc_cm_id->device, &devattr);
	if (ret) {
		dprintk("svcrdma: could not query device attributes on "
			"device %p, rc=%d\n", newxprt->sc_cm_id->device, ret);
		goto errout;
	}

	/* Qualify the transport resource defaults with the
	 * capabilities of this particular device */
	newxprt->sc_max_sge = min((size_t)devattr.max_sge,
				  (size_t)RPCSVC_MAXPAGES);
	newxprt->sc_max_requests = min((size_t)devattr.max_qp_wr,
				       (size_t)svcrdma_max_requests);
	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;

	/*
	 * Limit ORD based on client limit, local device limit, and
	 * configured svcrdma limit.
	 */
	newxprt->sc_ord = min_t(size_t, devattr.max_qp_rd_atom, newxprt->sc_ord);
	newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

	newxprt->sc_pd = ib_alloc_pd(newxprt->sc_cm_id->device);
	if (IS_ERR(newxprt->sc_pd)) {
		dprintk("svcrdma: error creating PD for connect request\n");
		goto errout;
	}
	newxprt->sc_sq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 sq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_sq_depth,
					 0);
	if (IS_ERR(newxprt->sc_sq_cq)) {
		dprintk("svcrdma: error creating SQ CQ for connect request\n");
		goto errout;
	}
	newxprt->sc_rq_cq = ib_create_cq(newxprt->sc_cm_id->device,
					 rq_comp_handler,
					 cq_event_handler,
					 newxprt,
					 newxprt->sc_max_requests,
					 0);
	if (IS_ERR(newxprt->sc_rq_cq)) {
		dprintk("svcrdma: error creating RQ CQ for connect request\n");
		goto errout;
	}

	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.event_handler = qp_event_handler;
	qp_attr.qp_context = &newxprt->sc_xprt;
	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = newxprt->sc_sq_cq;
	qp_attr.recv_cq = newxprt->sc_rq_cq;
	dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n"
		"    cm_id->device=%p, sc_pd->device=%p\n"
		"    cap.max_send_wr = %d\n"
		"    cap.max_recv_wr = %d\n"
		"    cap.max_send_sge = %d\n"
		"    cap.max_recv_sge = %d\n",
		newxprt->sc_cm_id, newxprt->sc_pd,
		newxprt->sc_cm_id->device, newxprt->sc_pd->device,
		qp_attr.cap.max_send_wr,
		qp_attr.cap.max_recv_wr,
		qp_attr.cap.max_send_sge,
		qp_attr.cap.max_recv_sge);

	ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
	if (ret) {
		dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
		goto errout;
	}
	newxprt->sc_qp = newxprt->sc_cm_id->qp;

	/*
	 * Use the most secure set of MR resources based on the
	 * transport type and available memory management features in
	 * the device. The choice is driven by whether fast registration
	 * is supported, whether a global DMA lkey is supported, whether
	 * a DMA MR needs to be created, and whether remote write access
	 * must be granted on it.
	 *
	 * NB: iWARP requires remote write access for the data sink
	 *     of an RDMA_READ. IB does not.
	 */
	newxprt->sc_reader = rdma_read_chunk_lcl;
	if (devattr.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		newxprt->sc_frmr_pg_list_len =
			devattr.max_fast_reg_page_list_len;
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
		newxprt->sc_reader = rdma_read_chunk_frmr;
	}

	/*
	 * Determine if a DMA MR is required and if so, what privs are required
	 */
	if (!rdma_protocol_iwarp(newxprt->sc_cm_id->device,
				 newxprt->sc_cm_id->port_num) &&
	    !rdma_ib_or_roce(newxprt->sc_cm_id->device,
			     newxprt->sc_cm_id->port_num))
		goto errout;

	if (!(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG) ||
	    !(devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) {
		need_dma_mr = 1;
		dma_mr_acc = IB_ACCESS_LOCAL_WRITE;
		if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
					newxprt->sc_cm_id->port_num) &&
		    !(newxprt->sc_dev_caps & SVCRDMA_DEVCAP_FAST_REG))
			dma_mr_acc |= IB_ACCESS_REMOTE_WRITE;
	}

	if (rdma_protocol_iwarp(newxprt->sc_cm_id->device,
				newxprt->sc_cm_id->port_num))
		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;

	/* Create the DMA MR if needed, otherwise, use the DMA LKEY */
	if (need_dma_mr) {
		/* Register all of physical memory */
		newxprt->sc_phys_mr =
			ib_get_dma_mr(newxprt->sc_pd, dma_mr_acc);
		if (IS_ERR(newxprt->sc_phys_mr)) {
			dprintk("svcrdma: Failed to create DMA MR ret=%d\n",
				ret);
			goto errout;
		}
		newxprt->sc_dma_lkey = newxprt->sc_phys_mr->lkey;
	} else
		newxprt->sc_dma_lkey =
			newxprt->sc_cm_id->device->local_dma_lkey;

	/* Post receive buffers */
	for (i = 0; i < newxprt->sc_max_requests; i++) {
		ret = svc_rdma_post_recv(newxprt);
		if (ret) {
			dprintk("svcrdma: failure posting receive buffers\n");
			goto errout;
		}
	}

	/* Swap out the handler */
	newxprt->sc_cm_id->event_handler = rdma_cma_handler;

	/*
	 * Arm the CQs for the SQ and RQ before accepting so we can't
	 * miss the first message
	 */
	ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
	ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);

	/* Accept Connection */
	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 0;
	conn_param.initiator_depth = newxprt->sc_ord;
	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
	if (ret) {
		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
			ret);
		goto errout;
	}

	dprintk("svcrdma: new connection %p accepted with the following "
		"attributes:\n"
		"    local_ip        : %pI4\n"
		"    local_port      : %d\n"
		"    remote_ip       : %pI4\n"
		"    remote_port     : %d\n"
		"    max_sge         : %d\n"
		"    sq_depth        : %d\n"
		"    max_requests    : %d\n"
		"    ord             : %d\n",
		newxprt,
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.src_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.src_addr)->sin_port),
		&((struct sockaddr_in *)&newxprt->sc_cm_id->
			 route.addr.dst_addr)->sin_addr.s_addr,
		ntohs(((struct sockaddr_in *)&newxprt->sc_cm_id->
		       route.addr.dst_addr)->sin_port),
		newxprt->sc_max_sge,
		newxprt->sc_sq_depth,
		newxprt->sc_max_requests,
		newxprt->sc_ord);

	return &newxprt->sc_xprt;

1088 dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
1089 /* Take a reference in case the DTO handler runs */
1090 svc_xprt_get(&newxprt->sc_xprt);
1091 if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
1092 ib_destroy_qp(newxprt->sc_qp);
1093 rdma_destroy_id(newxprt->sc_cm_id);
1094 /* This call to put will destroy the transport */
1095 svc_xprt_put(&newxprt->sc_xprt);
static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	dprintk("svc: svc_rdma_detach(%p)\n", xprt);

	/* Disconnect and flush posted WQE */
	rdma_disconnect(rdma->sc_cm_id);
}

static void __svc_rdma_free(struct work_struct *work)
{
	struct svcxprt_rdma *rdma =
		container_of(work, struct svcxprt_rdma, sc_work);
	dprintk("svcrdma: svc_rdma_free(%p)\n", rdma);

	/* We should only be called from kref_put */
	if (atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0)
		pr_err("svcrdma: sc_xprt still in use? (%d)\n",
		       atomic_read(&rdma->sc_xprt.xpt_ref.refcount));

	/*
	 * Destroy queued, but not processed read completions. Note
	 * that this cleanup has to be done before destroying the
	 * cm_id because the device ptr is needed to unmap the dma in
	 * svc_rdma_put_context.
	 */
	while (!list_empty(&rdma->sc_read_complete_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_read_complete_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Destroy queued, but not processed recv completions */
	while (!list_empty(&rdma->sc_rq_dto_q)) {
		struct svc_rdma_op_ctxt *ctxt;
		ctxt = list_entry(rdma->sc_rq_dto_q.next,
				  struct svc_rdma_op_ctxt,
				  dto_q);
		list_del_init(&ctxt->dto_q);
		svc_rdma_put_context(ctxt, 1);
	}

	/* Warn if we leaked a resource or under-referenced */
	if (atomic_read(&rdma->sc_ctxt_used) != 0)
		pr_err("svcrdma: ctxt still in use? (%d)\n",
		       atomic_read(&rdma->sc_ctxt_used));
	if (atomic_read(&rdma->sc_dma_used) != 0)
		pr_err("svcrdma: dma still in use? (%d)\n",
		       atomic_read(&rdma->sc_dma_used));

	/* De-allocate fastreg mr */
	rdma_dealloc_frmr_q(rdma);

	/* Destroy the QP if present (not a listener) */
	if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
		ib_destroy_qp(rdma->sc_qp);

	if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
		ib_destroy_cq(rdma->sc_sq_cq);

	if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
		ib_destroy_cq(rdma->sc_rq_cq);

	if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr))
		ib_dereg_mr(rdma->sc_phys_mr);

	if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
		ib_dealloc_pd(rdma->sc_pd);

	/* Destroy the CM ID */
	rdma_destroy_id(rdma->sc_cm_id);

	kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	INIT_WORK(&rdma->sc_work, __svc_rdma_free);
	queue_work(svc_rdma_wq, &rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);

	/*
	 * If there are already waiters on the SQ,
	 * return 0.
	 */
	if (waitqueue_active(&rdma->sc_send_wait))
		return 0;

	/* Otherwise return true. */
	return 1;
}

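/*
 * Note: has_wspace is only a heuristic.  The send path can still block
 * in svc_rdma_send() if the SQ fills after this check; returning 0
 * when senders are already sleeping on sc_send_wait simply keeps the
 * generic svc code from treating the transport as writable.
 */
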
static int svc_rdma_secure_port(struct svc_rqst *rqstp)
{
	return 1;
}

/*
 * Attempt to register the kvec representing the RPC memory with the
 * device.
 *
 * Returns:
 *  0   : The FASTREG WR was posted successfully.
 *  <0  : An error was encountered attempting to post the WR.
 */
int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
		     struct svc_rdma_fastreg_mr *frmr)
{
	struct ib_send_wr fastreg_wr;
	u8 key;

	/* Bump the key */
	key = (u8)(frmr->mr->lkey & 0x000000FF);
	ib_update_fast_reg_key(frmr->mr, ++key);

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof fastreg_wr);
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.send_flags = IB_SEND_SIGNALED;
	fastreg_wr.wr.fast_reg.iova_start = (unsigned long)frmr->kva;
	fastreg_wr.wr.fast_reg.page_list = frmr->page_list;
	fastreg_wr.wr.fast_reg.page_list_len = frmr->page_list_len;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.length = frmr->map_len;
	fastreg_wr.wr.fast_reg.access_flags = frmr->access_flags;
	fastreg_wr.wr.fast_reg.rkey = frmr->mr->lkey;
	return svc_rdma_send(xprt, &fastreg_wr);
}

int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr, *n_wr;
	int wr_count;
	int i;
	int ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	wr_count = 1;
	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
		wr_count++;

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
			spin_unlock_bh(&xprt->sc_lock);
			atomic_inc(&rdma_stat_sq_starve);

			/* See if we can opportunistically reap SQ WR to make room */
			sq_cq_reap(xprt);

			/* Wait until SQ WR available if SQ still full */
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return -ENOTCONN;
			continue;
		}
		/* Take a transport ref for each WR posted */
		for (i = 0; i < wr_count; i++)
			svc_xprt_get(&xprt->sc_xprt);

		/* Bump used SQ WR count and post */
		atomic_add(wr_count, &xprt->sc_sq_count);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (ret) {
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			atomic_sub(wr_count, &xprt->sc_sq_count);
			for (i = 0; i < wr_count; i++)
				svc_xprt_put(&xprt->sc_xprt);
			dprintk("svcrdma: failed to post SQ WR rc=%d, "
				"sc_sq_count=%d, sc_sq_depth=%d\n",
				ret, atomic_read(&xprt->sc_sq_count),
				xprt->sc_sq_depth);
		}
		spin_unlock_bh(&xprt->sc_lock);
		if (ret)
			wake_up(&xprt->sc_send_wait);
		break;
	}
	return ret;
}

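/*
 * Usage sketch (hypothetical caller, illustrative names): a chain of
 * WRs linked through ->next can be posted with one svc_rdma_send()
 * call; every WR in the chain is counted against sc_sq_depth and gets
 * its own transport reference:
 *
 *	write_wr.next = &send_wr;
 *	send_wr.next  = NULL;
 *	ret = svc_rdma_send(rdma, &write_wr);
 *
 * sq_cq_reap() releases the slot and the reference as each completion
 * arrives.
 */
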
void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
			 enum rpcrdma_errcode err)
{
	struct ib_send_wr err_wr;
	struct page *p;
	struct svc_rdma_op_ctxt *ctxt;
	u32 *va;
	int length;
	int ret;

	p = svc_rdma_get_page();
	va = page_address(p);

	/* XDR encode error */
	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);

	ctxt = svc_rdma_get_context(xprt);
	/* The device reads this buffer when the SEND is posted */
	ctxt->direction = DMA_TO_DEVICE;
	ctxt->count = 1;
	ctxt->pages[0] = p;

	/* Prepare SGE for local address */
	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
					    p, 0, length, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
		dprintk("svcrdma: Error mapping buffer for protocol error\n");
		svc_rdma_put_context(ctxt, 1);
		return;
	}
	atomic_inc(&xprt->sc_dma_used);
	ctxt->sge[0].lkey = xprt->sc_dma_lkey;
	ctxt->sge[0].length = length;

	/* Prepare SEND WR */
	memset(&err_wr, 0, sizeof err_wr);
	ctxt->wr_op = IB_WR_SEND;
	err_wr.wr_id = (unsigned long)ctxt;
	err_wr.sg_list = ctxt->sge;
	err_wr.num_sge = 1;
	err_wr.opcode = IB_WR_SEND;
	err_wr.send_flags = IB_SEND_SIGNALED;

	/* Post It */
	ret = svc_rdma_send(xprt, &err_wr);
	if (ret) {
		dprintk("svcrdma: Error %d posting send for protocol error\n",
			ret);
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);
	}
}