/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>

#include "iw_cxgb4.h"
static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers"
		 " automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers"
		 " disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
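
/*
 * A quick usage sketch (assuming the usual iw_cxgb4 module name): all of
 * the above are ordinary module parameters with 0644 permissions, so they
 * can be set at load time or poked at runtime, e.g.
 *
 *	modprobe iw_cxgb4 db_fc_threshold=2000 ocqp_support=0
 *	echo 500 > /sys/module/iw_cxgb4/parameters/db_coalescing_threshold
 */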
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
	int ret = 0;

	spin_lock_irq(&dev->lock);
	if (ird <= dev->avail_ird)
		dev->avail_ird -= ird;
	else
		ret = -ENOMEM;
	spin_unlock_irq(&dev->lock);
	if (ret)
		dev_warn(&dev->rdev.lldi.pdev->dev,
			 "device IRD resources exhausted\n");
	return ret;
}

static void free_ird(struct c4iw_dev *dev, int ird)
{
	spin_lock_irq(&dev->lock);
	dev->avail_ird += ird;
	spin_unlock_irq(&dev->lock);
}

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  pci_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}

static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}

static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{
	int ret = -ENOSYS;

	if (user)
		ret = alloc_oc_sq(rdev, sq);
	if (ret)
		ret = alloc_host_sq(rdev, sq);
	return ret;
}
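
/*
 * SQ placement in a nutshell: user QPs try the on-chip queue pool first
 * (alloc_oc_sq), which is only attempted when ocqp_support is set and the
 * adapter exposes an OCQP memory window; anything else, including an
 * on-chip allocation failure, falls back to coherent host memory.
 */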

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}

static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret = 0;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}
		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2 and at least 16 deep.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	ret = alloc_sq(rdev, &wq->sq, user);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user || is_t5(rdev->lldi.adapter_type)) {
		u32 off;

		off = (wq->sq.qid << rdev->qpshift) & PAGE_MASK;
		if (user) {
			wq->sq.udb = (u64 __iomem *)(rdev->bar2_pa + off);
		} else {
			off += 128 * (wq->sq.qid & rdev->qpmask) + 8;
			wq->sq.udb = (u64 __iomem *)(rdev->bar2_kva + off);
		}
		off = (wq->rq.qid << rdev->qpshift) & PAGE_MASK;
		if (user) {
			wq->rq.udb = (u64 __iomem *)(rdev->bar2_pa + off);
		} else {
			off += 128 * (wq->rq.qid & rdev->qpmask) + 8;
			wq->rq.udb = (u64 __iomem *)(rdev->bar2_kva + off);
		}
	}
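
	/*
	 * The user-doorbell math above: each QP's doorbell lives in its own
	 * BAR2 page, located by shifting the qid by qpshift. User QPs get
	 * the bus address (bar2_pa) so the page can be mmap'd into the
	 * process; kernel QPs on T5 use the ioremap'd bar2_kva, plus a
	 * 128-byte slot per qid within the page (the +8 appears to select
	 * the doorbell register within that slot).
	 */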

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
		 rdev->hw_queue.t4_eq_status_entries;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
		 rdev->hw_queue.t4_eq_status_entries;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;
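
	/*
	 * Both EQs are created with a single FW_RI_RES_WR (NRES = 2); the
	 * cookie written above lets the firmware completion locate our
	 * wr_wait, so c4iw_wait_for_reply() returns once the uP has written
	 * both queue contexts.
	 */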

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%lx rqudb 0x%lx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (__force unsigned long) wq->sq.udb,
	     (__force unsigned long) wq->rq.udb);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}

static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}
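
/*
 * build_immd() copies the SGE payload directly into the WQE ring as
 * FW_RI_DATA_IMMD, wrapping dstp back to the start of the queue when it
 * hits the end, and zero-pads the tail so the immediate chunk stays a
 * multiple of 16 bytes (the unit that len16 counts in).
 */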

static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}
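
/*
 * build_isgl() emits one 16-byte entry per SGE: a flit holding
 * (lkey << 32 | length) followed by a flit with the 64-bit address, with
 * the same end-of-queue wrap handling as build_immd(); a trailing zero
 * flit is written after the last entry.
 */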

static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}
	wqe->send.r3 = 0;
	wqe->send.r4 = 0;

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}
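
/*
 * The else arm above builds a 0B read: the firmware still wants a
 * well-formed READ WQE to retire, so zero addresses/length and a dummy
 * STAG of 2 stand in (STAG 2 is presumably reserved for this purpose).
 */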

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
			 struct ib_send_wr *wr, u8 *len16, u8 t5dev)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
	int rem;

	if (wr->wr.fast_reg.page_list_len >
	    t4_max_fr_depth(use_dsgl))
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
					0xffffffff);

	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
		struct c4iw_fr_page_list *c4pl =
			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
		struct fw_ri_dsgl *sglp;

		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
			wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
				cpu_to_be64((u64)
				wr->wr.fast_reg.page_list->page_list[i]);
		}

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		rem = pbllen;
		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
			*p = cpu_to_be64(
				(u64)wr->wr.fast_reg.page_list->page_list[i]);
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		BUG_ON(rem < 0);
		while (rem) {
			*p = 0;
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
				      + pbllen, 16);
	}
	return 0;
}
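
/*
 * Two ways to hand the PBL to the hardware: on T5 with DSGL enabled and a
 * PBL larger than max_fr_immd, point the hardware at the DMA address of
 * the page list (FW_RI_DATA_DSGL, a single SGE); otherwise inline the
 * whole PBL into the WQE as FW_RI_DATA_IMMD, with the usual queue wrap
 * and zero-padding out to the 32B-rounded pbllen.
 */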

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}

static void add_to_fc_list(struct list_head *head, struct list_head *entry)
{
	if (list_empty(entry))
		list_add_tail(entry, head);
}

static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	spin_lock_irqsave(&qhp->rhp->lock, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_sq_db(&qhp->wq, inc,
			      is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.sq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
	return 0;
}

static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	spin_lock_irqsave(&qhp->rhp->lock, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_rq_db(&qhp->wq, inc,
			      is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.rq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&qhp->rhp->lock, flags);
	return 0;
}
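
/*
 * Doorbell flow control: under db fifo pressure the device moves out of
 * the NORMAL db_state, so instead of ringing immediately these helpers
 * queue the QP on db_fc_list and accumulate the pidx increment in
 * wq_pidx_inc; the deferred increments are replayed later by the db
 * drain/recovery path elsewhere in the driver.
 */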

int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe = NULL;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
					    is_t5(
					    qhp->rhp->rdev.lldi.adapter_type) ?
					    1 : 0);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
				  qhp->sq_sig_all;
		swsqe->flushed = 0;
		swsqe->wr_id = wr->wr_id;
		if (c4iw_wr_log) {
			swsqe->sge_ts = cxgb4_read_sge_timestamp(
					qhp->rhp->rdev.lldi.ports[0]);
			getnstimeofday(&swsqe->host_ts);
		}

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}
	if (!qhp->rhp->rdev.status_page->db_off) {
		t4_ring_sq_db(&qhp->wq, idx,
			      is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_sq_db(qhp, idx);
	}
	return err;
}
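
/*
 * Note the two unlock orders above: when doorbells are on, the db is rung
 * before dropping the qhp lock; when the status page says db_off, the
 * lock is dropped first and ring_kernel_sq_db() takes the heavier
 * rhp->lock to either ring or defer. The same pattern repeats at the
 * bottom of c4iw_post_receive() below.
 */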

int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe = NULL;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
		if (c4iw_wr_log) {
			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
				cxgb4_read_sge_timestamp(
						qhp->rhp->rdev.lldi.ports[0]);
			getnstimeofday(
				&qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
		}

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (!qhp->rhp->rdev.status_page->db_off) {
		t4_ring_rq_db(&qhp->wq, idx,
			      is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_rq_db(qhp, idx);
	}
	return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}

static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}
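
/*
 * In short: build_term_codes() translates the adapter's CQE status into
 * the layer/etype and error-code fields of an IETF TERMINATE message,
 * classifying each error as RDMAP, DDP (tagged vs untagged), or MPA/LLP,
 * and falling back to a local catastrophic code when there is no CQE.
 */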

static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(qhp->ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Caller must hold neither the qp lock nor the cq locks; all of them are
 * acquired here, cq lock first, then qp lock.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{
	int count;
	int rq_flushed, sq_flushed;
	unsigned long flag;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);

	if (qhp->wq.flushed) {
		spin_unlock(&qhp->lock);
		spin_unlock_irqrestore(&rchp->lock, flag);
		return;
	}
	qhp->wq.flushed = 1;

	c4iw_flush_hw_cq(rchp);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(schp);
	sq_flushed = c4iw_flush_sq(qhp);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);

	if (schp == rchp) {
		if (t4_clear_cq_armed(&rchp->cq) &&
		    (rq_flushed || sq_flushed)) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
						   rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
	} else {
		if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
						   rchp->ibcq.cq_context);
			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		}
		if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
						   schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
	}
}

static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	schp = to_c4iw_cq(qhp->ibqp.send_cq);

	t4_set_wq_in_error(&qhp->wq);
	if (qhp->ibqp.uobject) {
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
						   schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}

static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		     struct c4iw_ep *ep)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP_V(FW_RI_INIT_WR) |
		FW_WR_COMPL_F);
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
	wqe->cookie = (unsigned long) &ep->com.wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	PDBG("%s p2p_type = %d\n", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}
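
/*
 * Note the READ_REQ arm writes the opcode through init->u.write even
 * though it then fills in u.read: the two are members of the same union,
 * so the opcode byte lands in the same place either way. The stag/to
 * values of 1 look like harmless placeholders for the 0B RTR operation.
 */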

static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
	     qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto out;
	}
	ret = alloc_ird(rhp, qhp->attr.max_ird);
	if (ret) {
		qhp->attr.max_ird = 0;
		kfree_skb(skb);
		goto out;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP_V(FW_RI_INIT_WR) |
		FW_WR_COMPL_F);
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(qhp->ep->hwtid) |
		FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));

	wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto err1;

	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
	if (!ret)
		goto out;
err1:
	free_ird(rhp, qhp->attr.max_ird);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}

int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > cur_max_read_depth(rhp)) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (mask & C4IW_QP_ATTR_SQ_DB) {
		ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
		goto out;
	}
	if (mask & C4IW_QP_ATTR_RQ_DB) {
		ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
		goto out;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			ep = qhp->ep;
			if (!internal) {
				c4iw_get_ep(&qhp->ep->com);
				terminate = 1;
				disconnect = 1;
			} else {
				terminate = qhp->attr.send_term;
				ret = rdma_fini(rhp, qhp, ep);
				if (ret)
					goto err;
			}
			break;
		case C4IW_QP_STATE_ERROR:
			t4_set_wq_in_error(&qhp->wq);
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	BUG_ON(!ep);
	flush_qp(qhp);
	wake_up(&qhp->wait);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
							 GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}

int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	spin_lock_irq(&rhp->lock);
	if (!list_empty(&qhp->db_fc_entry))
		list_del_init(&qhp->db_fc_entry);
	spin_unlock_irq(&rhp->lock);
	free_ird(rhp, qhp->attr.max_ird);

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}

struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	unsigned int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
		return ERR_PTR(-E2BIG);
	rqsize = attrs->cap.max_recv_wr + 1;
	if (rqsize < 8)
		rqsize = 8;

	if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
		return ERR_PTR(-E2BIG);
	sqsize = attrs->cap.max_send_wr + 1;
	if (sqsize < 8)
		sqsize = 8;

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize =
		(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
	qhp->wq.sq.flush_cidx = -1;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize =
		(rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
		sizeof(*qhp->wq.rq.queue);

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 0;
	qhp->attr.max_ird = 0;
	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	if (ret)
		goto err2;

	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}
		if (t4_sq_onchip(&qhp->wq.sq)) {
			mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
			if (!mm5) {
				ret = -ENOMEM;
				goto err7;
			}
			uresp.flags = C4IW_QPF_ONCHIP;
		} else
			uresp.flags = 0;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		if (mm5) {
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
		} else {
			uresp.ma_sync_key = 0;
		}
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err8;
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = (__force unsigned long) qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = (__force unsigned long) qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
		if (mm5) {
			mm5->key = uresp.ma_sync_key;
			mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
				    + A_PCIE_MA_SYNC) & PAGE_MASK;
			mm5->len = PAGE_SIZE;
			insert_mmap(ucontext, mm5);
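			/*
			 * mm5 maps the BAR0 page holding the PCIE_MA_SYNC
			 * register; userspace presumably touches it to flush
			 * write-combined doorbell writes when the SQ lives
			 * in on-chip memory (it is only set up in that case).
			 */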
		}
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	INIT_LIST_HEAD(&qhp->db_fc_entry);
	PDBG("%s sq id %u size %u memsize %zu num_entries %u "
	     "rq id %u size %u memsize %zu num_entries %u\n", __func__,
	     qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
	     attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
	     qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
	return &qhp->ibqp;
err8:
	kfree(mm5);
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
		(C4IW_QP_ATTR_ENABLE_RDMA_READ |
		 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
		 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 * Only allow this on T4 devices.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
	if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
	    (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
		return -EINVAL;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
	init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
	init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
	init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
	init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
	init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
	init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
	return 0;
}