/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_user_verbs.h>
#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/random.h>

#include "qib.h"
#include "qib_common.h"
static unsigned int ib_qib_qp_table_size = 256;
module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

unsigned int ib_qib_lkey_table_size = 16;
module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint,
		   S_IRUGO);
MODULE_PARM_DESC(lkey_table_size,
		 "LKEY table size in bits (2^n, 1 <= n <= 23)");
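/*
 * Illustrative usage (not from this file): loading the module with
 * "modprobe ib_qib lkey_table_size=17" would double the memory-region
 * table from the default 2^16 = 65536 entries to 2^17 = 131072.
 */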
static unsigned int ib_qib_max_pds = 0xFFFF;
module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO);
MODULE_PARM_DESC(max_pds,
		 "Maximum number of protection domains to support");

static unsigned int ib_qib_max_ahs = 0xFFFF;
module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO);
MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");

unsigned int ib_qib_max_cqes = 0x2FFFF;
module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqes,
		 "Maximum number of completion queue entries to support");

unsigned int ib_qib_max_cqs = 0x1FFFF;
module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");

unsigned int ib_qib_max_qp_wrs = 0x3FFF;
module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");

unsigned int ib_qib_max_qps = 16384;
module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO);
MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");

unsigned int ib_qib_max_sges = 0x60;
module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");

unsigned int ib_qib_max_mcast_grps = 16384;
module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_grps,
		 "Maximum number of multicast groups to support");

unsigned int ib_qib_max_mcast_qp_attached = 16;
module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached,
		   uint, S_IRUGO);
MODULE_PARM_DESC(max_mcast_qp_attached,
		 "Maximum number of attached QPs to support");

unsigned int ib_qib_max_srqs = 1024;
module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");

unsigned int ib_qib_max_srq_sges = 128;
module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");

unsigned int ib_qib_max_srq_wrs = 0x1FFFF;
module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO);
MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
static unsigned int ib_qib_disable_sma;
module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(disable_sma, "Disable the SMA");
/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; qib_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_qib_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = QIB_POST_RECV_OK,
	[IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK,
	[IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK |
	    QIB_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK,
	[IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
	[IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV |
	    QIB_POST_SEND_OK | QIB_FLUSH_SEND,
};
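/*
 * A minimal sketch of how this table is consulted elsewhere in this
 * file (the bitmask test is existing driver convention, not new API):
 *
 *	if (!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))
 *		return -EINVAL;		(reject the post)
 *
 * SQE and ERR set QIB_POST_SEND_OK together with QIB_FLUSH_SEND, so a
 * post is accepted but then completed as a flush error, per the C10-96
 * note above.
 */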
struct qib_ucontext {
	struct ib_ucontext ibucontext;
};

static inline struct qib_ucontext *to_iucontext(struct ib_ucontext
						*ibucontext)
{
	return container_of(ibucontext, struct qib_ucontext, ibucontext);
}
/*
 * Translate ib_wr_opcode into ib_wc_opcode.
 */
const enum ib_wc_opcode ib_qib_wc_opcode[] = {
	[IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
	[IB_WR_SEND] = IB_WC_SEND,
	[IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
	[IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
};
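/*
 * For example, a completed IB_WR_SEND_WITH_IMM is reported in the CQE
 * as IB_WC_SEND and IB_WR_RDMA_WRITE_WITH_IMM as IB_WC_RDMA_WRITE; the
 * immediate data travels in the completion's wc_flags/imm_data fields,
 * not in the opcode.
 */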
/*
 * System image GUID.
 */
__be64 ib_qib_sys_image_guid;

/**
 * qib_copy_sge - copy data to SGE memory
 * @ss: the SGE state
 * @data: the data to copy
 * @length: the length of the data
 */
void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		memcpy(sge->vaddr, data, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				qib_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}
/**
 * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func
 * @ss: the SGE state
 * @length: the number of bytes to skip
 */
void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (release)
				qib_put_mr(sge->mr);
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
}
/*
 * Count the number of DMA descriptors needed to send length bytes of data.
 * Don't modify the qib_sge_state to get the count.
 * Return zero if any of the segments is not aligned.
 */
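/*
 * Worked example: three SGEs whose addresses are dword aligned and
 * whose lengths are multiples of 4 count as 1 (header) + 3 = 4
 * descriptors.  If any segment other than the last has a misaligned
 * address or a non-dword length, 0 is returned and the caller falls
 * back to copying into a bounce buffer instead of gather DMA.
 */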
static u32 qib_count_sge(struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sg_list = ss->sg_list;
	struct qib_sge sge = ss->sge;
	u8 num_sge = ss->num_sge;
	u32 ndesc = 1;	/* count the header */

	while (length) {
		u32 len = sge.length;

		if (len > length)
			len = length;
		if (len > sge.sge_length)
			len = sge.sge_length;
		if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
		    (len != length && (len & (sizeof(u32) - 1)))) {
			ndesc = 0;
			break;
		}
		ndesc++;
		sge.vaddr += len;
		sge.length -= len;
		sge.sge_length -= len;
		if (sge.sge_length == 0) {
			if (--num_sge)
				sge = *sg_list++;
		} else if (sge.length == 0 && sge.mr->lkey) {
			if (++sge.n >= QIB_SEGSZ) {
				if (++sge.m >= sge.mr->mapsz)
					break;
				sge.n = 0;
			}
			sge.vaddr =
				sge.mr->map[sge.m]->segs[sge.n].vaddr;
			sge.length =
				sge.mr->map[sge.m]->segs[sge.n].length;
		}
		length -= len;
	}
	return ndesc;
}
/*
 * Copy from the SGEs to the data buffer.
 */
static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sge = &ss->sge;

	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		memcpy(data, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ss->num_sge)
				*sge = *ss->sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		data += len;
		length -= len;
	}
}
/**
 * qib_post_one_send - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
			     int *scheduled)
{
	struct qib_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	int ret;
	unsigned long flags;
	struct qib_lkey_table *rkt;
	struct qib_pd *pd;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Check that state is OK to post send. */
	if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK)))
		goto bail_inval;

	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge)
		goto bail_inval;

	/*
	 * Don't allow RDMA reads or atomic operations on UC, or any
	 * undefined operations.
	 * Make sure the buffer is large enough to hold the result for
	 * atomics.
	 */
	if (wr->opcode == IB_WR_FAST_REG_MR) {
		if (qib_fast_reg_mr(qp, wr))
			goto bail_inval;
	} else if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
			goto bail_inval;
	} else if (qp->ibqp.qp_type != IB_QPT_RC) {
		/* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */
		if (wr->opcode != IB_WR_SEND &&
		    wr->opcode != IB_WR_SEND_WITH_IMM)
			goto bail_inval;
		/* Check UD destination address PD */
		if (qp->ibqp.pd != wr->wr.ud.ah->pd)
			goto bail_inval;
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
		goto bail_inval;
	else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		 (wr->num_sge == 0 ||
		  wr->sg_list[0].length < sizeof(u64) ||
		  wr->sg_list[0].addr & (sizeof(u64) - 1)))
		goto bail_inval;
	else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
		goto bail_inval;

	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		ret = -ENOMEM;
		goto bail;
	}

	rkt = &to_idev(qp->ibqp.device)->lk_table;
	pd = to_ipd(qp->ibqp.pd);
	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;
			int ok;

			if (length == 0)
				continue;
			ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j],
					 &wr->sg_list[i], acc);
			if (!ok)
				goto bail_inval_free;
			wqe->length += length;
			j++;
		}
		wqe->wr.num_sge = j;
	}
	if (qp->ibqp.qp_type == IB_QPT_UC ||
	    qp->ibqp.qp_type == IB_QPT_RC) {
		if (wqe->length > 0x80000000U)
			goto bail_inval_free;
	} else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport +
				  qp->port_num - 1)->ibmtu)
		goto bail_inval_free;
	else
		atomic_inc(&to_iah(wr->wr.ud.ah)->refcount);
	wqe->ssn = qp->s_ssn++;
	qp->s_head = next;

	ret = 0;
	goto bail;

bail_inval_free:
	while (j) {
		struct qib_sge *sge = &wqe->sg_list[--j];

		qib_put_mr(sge->mr);
	}
bail_inval:
	ret = -EINVAL;
bail:
	if (!ret && !wr->next &&
	    !qib_sdma_empty(
	       dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
		qib_schedule_send(qp);
		*scheduled = 1;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
/**
 * qib_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			 struct ib_send_wr **bad_wr)
{
	struct qib_qp *qp = to_iqp(ibqp);
	int err = 0;
	int scheduled = 0;

	for (; wr; wr = wr->next) {
		err = qib_post_one_send(qp, wr, &scheduled);
		if (err) {
			*bad_wr = wr;
			goto bail;
		}
	}

	/* Try to do the send work in the caller's context. */
	if (!scheduled)
		qib_do_send(&qp->s_work);

bail:
	return err;
}
/**
 * qib_post_receive - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 */
static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			    struct ib_recv_wr **bad_wr)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int ret;

	/* Check that state is OK to post receive. */
	if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		ret = -EINVAL;
		goto bail;
	}

	for (; wr; wr = wr->next) {
		struct qib_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}
/**
 * qib_qp_rcv - process an incoming packet on a QP
 * @rcd: the context pointer
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_ib_rcv() to process an incoming packet
 * for the given QP.
 * Called at interrupt level.
 */
static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
		       int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;

	spin_lock(&qp->r_lock);

	/* Check for valid receive state. */
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->n_pkt_drops++;
		goto unlock;
	}

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (ib_qib_disable_sma)
			break;
		/* FALLTHROUGH */
	case IB_QPT_UD:
		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}

unlock:
	spin_unlock(&qp->r_lock);
}
/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level.  Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
	struct qib_pportdata *ppd = rcd->ppd;
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct qib_ib_header *hdr = rhdr;
	struct qib_other_headers *ohdr;
	struct qib_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* 24 == LRH+BTH+CRC */
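	/*
	 * That is: 8-byte LRH + 12-byte BTH + 4-byte ICRC = 24 bytes,
	 * the smallest frame a BTH-only IB packet can occupy; anything
	 * shorter cannot be a valid packet.
	 */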
	if (unlikely(tlen < 24))
		goto drop;

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < QIB_MULTICAST_LID_BASE) {
		lid &= ~((1 << ppd->lmc) - 1);
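		/*
		 * Example: with lmc == 2 the low two bits of the DLID
		 * are path bits, so base..base+3 all address this port;
		 * masking them off recovers the base LID in ppd->lid.
		 */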
		if (unlikely(lid != ppd->lid))
			goto drop;
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == QIB_LRH_GRH) {
		u32 vtf;

		ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else
		goto drop;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	ibp->opstats[opcode & 0x7f].n_bytes += tlen;
	ibp->opstats[opcode & 0x7f].n_packets++;

	/* Get the destination QP number. */
	qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
	if (qp_num == QIB_MULTICAST_QPN) {
		struct qib_mcast *mcast;
		struct qib_mcast_qp *p;

		if (lnh != QIB_LRH_GRH)
			goto drop;
		mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
		if (mcast == NULL)
			goto drop;
		ibp->n_multicast_rcv++;
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify qib_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		if (rcd->lookaside_qp) {
			if (rcd->lookaside_qpn != qp_num) {
				if (atomic_dec_and_test(
					    &rcd->lookaside_qp->refcount))
					wake_up(
					     &rcd->lookaside_qp->wait);
				rcd->lookaside_qp = NULL;
			}
		}
		if (!rcd->lookaside_qp) {
			qp = qib_lookup_qpn(ibp, qp_num);
			if (!qp)
				goto drop;
			rcd->lookaside_qp = qp;
			rcd->lookaside_qpn = qp_num;
		} else
			qp = rcd->lookaside_qp;
		ibp->n_unicast_rcv++;
		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
	}
	return;

drop:
	ibp->n_pkt_drops++;
}
/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct qib_ibdev *dev = (struct qib_ibdev *) data;
	struct list_head *list = &dev->memwait;
	struct qib_qp *qp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(list)) {
		qp = list_entry(list->next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	if (qp) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_KMEM) {
			qp->s_flags &= ~QIB_S_WAIT_KMEM;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
static void update_sge(struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= QIB_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif
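/*
 * Worked example for the little-endian branch above: with
 * data = 0x11223344, clear_upper_bytes(data, 1, 0) shifts left by
 * 24 bits (0x44000000) and back right by 24 (0x00000044), keeping
 * only the first byte in memory order - the property the partial
 * trailing-dword writes in copy_io() below depend on.
 */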
static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l, extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			qib_pio_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		qib_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		qib_flush_wc();
	} else
		__raw_writel(last, piobuf);
}
static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
					   struct qib_qp *qp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	spin_lock(&dev->pending_lock);

	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock(&dev->pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
		    list_empty(&qp->iowait)) {
			dev->n_txwait++;
			qp->s_flags |= QIB_S_WAIT_TX;
			list_add_tail(&qp->iowait, &dev->txwait);
		}
		qp->s_flags &= ~QIB_S_BUSY;
		spin_unlock(&dev->pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = ERR_PTR(-EBUSY);
	}
	return tx;
}
static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
					 struct qib_qp *qp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	/* assume the list is usually non-empty */
	if (likely(!list_empty(&dev->txreq_free))) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		/* call slow path to get the extra lock */
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		tx = __get_txreq(dev, qp);
	}
	return tx;
}
void qib_put_txreq(struct qib_verbs_txreq *tx)
{
	struct qib_ibdev *dev;
	struct qib_qp *qp;
	unsigned long flags;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
	if (tx->mr) {
		qib_put_mr(tx->mr);
		tx->mr = NULL;
	}
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
		dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
				 tx->txreq.addr, tx->hdr_dwords << 2,
				 DMA_TO_DEVICE);
		kfree(tx->align_buf);
	}

	spin_lock_irqsave(&dev->pending_lock, flags);

	/* Put struct back on free list */
	list_add(&tx->txreq.list, &dev->txreq_free);

	if (!list_empty(&dev->txwait)) {
		/* Wake up first QP wanting a free struct */
		qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		spin_unlock_irqrestore(&dev->pending_lock, flags);

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_TX) {
			qp->s_flags &= ~QIB_S_WAIT_TX;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	} else
		spin_unlock_irqrestore(&dev->pending_lock, flags);
}
/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
	struct qib_qp *qp, *nqp;
	struct qib_qp *qps[20];
	struct qib_ibdev *dev;
	unsigned i, n;

	n = 0;
	dev = &ppd->dd->verbs_dev;
	spin_lock(&dev->pending_lock);

	/* Search wait list for first QP wanting DMA descriptors. */
	list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
		if (qp->port_num != ppd->port)
			continue;
		if (n == ARRAY_SIZE(qps))
			break;
		if (qp->s_tx->txreq.sg_count > avail)
			break;
		avail -= qp->s_tx->txreq.sg_count;
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}

	spin_unlock(&dev->pending_lock);

	for (i = 0; i < n; i++) {
		qp = qps[i];
		spin_lock(&qp->s_lock);
		if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
			qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
			qib_schedule_send(qp);
		}
		spin_unlock(&qp->s_lock);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
/*
 * This is called with ppd->sdma_lock held.
 */
static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
	struct qib_verbs_txreq *tx =
		container_of(cookie, struct qib_verbs_txreq, txreq);
	struct qib_qp *qp = tx->qp;

	spin_lock(&qp->s_lock);
	if (tx->wqe)
		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct qib_ib_header *hdr;

		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
			hdr = &tx->align_buf->hdr;
		else {
			struct qib_ibdev *dev = to_idev(qp->ibqp.device);

			hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
		}
		qib_rc_send_complete(qp, hdr);
	}
	if (atomic_dec_and_test(&qp->s_dma_busy)) {
		if (qp->state == IB_QPS_RESET)
			wake_up(&qp->wait_dma);
		else if (qp->s_flags & QIB_S_WAIT_DMA) {
			qp->s_flags &= ~QIB_S_WAIT_DMA;
			qib_schedule_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);

	qib_put_txreq(tx);
}
static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= QIB_S_WAIT_KMEM;
			list_add_tail(&qp->iowait, &dev->memwait);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}
static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_txreq *tx;
	struct qib_pio_header *phdr;
	u32 control;
	u32 ndesc;
	int ret;

	tx = qp->s_tx;
	if (tx) {
		qp->s_tx = NULL;
		/* resend previously constructed packet */
		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
		goto bail;
	}

	tx = get_txreq(dev, qp);
	if (IS_ERR(tx))
		goto bail_tx;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(hdr->lrh[0]) >> 12);
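	/* lrh[0] >> 12 extracts the 4-bit VL field for the PBC control. */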
	tx->qp = qp;
	atomic_inc(&qp->refcount);
	tx->wqe = qp->s_wqe;
	tx->mr = qp->s_rdma_mr;
	if (qp->s_rdma_mr)
		qp->s_rdma_mr = NULL;
	tx->txreq.callback = sdma_complete;
	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
	else
		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
	if (plen + 1 > dd->piosize2kmax_dwords)
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;

	if (len) {
		/*
		 * Don't try to DMA if it takes more descriptors than
		 * are available.
		 */
		ndesc = qib_count_sge(ss, len);
		if (ndesc >= ppd->sdma_descq_cnt)
			ndesc = 0;
	} else
		ndesc = 1;
	if (ndesc) {
		phdr = &dev->pio_hdrs[tx->hdr_inx];
		phdr->pbc[0] = cpu_to_le32(plen);
		phdr->pbc[1] = cpu_to_le32(control);
		memcpy(&phdr->hdr, hdr, hdrwords << 2);
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
		tx->txreq.sg_count = ndesc;
		tx->txreq.addr = dev->pio_hdrs_phys +
			tx->hdr_inx * sizeof(struct qib_pio_header);
		tx->hdr_dwords = hdrwords + 2; /* add PBC length */
		ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
		goto bail;
	}

	/* Allocate a buffer and copy the header and payload to it. */
	tx->hdr_dwords = plen + 1;
	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
	if (!phdr)
		goto err_tx;
	phdr->pbc[0] = cpu_to_le32(plen);
	phdr->pbc[1] = cpu_to_le32(control);
	memcpy(&phdr->hdr, hdr, hdrwords << 2);
	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);

	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
					tx->hdr_dwords << 2, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
		goto map_err;
	tx->align_buf = phdr;
	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
	tx->txreq.sg_count = 1;

	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
	goto unaligned;

map_err:
	kfree(phdr);
err_tx:
	qib_put_txreq(tx);
	ret = wait_kmem(dev, qp);
unaligned:
	ibp->n_unaligned++;
bail:
	return ret;
bail_tx:
	ret = PTR_ERR(tx);
	goto bail;
}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct qib_qp *qp)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, qib_ib_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			dev->n_piowait++;
			qp->s_flags |= QIB_S_WAIT_PIO;
			list_add_tail(&qp->iowait, &dev->piowait);
			dd = dd_from_dev(dev);
			dd->f_wantpiobuf_intr(dd, 1);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}
static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
	u32 *hdr = (u32 *) ibhdr;
	u32 __iomem *piobuf_orig;
	u32 __iomem *piobuf;
	u64 pbc;
	unsigned long flags;
	unsigned flush_wc;
	u32 control;
	u32 pbufn;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(ibhdr->lrh[0]) >> 12);
	pbc = ((u64) control << 32) | plen;
	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (unlikely(piobuf == NULL))
		return no_bufs_available(qp);

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);
	piobuf_orig = piobuf;
	piobuf += 2;

	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		if (flush_wc) {
			qib_flush_wc();
			qib_pio_copy(piobuf, hdr, hdrwords - 1);
			qib_flush_wc();
			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, hdr, hdrwords);
		goto done;
	}

	if (flush_wc)
		qib_flush_wc();
	qib_pio_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		if (flush_wc) {
			qib_pio_copy(piobuf, addr, dwords - 1);
			/* must flush early everything before trigger word */
			qib_flush_wc();
			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
			/* be sure trigger word is written */
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, addr, dwords);
		goto done;
	}
	copy_io(piobuf, ss, len, flush_wc);
done:
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
	}
	qib_sendbuf_done(dd, pbufn);
	if (qp->s_rdma_mr) {
		qib_put_mr(qp->s_rdma_mr);
		qp->s_rdma_mr = NULL;
	}
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_rc_send_complete(qp, ibhdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
	return 0;
}
/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
 */
int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct qib_sge_state *ss, u32 len)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	u32 plen;
	int ret;
	u32 dwords = (len + 3) >> 2;

	/*
	 * Calculate the send buffer trigger address.
	 * The +1 counts for the pbc control dword following the pbc length.
	 */
	plen = hdrwords + dwords + 1;
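	/*
	 * Worked example: hdrwords == 14 and a 256-byte payload give
	 * dwords = (256 + 3) >> 2 = 64, so plen = 14 + 64 + 1 = 79,
	 * the extra dword being the PBC control word noted above.
	 */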
	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    !(dd->flags & QIB_HAS_SEND_DMA))
		ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
					 plen, dwords);
	else
		ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
					 plen, dwords);

	return ret;
}

int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait)
{
	int ret;
	struct qib_devdata *dd = ppd->dd;

	if (!(dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);

	ret = 0;

bail:
	return ret;
}
/**
 * qib_get_counters - get various chip counters
 * @ppd: the physical port of the qlogic_ib device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs)
{
	int ret;

	if (!(ppd->dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
	cntrs->link_error_recovery_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection.  We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
	cntrs->port_rcv_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
	cntrs->port_rcv_remphys_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
	cntrs->port_xmit_discards =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
	cntrs->port_xmit_data = ppd->dd->f_portcntr(ppd,
			QIBPORTCNTR_WORDSEND);
	cntrs->port_rcv_data = ppd->dd->f_portcntr(ppd,
			QIBPORTCNTR_WORDRCV);
	cntrs->port_xmit_packets = ppd->dd->f_portcntr(ppd,
			QIBPORTCNTR_PKTSEND);
	cntrs->port_rcv_packets = ppd->dd->f_portcntr(ppd,
			QIBPORTCNTR_PKTRCV);
	cntrs->local_link_integrity_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
	cntrs->excessive_buffer_overrun_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
	cntrs->vl15_dropped =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);

	ret = 0;

bail:
	return ret;
}
/**
 * qib_ib_piobufavail - callback when a PIO buffer is available
 * @dd: the device pointer
 *
 * This is called from qib_intr() at interrupt level when a PIO buffer is
 * available after qib_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
void qib_ib_piobufavail(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct list_head *list;
	struct qib_qp *qps[5];
	struct qib_qp *qp;
	unsigned long flags;
	unsigned i, n;

	list = &dev->piowait;
	n = 0;

	/*
	 * Note: checking that the piowait list is empty and clearing
	 * the buffer available interrupt needs to be atomic or we
	 * could end up with QPs on the wait list with the interrupt
	 * enabled.
	 */
	spin_lock_irqsave(&dev->pending_lock, flags);
	while (!list_empty(list)) {
		if (n == ARRAY_SIZE(qps))
			goto full;
		qp = list_entry(list->next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}
	dd->f_wantpiobuf_intr(dd, 0);
full:
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	for (i = 0; i < n; i++) {
		qp = qps[i];

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_PIO) {
			qp->s_flags &= ~QIB_S_WAIT_PIO;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify qib_destroy_qp() if it is waiting. */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}
static int qib_query_device(struct ib_device *ibdev,
			    struct ib_device_attr *props)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibdev *dev = to_idev(ibdev);

	memset(props, 0, sizeof(*props));

	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	props->page_size_cap = PAGE_SIZE;
	props->vendor_id =
		QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
	props->vendor_part_id = dd->deviceid;
	props->hw_ver = dd->minrev;
	props->sys_image_guid = ib_qib_sys_image_guid;
	props->max_mr_size = ~0ULL;
	props->max_qp = ib_qib_max_qps;
	props->max_qp_wr = ib_qib_max_qp_wrs;
	props->max_sge = ib_qib_max_sges;
	props->max_cq = ib_qib_max_cqs;
	props->max_ah = ib_qib_max_ahs;
	props->max_cqe = ib_qib_max_cqes;
	props->max_mr = dev->lk_table.max;
	props->max_fmr = dev->lk_table.max;
	props->max_map_per_fmr = 32767;
	props->max_pd = ib_qib_max_pds;
	props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
	props->max_qp_init_rd_atom = 255;
	/* props->max_res_rd_atom */
	props->max_srq = ib_qib_max_srqs;
	props->max_srq_wr = ib_qib_max_srq_wrs;
	props->max_srq_sge = ib_qib_max_srq_sges;
	/* props->local_ca_ack_delay */
	props->atomic_cap = IB_ATOMIC_GLOB;
	props->max_pkeys = qib_get_npkeys(dd);
	props->max_mcast_grp = ib_qib_max_mcast_grps;
	props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;

	return 0;
}
static int qib_query_port(struct ib_device *ibdev, u8 port,
			  struct ib_port_attr *props)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	enum ib_mtu mtu;
	u16 lid = ppd->lid;

	memset(props, 0, sizeof(*props));
	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = ppd->lmc;
	props->sm_lid = ibp->sm_lid;
	props->sm_sl = ibp->sm_sl;
	props->state = dd->f_iblink_state(ppd->lastibcstat);
	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
	props->port_cap_flags = ibp->port_cap_flags;
	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = qib_get_npkeys(dd);
	props->bad_pkey_cntr = ibp->pkey_violations;
	props->qkey_viol_cntr = ibp->qkey_violations;
	props->active_width = ppd->link_width_active;
	/* See rate_show() */
	props->active_speed = ppd->link_speed_active;
	props->max_vl_num = qib_num_vls(ppd->vls_supported);
	props->init_type_reply = 0;

	props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	switch (ppd->ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = ibp->subnet_timeout;

	return 0;
}
static int qib_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	struct qib_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc, 64);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_qib_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}
static int qib_modify_port(struct ib_device *ibdev, u8 port,
			   int port_modify_mask, struct ib_port_modify *props)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

	ibp->port_cap_flags |= props->set_port_cap_mask;
	ibp->port_cap_flags &= ~props->clr_port_cap_mask;
	if (props->set_port_cap_mask || props->clr_port_cap_mask)
		qib_cap_mask_chg(ibp);
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		ibp->qkey_violations = 0;
	return 0;
}
static int qib_query_gid(struct ib_device *ibdev, u8 port,
			 int index, union ib_gid *gid)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret = 0;

	if (!port || port > dd->num_pports)
		ret = -EINVAL;
	else {
		struct qib_ibport *ibp = to_iport(ibdev, port);
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		gid->global.subnet_prefix = ibp->gid_prefix;
		if (index == 0)
			gid->global.interface_id = ppd->guid;
		else if (index < QIB_GUIDS_PER_PORT)
			gid->global.interface_id = ibp->guids[index - 1];
		else
			ret = -EINVAL;
	}

	return ret;
}
static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_pd *pd;
	struct ib_pd *ret;

	/*
	 * This is actually totally arbitrary.  Some correctness tests
	 * assume there's a maximum number of PDs that can be allocated.
	 * We don't actually have this limit, but we fail the test if
	 * we allow allocations of more than we report for this value.
	 */

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock(&dev->n_pds_lock);
	if (dev->n_pds_allocated == ib_qib_max_pds) {
		spin_unlock(&dev->n_pds_lock);
		kfree(pd);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_pds_allocated++;
	spin_unlock(&dev->n_pds_lock);

	/* ib_alloc_pd() will initialize pd->ibpd. */
	pd->user = udata != NULL;

	ret = &pd->ibpd;

bail:
	return ret;
}

static int qib_dealloc_pd(struct ib_pd *ibpd)
{
	struct qib_pd *pd = to_ipd(ibpd);
	struct qib_ibdev *dev = to_idev(ibpd->device);

	spin_lock(&dev->n_pds_lock);
	dev->n_pds_allocated--;
	spin_unlock(&dev->n_pds_lock);

	kfree(pd);

	return 0;
}
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	/* A multicast address requires a GRH (see ch. 8.4.1). */
	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
	    ah_attr->dlid != QIB_PERMISSIVE_LID &&
	    !(ah_attr->ah_flags & IB_AH_GRH))
		goto bail;
	if ((ah_attr->ah_flags & IB_AH_GRH) &&
	    ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
		goto bail;
	if (ah_attr->dlid == 0)
		goto bail;
	if (ah_attr->port_num < 1 ||
	    ah_attr->port_num > ibdev->phys_port_cnt)
		goto bail;
	if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
	    ib_rate_to_mult(ah_attr->static_rate) < 0)
		goto bail;
	if (ah_attr->sl > 15)
		goto bail;
	return 0;
bail:
	return -EINVAL;
}
/**
 * qib_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context.
 */
static struct ib_ah *qib_create_ah(struct ib_pd *pd,
				   struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah;
	struct ib_ah *ret;
	struct qib_ibdev *dev = to_idev(pd->device);
	unsigned long flags;

	if (qib_check_ah(pd->device, ah_attr)) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	if (dev->n_ahs_allocated == ib_qib_max_ahs) {
		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
		kfree(ah);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_ahs_allocated++;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	/* ib_create_ah() will initialize ah->ibah. */
	ah->attr = *ah_attr;
	atomic_set(&ah->refcount, 0);

	ret = &ah->ibah;

bail:
	return ret;
}
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
{
	struct ib_ah_attr attr;
	struct ib_ah *ah = ERR_PTR(-EINVAL);
	struct qib_qp *qp0;

	memset(&attr, 0, sizeof attr);
	attr.dlid = dlid;
	attr.port_num = ppd_from_ibp(ibp)->port;
	rcu_read_lock();
	qp0 = rcu_dereference(ibp->qp0);
	if (qp0)
		ah = ib_create_ah(qp0->ibqp.pd, &attr);
	rcu_read_unlock();
	return ah;
}
/**
 * qib_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int qib_destroy_ah(struct ib_ah *ibah)
{
	struct qib_ibdev *dev = to_idev(ibah->device);
	struct qib_ah *ah = to_iah(ibah);
	unsigned long flags;

	if (atomic_read(&ah->refcount) != 0)
		return -EBUSY;

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	dev->n_ahs_allocated--;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	kfree(ah);

	return 0;
}

static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah = to_iah(ibah);

	if (qib_check_ah(ibah->device, ah_attr))
		return -EINVAL;

	ah->attr = *ah_attr;

	return 0;
}

static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah = to_iah(ibah);

	*ah_attr = ah->attr;

	return 0;
}
/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
unsigned qib_get_npkeys(struct qib_devdata *dd)
{
	return ARRAY_SIZE(dd->rcd[0]->pkeys);
}

/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is setup if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned ret;

	/* dd->rcd null if mini_init or some init failures */
	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
		ret = 0;
	else
		ret = dd->rcd[ctxt]->pkeys[index];

	return ret;
}

static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			  u16 *pkey)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	if (index >= qib_get_npkeys(dd)) {
		ret = -EINVAL;
		goto bail;
	}

	*pkey = qib_get_pkey(to_iport(ibdev, port), index);
	ret = 0;

bail:
	return ret;
}
/**
 * qib_alloc_ucontext - allocate a ucontext
 * @ibdev: the infiniband device
 * @udata: not used by the QLogic_IB driver
 */
static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
					      struct ib_udata *udata)
{
	struct qib_ucontext *context;
	struct ib_ucontext *ret;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	ret = &context->ibucontext;

bail:
	return ret;
}

static int qib_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}
static void init_ibport(struct qib_pportdata *ppd)
{
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = &ppd->ibport_data;

	spin_lock_init(&ibp->lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
	ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
		ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	/* Snapshot current HW counters to "clear" them. */
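	/*
	 * The chip counters cannot be written, so "clearing" means
	 * recording a baseline: later readers (e.g. the PMA code)
	 * subtract these z_* values from the current hardware counts
	 * to present counters that appear to start at zero.
	 */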
	qib_get_counters(ppd, &cntrs);
	ibp->z_symbol_error_counter = cntrs.symbol_error_counter;
	ibp->z_link_error_recovery_counter =
		cntrs.link_error_recovery_counter;
	ibp->z_link_downed_counter = cntrs.link_downed_counter;
	ibp->z_port_rcv_errors = cntrs.port_rcv_errors;
	ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors;
	ibp->z_port_xmit_discards = cntrs.port_xmit_discards;
	ibp->z_port_xmit_data = cntrs.port_xmit_data;
	ibp->z_port_rcv_data = cntrs.port_rcv_data;
	ibp->z_port_xmit_packets = cntrs.port_xmit_packets;
	ibp->z_port_rcv_packets = cntrs.port_rcv_packets;
	ibp->z_local_link_integrity_errors =
		cntrs.local_link_integrity_errors;
	ibp->z_excessive_buffer_overrun_errors =
		cntrs.excessive_buffer_overrun_errors;
	ibp->z_vl15_dropped = cntrs.vl15_dropped;
	RCU_INIT_POINTER(ibp->qp0, NULL);
	RCU_INIT_POINTER(ibp->qp1, NULL);
}
/**
 * qib_register_ib_device - register our device with the infiniband core
 * @dd: the device data structure
 *
 * Return 0 on success, or a negative errno on failure.
 */
int qib_register_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->ibdev;
	struct qib_pportdata *ppd = dd->pport;
	unsigned i, lk_tab_size;
	int ret;

	dev->qp_table_size = ib_qib_qp_table_size;
	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
	dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
				GFP_KERNEL);
	if (!dev->qp_table) {
		ret = -ENOMEM;
		goto err_qpt;
	}
	for (i = 0; i < dev->qp_table_size; i++)
		RCU_INIT_POINTER(dev->qp_table[i], NULL);

	for (i = 0; i < dd->num_pports; i++)
		init_ibport(ppd + i);

	/* Only need to initialize non-zero fields. */
	spin_lock_init(&dev->qpt_lock);
	spin_lock_init(&dev->n_pds_lock);
	spin_lock_init(&dev->n_ahs_lock);
	spin_lock_init(&dev->n_cqs_lock);
	spin_lock_init(&dev->n_qps_lock);
	spin_lock_init(&dev->n_srqs_lock);
	spin_lock_init(&dev->n_mcast_grps_lock);
	init_timer(&dev->mem_timer);
	dev->mem_timer.function = mem_timer;
	dev->mem_timer.data = (unsigned long) dev;

	qib_init_qpn_table(dd, &dev->qpn_table);

	/*
	 * The top ib_qib_lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
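	/*
	 * Example with the default lkey_table_size of 16: a 32-bit LKEY
	 * such as 0xABCD1234 selects entry 0xABCD of the 65536-entry
	 * table, bits 7:0 (0x34) belong to the user, and the bits in
	 * between serve as the generation tag that invalidates stale
	 * keys when a table slot is reused.
	 */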
	spin_lock_init(&dev->lk_table.lock);
	dev->lk_table.max = 1 << ib_qib_lkey_table_size;
	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
	dev->lk_table.table = (struct qib_mregion __rcu **)
		__get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
	if (dev->lk_table.table == NULL) {
		ret = -ENOMEM;
		goto err_lk;
	}
	RCU_INIT_POINTER(dev->dma_mr, NULL);
	for (i = 0; i < dev->lk_table.max; i++)
		RCU_INIT_POINTER(dev->lk_table.table[i], NULL);
	INIT_LIST_HEAD(&dev->pending_mmaps);
	spin_lock_init(&dev->pending_lock);
	dev->mmap_offset = PAGE_SIZE;
	spin_lock_init(&dev->mmap_offset_lock);
	INIT_LIST_HEAD(&dev->piowait);
	INIT_LIST_HEAD(&dev->dmawait);
	INIT_LIST_HEAD(&dev->txwait);
	INIT_LIST_HEAD(&dev->memwait);
	INIT_LIST_HEAD(&dev->txreq_free);

	if (ppd->sdma_descq_cnt) {
		dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev,
						   ppd->sdma_descq_cnt *
						   sizeof(struct qib_pio_header),
						   &dev->pio_hdrs_phys,
						   GFP_KERNEL);
		if (!dev->pio_hdrs) {
			ret = -ENOMEM;
			goto err_hdrs;
		}
	}

	for (i = 0; i < ppd->sdma_descq_cnt; i++) {
		struct qib_verbs_txreq *tx;

		tx = kzalloc(sizeof *tx, GFP_KERNEL);
		if (!tx) {
			ret = -ENOMEM;
			goto err_tx;
		}
		tx->hdr_inx = i;
		list_add(&tx->txreq.list, &dev->txreq_free);
	}

	/*
	 * The system image GUID is supposed to be the same for all
	 * IB HCAs in a single system but since there can be other
	 * device types in the system, we can't be sure this is unique.
	 */
	if (!ib_qib_sys_image_guid)
		ib_qib_sys_image_guid = ppd->guid;

	strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX);
	ibdev->owner = THIS_MODULE;
	ibdev->node_guid = ppd->guid;
	ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION;
	ibdev->uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
		(1ull << IB_USER_VERBS_CMD_QUERY_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
	ibdev->node_type = RDMA_NODE_IB_CA;
	ibdev->phys_port_cnt = dd->num_pports;
	ibdev->num_comp_vectors = 1;
	ibdev->dma_device = &dd->pcidev->dev;
	ibdev->query_device = qib_query_device;
	ibdev->modify_device = qib_modify_device;
	ibdev->query_port = qib_query_port;
	ibdev->modify_port = qib_modify_port;
	ibdev->query_pkey = qib_query_pkey;
	ibdev->query_gid = qib_query_gid;
	ibdev->alloc_ucontext = qib_alloc_ucontext;
	ibdev->dealloc_ucontext = qib_dealloc_ucontext;
	ibdev->alloc_pd = qib_alloc_pd;
	ibdev->dealloc_pd = qib_dealloc_pd;
	ibdev->create_ah = qib_create_ah;
	ibdev->destroy_ah = qib_destroy_ah;
	ibdev->modify_ah = qib_modify_ah;
	ibdev->query_ah = qib_query_ah;
	ibdev->create_srq = qib_create_srq;
	ibdev->modify_srq = qib_modify_srq;
	ibdev->query_srq = qib_query_srq;
	ibdev->destroy_srq = qib_destroy_srq;
	ibdev->create_qp = qib_create_qp;
	ibdev->modify_qp = qib_modify_qp;
	ibdev->query_qp = qib_query_qp;
	ibdev->destroy_qp = qib_destroy_qp;
	ibdev->post_send = qib_post_send;
	ibdev->post_recv = qib_post_receive;
	ibdev->post_srq_recv = qib_post_srq_receive;
	ibdev->create_cq = qib_create_cq;
	ibdev->destroy_cq = qib_destroy_cq;
	ibdev->resize_cq = qib_resize_cq;
	ibdev->poll_cq = qib_poll_cq;
	ibdev->req_notify_cq = qib_req_notify_cq;
	ibdev->get_dma_mr = qib_get_dma_mr;
	ibdev->reg_phys_mr = qib_reg_phys_mr;
	ibdev->reg_user_mr = qib_reg_user_mr;
	ibdev->dereg_mr = qib_dereg_mr;
	ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr;
	ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
	ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
	ibdev->alloc_fmr = qib_alloc_fmr;
	ibdev->map_phys_fmr = qib_map_phys_fmr;
	ibdev->unmap_fmr = qib_unmap_fmr;
	ibdev->dealloc_fmr = qib_dealloc_fmr;
	ibdev->attach_mcast = qib_multicast_attach;
	ibdev->detach_mcast = qib_multicast_detach;
	ibdev->process_mad = qib_process_mad;
	ibdev->mmap = qib_mmap;
	ibdev->dma_ops = &qib_dma_mapping_ops;

	snprintf(ibdev->node_desc, sizeof(ibdev->node_desc),
		 "Intel Infiniband HCA %s", init_utsname()->nodename);

	ret = ib_register_device(ibdev, qib_create_port_files);
	if (ret)
		goto err_reg;

	ret = qib_create_agents(dev);
	if (ret)
		goto err_agents;

	ret = qib_verbs_register_sysfs(dd);
	if (ret)
		goto err_class;

	goto bail;

err_class:
	qib_free_agents(dev);
err_agents:
	ib_unregister_device(ibdev);
err_reg:
err_tx:
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (ppd->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  ppd->sdma_descq_cnt *
					sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
	free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
err_lk:
	kfree(dev->qp_table);
err_qpt:
	qib_dev_err(dd, "cannot register verbs: %d!\n", -ret);
bail:
	return ret;
}
void qib_unregister_ib_device(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct ib_device *ibdev = &dev->ibdev;
	u32 qps_inuse;
	unsigned lk_tab_size;

	qib_verbs_unregister_sysfs(dd);

	qib_free_agents(dev);

	ib_unregister_device(ibdev);

	if (!list_empty(&dev->piowait))
		qib_dev_err(dd, "piowait list not empty!\n");
	if (!list_empty(&dev->dmawait))
		qib_dev_err(dd, "dmawait list not empty!\n");
	if (!list_empty(&dev->txwait))
		qib_dev_err(dd, "txwait list not empty!\n");
	if (!list_empty(&dev->memwait))
		qib_dev_err(dd, "memwait list not empty!\n");
	if (dev->dma_mr)
		qib_dev_err(dd, "DMA MR not NULL!\n");

	qps_inuse = qib_free_all_qps(dd);
	if (qps_inuse)
		qib_dev_err(dd, "QP memory leak! %u still in use\n",
			    qps_inuse);

	del_timer_sync(&dev->mem_timer);
	qib_free_qpn_table(&dev->qpn_table);
	while (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;
		struct qib_verbs_txreq *tx;

		list_del(l);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
		kfree(tx);
	}
	if (dd->pport->sdma_descq_cnt)
		dma_free_coherent(&dd->pcidev->dev,
				  dd->pport->sdma_descq_cnt *
					sizeof(struct qib_pio_header),
				  dev->pio_hdrs, dev->pio_hdrs_phys);
	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
	free_pages((unsigned long) dev->lk_table.table,
		   get_order(lk_tab_size));
	kfree(dev->qp_table);
}
/*
 * This must be called with s_lock held.
 */
void qib_schedule_send(struct qib_qp *qp)
{
	if (qib_send_ok(qp)) {
		struct qib_ibport *ibp =
			to_iport(qp->ibqp.device, qp->port_num);
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		queue_work(ppd->qib_wq, &qp->s_work);
	}
}