/*
 * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "qib_verbs.h"
/**
 * qib_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited)
{
	struct qib_cq_wc *wc;
	unsigned long flags;
	u32 head;
	u32 next;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Note that the head pointer might be writable by user processes.
	 * Take care to verify it is a sane value.
	 */
	wc = cq->queue;
	head = wc->head;
	if (head >= (unsigned) cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else
		next = head + 1;
	if (unlikely(next == wc->tail)) {
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return;
	}
	if (cq->ip) {
		wc->uqueue[head].wr_id = entry->wr_id;
		wc->uqueue[head].status = entry->status;
		wc->uqueue[head].opcode = entry->opcode;
		wc->uqueue[head].vendor_err = entry->vendor_err;
		wc->uqueue[head].byte_len = entry->byte_len;
		wc->uqueue[head].ex.imm_data =
			(__u32 __force) entry->ex.imm_data;
		wc->uqueue[head].qp_num = entry->qp->qp_num;
		wc->uqueue[head].src_qp = entry->src_qp;
		wc->uqueue[head].wc_flags = entry->wc_flags;
		wc->uqueue[head].pkey_index = entry->pkey_index;
		wc->uqueue[head].slid = entry->slid;
		wc->uqueue[head].sl = entry->sl;
		wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		wc->uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		smp_wmb();
	} else
		wc->kqueue[head] = *entry;
	wc->head = next;

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED &&
	     (solicited || entry->status != IB_WC_SUCCESS))) {
		cq->notify = IB_CQ_NONE;
		cq->triggered++;
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		queue_work(qib_cq_wq, &cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);
}
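/*
 * Illustrative sketch, not part of the original driver: one way a
 * completion producer could post a receive completion with
 * qib_cq_enter().  The helper name and field values are hypothetical
 * (the real callers live in the QP send/receive paths), and the sketch
 * assumes the embedded ibqp member of struct qib_qp from qib_verbs.h.
 * Kept under "#if 0" so it is never compiled.
 */
#if 0
static void example_post_recv_completion(struct qib_qp *qp, u64 wr_id,
					 u32 byte_len)
{
	struct ib_wc wc;

	memset(&wc, 0, sizeof(wc));
	wc.wr_id = wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.byte_len = byte_len;
	wc.qp = &qp->ibqp;

	/* solicited == 1 also satisfies an IB_CQ_SOLICITED request. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
}
#endif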
/**
 * qib_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 */
int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct qib_cq *cq = to_icq(ibcq);
	struct qib_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip) {
		npolled = -EINVAL;
		goto bail;
	}

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->queue;
	tail = wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

bail:
	return npolled;
}
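/*
 * Illustrative sketch, not part of the original driver: a kernel
 * consumer draining completions through the generic ib_poll_cq()
 * entry point, which lands in qib_poll_cq() above for a qib CQ.
 * The batch size and error handling are arbitrary.  Kept under
 * "#if 0" so it is never compiled.
 */
#if 0
static void example_drain_cq(struct ib_cq *ibcq)
{
	struct ib_wc wc[8];
	int i, n;

	/* ib_poll_cq() returns how many entries were copied out. */
	while ((n = ib_poll_cq(ibcq, ARRAY_SIZE(wc), wc)) > 0) {
		for (i = 0; i < n; i++) {
			if (wc[i].status != IB_WC_SUCCESS)
				pr_err("wr_id %llu failed with status %d\n",
				       (unsigned long long)wc[i].wr_id,
				       wc[i].status);
		}
	}
}
#endif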
static void send_complete(struct work_struct *work)
{
	struct qib_cq *cq = container_of(work, struct qib_cq, comptask);

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, queue_work()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		/*
		 * IPoIB connected mode assumes the callback is from a
		 * soft IRQ.  We simulate this by blocking "bottom halves".
		 * See the implementation for ipoib_cm_handle_tx_wc(),
		 * netif_tx_lock_bh() and netif_tx_lock().
		 */
		local_bh_disable();
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		local_bh_enable();

		if (cq->triggered == triggered)
			return;
	}
}
/**
 * qib_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @context: unused by the QLogic_IB driver
 * @udata: user data for libibverbs.so
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
 */
struct ib_cq *qib_create_cq(struct ib_device *ibdev, int entries,
			    int comp_vector, struct ib_ucontext *context,
			    struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_cq *cq;
	struct qib_cq_wc *wc;
	struct ib_cq *ret;
	u32 sz;

	if (entries < 1 || entries > ib_qib_max_cqes) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	/* Allocate the completion queue structure. */
	cq = kmalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
	else
		sz += sizeof(struct ib_wc) * (entries + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_cq;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		cq->ip = qib_create_mmap_info(dev, sz, context, wc);
		if (!cq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		cq->ip = NULL;

	spin_lock(&dev->n_cqs_lock);
	if (dev->n_cqs_allocated == ib_qib_max_cqs) {
		spin_unlock(&dev->n_cqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_cqs_allocated++;
	spin_unlock(&dev->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->ibcq.cqe = entries;
	cq->notify = IB_CQ_NONE;
	cq->triggered = 0;
	spin_lock_init(&cq->lock);
	INIT_WORK(&cq->comptask, send_complete);
	wc->head = 0;
	wc->tail = 0;
	cq->queue = wc;

	ret = &cq->ibcq;

	goto done;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(wc);
bail_cq:
	kfree(cq);
done:
	return ret;
}
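/*
 * Illustrative sketch, not part of the original driver: how a kernel
 * ULP might create a CQ through ib_create_cq(), which in turn calls
 * qib_create_cq() above.  The handler and sizes are hypothetical, and
 * the ib_create_cq() signature shown is assumed to be the verbs API of
 * this kernel generation.  Kept under "#if 0" so it is never compiled.
 */
#if 0
static void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
	/* Typically: rearm with ib_req_notify_cq() and schedule polling. */
}

static struct ib_cq *example_create_cq(struct ib_device *ibdev)
{
	/* 256 entries, no event handler, no private context, vector 0. */
	return ib_create_cq(ibdev, example_comp_handler, NULL, NULL, 256, 0);
}
#endif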
/**
 * qib_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 */
int qib_destroy_cq(struct ib_cq *ibcq)
{
	struct qib_ibdev *dev = to_idev(ibcq->device);
	struct qib_cq *cq = to_icq(ibcq);

	flush_work(&cq->comptask);
	spin_lock(&dev->n_cqs_lock);
	dev->n_cqs_allocated--;
	spin_unlock(&dev->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, qib_release_mmap_info);
	else
		vfree(cq->queue);
	kfree(cq);

	return 0;
}
/**
 * qib_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 */
int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct qib_cq *cq = to_icq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);

	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;

	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
	    cq->queue->head != cq->queue->tail)
		ret = 1;

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}
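/*
 * Illustrative sketch, not part of the original driver: the usual
 * poll/rearm pattern in a completion handler.  With
 * IB_CQ_REPORT_MISSED_EVENTS, qib_req_notify_cq() returns 1 when
 * entries were queued after the last poll, so the caller re-polls
 * instead of waiting for an event that will not come.  Kept under
 * "#if 0" so it is never compiled.
 */
#if 0
static void example_rearm_and_repoll(struct ib_cq *ibcq)
{
	struct ib_wc wc;

	do {
		while (ib_poll_cq(ibcq, 1, &wc) > 0)
			; /* handle the completion here */
		/*
		 * A return value > 0 means completions slipped in between
		 * the poll and the rearm, so go around again.
		 */
	} while (ib_req_notify_cq(ibcq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
}
#endif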
/**
 * qib_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new size of the CQ
 * @udata: user data for libibverbs.so
 *
 * Returns 0 for success.
 */
int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct qib_cq *cq = to_icq(ibcq);
	struct qib_cq_wc *old_wc;
	struct qib_cq_wc *wc;
	u32 head, tail, n;
	int ret;
	u32 sz;

	if (cqe < 1 || cqe > ib_qib_max_cqes) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	sz = sizeof(*wc);
	if (udata && udata->outlen >= sizeof(__u64))
		sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
	else
		sz += sizeof(struct ib_wc) * (cqe + 1);
	wc = vmalloc_user(sz);
	if (!wc) {
		ret = -ENOMEM;
		goto bail;
	}

	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}
	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	old_wc = cq->queue;
	head = old_wc->head;
	if (head > (u32) cq->ibcq.cqe)
		head = (u32) cq->ibcq.cqe;
	tail = old_wc->tail;
	if (tail > (u32) cq->ibcq.cqe)
		tail = (u32) cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
	for (n = 0; tail != head; n++) {
		if (cq->ip)
			wc->uqueue[n] = old_wc->uqueue[tail];
		else
			wc->kqueue[n] = old_wc->kqueue[tail];
		if (tail == (u32) cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	wc->head = n;
	wc->tail = 0;
	cq->queue = wc;
	spin_unlock_irq(&cq->lock);

	vfree(old_wc);

	if (cq->ip) {
		struct qib_ibdev *dev = to_idev(ibcq->device);
		struct qib_mmap_info *ip = cq->ip;

		qib_update_mmap_info(dev, ip, sz, wc);

		/*
		 * Return the offset to mmap.
		 * See qib_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				goto bail;
		}

		spin_lock_irq(&dev->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = 0;
	goto bail;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(wc);
bail:
	return ret;
}
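/*
 * Illustrative sketch, not part of the original driver: resizing a
 * kernel CQ through ib_resize_cq(), which calls qib_resize_cq() above.
 * Queued completions are preserved by the copy loop above; the resize
 * fails with -EINVAL if the new size cannot hold them.  Kept under
 * "#if 0" so it is never compiled.
 */
#if 0
static int example_grow_cq(struct ib_cq *ibcq, int new_cqe)
{
	int ret;

	ret = ib_resize_cq(ibcq, new_cqe);
	if (ret)
		pr_warn("CQ resize to %d entries failed: %d\n", new_cqe, ret);
	return ret;
}
#endif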