/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

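/*
 * One uld_ctx is allocated per adapter via the cxgb4 upper-layer-driver
 * (ULD) hooks below; it ties the lld_info handed up by cxgb4 to the
 * iw_cxgb4 device built on top of it.
 */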
struct uld_ctx {
        struct list_head entry;
        struct cxgb4_lld_info lldi;
        struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

static struct dentry *c4iw_debugfs_root;

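/*
 * State carried across the open/read/release of one debugfs file:
 * the contents are formatted into buf once at open() time and then
 * served to userspace by debugfs_read().
 */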
struct c4iw_debugfs_data {
        struct c4iw_dev *devp;
        char *buf;
        int bufsize;
        int pos;
};

static int count_idrs(int id, void *p, void *data)
{
        int *countp = data;

        *countp = *countp + 1;
        return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
                            loff_t *ppos)
{
        struct c4iw_debugfs_data *d = file->private_data;

        return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

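/*
 * idr_for_each() callback: format one QP into the snapshot buffer.
 * Returning nonzero stops the walk, which is used below to bail out
 * once the buffer is full.
 */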
static int dump_qp(int id, void *p, void *data)
{
        struct c4iw_qp *qp = p;
        struct c4iw_debugfs_data *qpd = data;
        int space;
        int cc;

        if (id != qp->wq.sq.qid)
                return 0;

        space = qpd->bufsize - qpd->pos - 1;
        if (space == 0)
                return 1;

        if (qp->ep)
                cc = snprintf(qpd->buf + qpd->pos, space,
                              "qp sq id %u rq id %u state %u onchip %u "
                              "ep tid %u state %u %pI4:%u->%pI4:%u\n",
                              qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
                              qp->wq.sq.flags & T4_SQ_ONCHIP,
                              qp->ep->hwtid, (int)qp->ep->com.state,
                              &qp->ep->com.local_addr.sin_addr.s_addr,
                              ntohs(qp->ep->com.local_addr.sin_port),
                              &qp->ep->com.remote_addr.sin_addr.s_addr,
                              ntohs(qp->ep->com.remote_addr.sin_port));
        else
                cc = snprintf(qpd->buf + qpd->pos, space,
                              "qp sq id %u rq id %u state %u onchip %u\n",
                              qp->wq.sq.qid, qp->wq.rq.qid,
                              (int)qp->attr.state,
                              qp->wq.sq.flags & T4_SQ_ONCHIP);
        if (cc < space)
                qpd->pos += cc;
        return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *qpd = file->private_data;
        if (!qpd) {
                printk(KERN_INFO "%s null qpd?\n", __func__);
                return 0;
        }
        vfree(qpd->buf);
        kfree(qpd);
        return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *qpd;
        int ret = 0;
        int count = 1;

        qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
        if (!qpd) {
                ret = -ENOMEM;
                goto out;
        }
        qpd->devp = inode->i_private;
        qpd->pos = 0;

        spin_lock_irq(&qpd->devp->lock);
        idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
        spin_unlock_irq(&qpd->devp->lock);

        qpd->bufsize = count * 128;
        qpd->buf = vmalloc(qpd->bufsize);
        if (!qpd->buf) {
                ret = -ENOMEM;
                goto err1;
        }

        spin_lock_irq(&qpd->devp->lock);
        idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
        spin_unlock_irq(&qpd->devp->lock);

        qpd->buf[qpd->pos++] = 0;
        file->private_data = qpd;
        goto out;
err1:
        kfree(qpd);
out:
        return ret;
}

static const struct file_operations qp_debugfs_fops = {
        .owner   = THIS_MODULE,
        .open    = qp_open,
        .release = qp_release,
        .read    = debugfs_read,
        .llseek  = default_llseek,
};

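/*
 * The "stags" debugfs file lists the active memory-region stags, one
 * hex value per line.  Each idr id is the TPT index, so id << 8
 * reconstructs the stag as the hardware sees it.  (Typically read as,
 * e.g., /sys/kernel/debug/iw_cxgb4/<pci-id>/stags, assuming debugfs
 * is mounted in the usual place.)
 */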
static int dump_stag(int id, void *p, void *data)
{
        struct c4iw_debugfs_data *stagd = data;
        int space;
        int cc;

        space = stagd->bufsize - stagd->pos - 1;
        if (space == 0)
                return 1;

        cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
        if (cc < space)
                stagd->pos += cc;
        return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *stagd = file->private_data;
        if (!stagd) {
                printk(KERN_INFO "%s null stagd?\n", __func__);
                return 0;
        }
        kfree(stagd->buf);
        kfree(stagd);
        return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *stagd;
        int ret = 0;
        int count = 1;

        stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
        if (!stagd) {
                ret = -ENOMEM;
                goto out;
        }
        stagd->devp = inode->i_private;
        stagd->pos = 0;

        spin_lock_irq(&stagd->devp->lock);
        idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
        spin_unlock_irq(&stagd->devp->lock);

        stagd->bufsize = count * sizeof("0x12345678\n");
        stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
        if (!stagd->buf) {
                ret = -ENOMEM;
                goto err1;
        }

        spin_lock_irq(&stagd->devp->lock);
        idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
        spin_unlock_irq(&stagd->devp->lock);

        stagd->buf[stagd->pos++] = 0;
        file->private_data = stagd;
        goto out;
err1:
        kfree(stagd);
out:
        return ret;
}

static const struct file_operations stag_debugfs_fops = {
        .owner   = THIS_MODULE,
        .open    = stag_open,
        .release = stag_release,
        .read    = debugfs_read,
        .llseek  = default_llseek,
};

static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"};

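/*
 * The "stats" debugfs file reports per-resource totals, current usage,
 * high-water marks, and allocation failures via seq_file; writing to
 * the file clears the max/fail counters (see stats_clear() below).
 */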
static int stats_show(struct seq_file *seq, void *v)
{
        struct c4iw_dev *dev = seq->private;

        seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
                   "Max", "Fail");
        seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
                        dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
                        dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
        seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
                        dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
                        dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
        seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
                        dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
                        dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
        seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
                        dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
                        dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
        seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
                        dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
                        dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
        seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
                        dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
                        dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
        seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
        seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
        seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
        seq_printf(seq, " DB State: %s Transitions %llu\n",
                   db_state_str[dev->db_state],
                   dev->rdev.stats.db_state_transitions);
        seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
        seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
                   dev->rdev.stats.act_ofld_conn_fails);
        seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
                   dev->rdev.stats.pas_ofld_conn_fails);
        return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, stats_show, inode->i_private);
}

static ssize_t stats_clear(struct file *file, const char __user *buf,
                           size_t count, loff_t *pos)
{
        struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

        mutex_lock(&dev->rdev.stats.lock);
        dev->rdev.stats.pd.max = 0;
        dev->rdev.stats.pd.fail = 0;
        dev->rdev.stats.qid.max = 0;
        dev->rdev.stats.qid.fail = 0;
        dev->rdev.stats.stag.max = 0;
        dev->rdev.stats.stag.fail = 0;
        dev->rdev.stats.pbl.max = 0;
        dev->rdev.stats.pbl.fail = 0;
        dev->rdev.stats.rqt.max = 0;
        dev->rdev.stats.rqt.fail = 0;
        dev->rdev.stats.ocqp.max = 0;
        dev->rdev.stats.ocqp.fail = 0;
        dev->rdev.stats.db_full = 0;
        dev->rdev.stats.db_empty = 0;
        dev->rdev.stats.db_drop = 0;
        dev->rdev.stats.db_state_transitions = 0;
        dev->rdev.stats.tcam_full = 0;
        dev->rdev.stats.act_ofld_conn_fails = 0;
        dev->rdev.stats.pas_ofld_conn_fails = 0;
        mutex_unlock(&dev->rdev.stats.lock);
        return count;
}

static const struct file_operations stats_debugfs_fops = {
        .owner   = THIS_MODULE,
        .open    = stats_open,
        .release = single_release,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .write   = stats_clear,
};

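/*
 * The "eps" debugfs file walks three idrs (hwtids for connected
 * endpoints, atids for active opens in progress, stids for listeners)
 * and prints one line per endpoint.
 */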
static int dump_ep(int id, void *p, void *data)
{
        struct c4iw_ep *ep = p;
        struct c4iw_debugfs_data *epd = data;
        int space;
        int cc;

        space = epd->bufsize - epd->pos - 1;
        if (space == 0)
                return 1;

        cc = snprintf(epd->buf + epd->pos, space,
                      "ep %p cm_id %p qp %p state %d flags 0x%lx history 0x%lx "
                      "hwtid %d atid %d %pI4:%d <-> %pI4:%d\n",
                      ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state,
                      ep->com.flags, ep->com.history, ep->hwtid, ep->atid,
                      &ep->com.local_addr.sin_addr.s_addr,
                      ntohs(ep->com.local_addr.sin_port),
                      &ep->com.remote_addr.sin_addr.s_addr,
                      ntohs(ep->com.remote_addr.sin_port));
        if (cc < space)
                epd->pos += cc;
        return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
        struct c4iw_listen_ep *ep = p;
        struct c4iw_debugfs_data *epd = data;
        int space;
        int cc;

        space = epd->bufsize - epd->pos - 1;
        if (space == 0)
                return 1;

        cc = snprintf(epd->buf + epd->pos, space,
                      "ep %p cm_id %p state %d flags 0x%lx stid %d backlog %d "
                      "%pI4:%d\n", ep, ep->com.cm_id, (int)ep->com.state,
                      ep->com.flags, ep->stid, ep->backlog,
                      &ep->com.local_addr.sin_addr.s_addr,
                      ntohs(ep->com.local_addr.sin_port));
        if (cc < space)
                epd->pos += cc;
        return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *epd = file->private_data;
        if (!epd) {
                pr_info("%s null epd?\n", __func__);
                return 0;
        }
        vfree(epd->buf);
        kfree(epd);
        return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
        struct c4iw_debugfs_data *epd;
        int ret = 0;
        int count = 1;

        epd = kmalloc(sizeof(*epd), GFP_KERNEL);
        if (!epd) {
                ret = -ENOMEM;
                goto out;
        }
        epd->devp = inode->i_private;
        epd->pos = 0;

        spin_lock_irq(&epd->devp->lock);
        idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
        idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
        idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
        spin_unlock_irq(&epd->devp->lock);

        epd->bufsize = count * 160;
        epd->buf = vmalloc(epd->bufsize);
        if (!epd->buf) {
                ret = -ENOMEM;
                goto err1;
        }

        spin_lock_irq(&epd->devp->lock);
        idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
        idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
        idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
        spin_unlock_irq(&epd->devp->lock);

        file->private_data = epd;
        goto out;
err1:
        kfree(epd);
out:
        return ret;
}

static const struct file_operations ep_debugfs_fops = {
        .owner   = THIS_MODULE,
        .open    = ep_open,
        .release = ep_release,
        .read    = debugfs_read,
};

static int setup_debugfs(struct c4iw_dev *devp)
{
        struct dentry *de;

        if (!devp->debugfs_root)
                return -1;

        de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
                                 (void *)devp, &qp_debugfs_fops);
        if (de && de->d_inode)
                de->d_inode->i_size = 4096;

        de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
                                 (void *)devp, &stag_debugfs_fops);
        if (de && de->d_inode)
                de->d_inode->i_size = 4096;

        de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
                                 (void *)devp, &stats_debugfs_fops);
        if (de && de->d_inode)
                de->d_inode->i_size = 4096;

        de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
                                 (void *)devp, &ep_debugfs_fops);
        if (de && de->d_inode)
                de->d_inode->i_size = 4096;

        return 0;
}

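/*
 * QP ids are handed out in blocks of (qpmask + 1); only the base id of
 * each block lives in the qid table, which is why only entries whose
 * id has the low mask bits clear are returned to the table here.
 */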
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx)
{
        struct list_head *pos, *nxt;
        struct c4iw_qid_list *entry;

        mutex_lock(&uctx->lock);
        list_for_each_safe(pos, nxt, &uctx->qpids) {
                entry = list_entry(pos, struct c4iw_qid_list, entry);
                list_del_init(&entry->entry);
                if (!(entry->qid & rdev->qpmask)) {
                        c4iw_put_resource(&rdev->resource.qid_table,
                                          entry->qid);
                        mutex_lock(&rdev->stats.lock);
                        rdev->stats.qid.cur -= rdev->qpmask + 1;
                        mutex_unlock(&rdev->stats.lock);
                }
                kfree(entry);
        }

        list_for_each_safe(pos, nxt, &uctx->qpids) {
                entry = list_entry(pos, struct c4iw_qid_list, entry);
                list_del_init(&entry->entry);
                kfree(entry);
        }
        mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
                            struct c4iw_dev_ucontext *uctx)
{
        INIT_LIST_HEAD(&uctx->qpids);
        INIT_LIST_HEAD(&uctx->cqids);
        mutex_init(&uctx->lock);
}

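/*
 * Worked example of the qpshift/qpmask math computed in
 * c4iw_rdev_open() below: with 4KB pages (PAGE_SHIFT = 12) and a
 * udb_density of 16 doorbells per page, qpshift = 12 - ilog2(16) = 8
 * and qpmask = 15, so a qid's user doorbell lands at (qid << 8)
 * within the doorbell BAR and qids sharing a page differ only in
 * their low 4 bits.  (The density of 16 is illustrative, not a
 * hardware constant.)
 */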
/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
        int err;

        c4iw_init_dev_ucontext(rdev, &rdev->uctx);

        /*
         * qpshift is the number of bits to shift the qpid left in order
         * to get the correct address of the doorbell for that qp.
         */
        rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
        rdev->qpmask = rdev->lldi.udb_density - 1;
        rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
        rdev->cqmask = rdev->lldi.ucq_density - 1;
        PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
             "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
             "qp qid start %u size %u cq qid start %u size %u\n",
             __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
             rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
             rdev->lldi.vr->pbl.start,
             rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
             rdev->lldi.vr->rq.size,
             rdev->lldi.vr->qp.start,
             rdev->lldi.vr->qp.size,
             rdev->lldi.vr->cq.start,
             rdev->lldi.vr->cq.size);
        PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
             "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
             (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
             (void *)(unsigned long)pci_resource_start(rdev->lldi.pdev, 2),
             rdev->lldi.db_reg,
             rdev->lldi.gts_reg,
             rdev->qpshift, rdev->qpmask,
             rdev->cqshift, rdev->cqmask);

        if (c4iw_num_stags(rdev) == 0) {
                err = -EINVAL;
                goto err1;
        }

        rdev->stats.pd.total = T4_MAX_NUM_PD;
        rdev->stats.stag.total = rdev->lldi.vr->stag.size;
        rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
        rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
        rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
        rdev->stats.qid.total = rdev->lldi.vr->qp.size;

        err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
        if (err) {
                printk(KERN_ERR MOD "error %d initializing resources\n", err);
                goto err1;
        }
        err = c4iw_pblpool_create(rdev);
        if (err) {
                printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
                goto err2;
        }
        err = c4iw_rqtpool_create(rdev);
        if (err) {
                printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
                goto err3;
        }
        err = c4iw_ocqp_pool_create(rdev);
        if (err) {
                printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
                goto err4;
        }
        return 0;
err4:
        c4iw_rqtpool_destroy(rdev);
err3:
        c4iw_pblpool_destroy(rdev);
err2:
        c4iw_destroy_resource(&rdev->resource);
err1:
        return err;
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
        c4iw_pblpool_destroy(rdev);
        c4iw_rqtpool_destroy(rdev);
        c4iw_destroy_resource(&rdev->resource);
}

static void c4iw_dealloc(struct uld_ctx *ctx)
{
        c4iw_rdev_close(&ctx->dev->rdev);
        idr_destroy(&ctx->dev->cqidr);
        idr_destroy(&ctx->dev->qpidr);
        idr_destroy(&ctx->dev->mmidr);
        idr_destroy(&ctx->dev->hwtid_idr);
        idr_destroy(&ctx->dev->stid_idr);
        idr_destroy(&ctx->dev->atid_idr);
        iounmap(ctx->dev->rdev.oc_mw_kva);
        ib_dealloc_device(&ctx->dev->ibdev);
        ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
        PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
        c4iw_unregister_device(ctx->dev);
        c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
        return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
               infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
               infop->vr->cq.size > 0 && infop->vr->ocq.size > 0;
}

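/*
 * c4iw_alloc() carves the on-chip queue (OCQ) memory window out of the
 * end of BAR2 and maps it write-combined: per the address math below,
 * oc_mw_pa sits roundup_pow_of_two(ocq.size) bytes before the end of
 * the BAR.
 */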
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
        struct c4iw_dev *devp;
        int ret;

        if (!rdma_supported(infop)) {
                printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
                       pci_name(infop->pdev));
                return ERR_PTR(-ENOSYS);
        }
        devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
        if (!devp) {
                printk(KERN_ERR MOD "Cannot allocate ib device\n");
                return ERR_PTR(-ENOMEM);
        }
        devp->rdev.lldi = *infop;

        devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
                (pci_resource_len(devp->rdev.lldi.pdev, 2) -
                 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
        devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
                                          devp->rdev.lldi.vr->ocq.size);

        PDBG(KERN_INFO MOD "ocq memory: "
             "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
             devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
             devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

        ret = c4iw_rdev_open(&devp->rdev);
        if (ret) {
                printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
                ib_dealloc_device(&devp->ibdev);
                return ERR_PTR(ret);
        }

        idr_init(&devp->cqidr);
        idr_init(&devp->qpidr);
        idr_init(&devp->mmidr);
        idr_init(&devp->hwtid_idr);
        idr_init(&devp->stid_idr);
        idr_init(&devp->atid_idr);
        spin_lock_init(&devp->lock);
        mutex_init(&devp->rdev.stats.lock);
        mutex_init(&devp->db_mutex);

        if (c4iw_debugfs_root) {
                devp->debugfs_root = debugfs_create_dir(
                                        pci_name(devp->rdev.lldi.pdev),
                                        c4iw_debugfs_root);
                setup_debugfs(devp);
        }
        return devp;
}

static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
        struct uld_ctx *ctx;
        static int vers_printed;
        int i;

        if (!vers_printed++)
                printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
                       DRV_VERSION);

        ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
        if (!ctx) {
                ctx = ERR_PTR(-ENOMEM);
                goto out;
        }
        ctx->lldi = *infop;

        PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
             __func__, pci_name(ctx->lldi.pdev),
             ctx->lldi.nchan, ctx->lldi.nrxq,
             ctx->lldi.ntxq, ctx->lldi.nports);

        mutex_lock(&dev_mutex);
        list_add_tail(&ctx->entry, &uld_ctx_list);
        mutex_unlock(&dev_mutex);

        for (i = 0; i < ctx->lldi.nrxq; i++)
                PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
        return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
                                                 const __be64 *rsp,
                                                 u32 pktshift)
{
        struct sk_buff *skb;

        /*
         * Allocate space for cpl_pass_accept_req which will be synthesized by
         * driver. Once the driver synthesizes the request the skb will go
         * through the regular cpl_pass_accept_req processing.
         * The math here assumes sizeof cpl_pass_accept_req >= sizeof
         * cpl_rx_pkt.
         */
        skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
                        sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
        if (unlikely(!skb))
                return NULL;

        __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
                  sizeof(struct rss_header) - pktshift);

        /*
         * This skb will contain:
         *   rss_header from the rspq descriptor (1 flit)
         *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
         *   space for the difference between the size of an
         *      rx_pkt and pass_accept_req cpl (1 flit)
         *   the packet data from the gl
         */
        skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
                                sizeof(struct rss_header));
        skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
                                       sizeof(struct cpl_pass_accept_req),
                                       gl->va + pktshift,
                                       gl->tot_len - pktshift);
        return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
                              const __be64 *rsp)
{
        unsigned int opcode = *(u8 *)rsp;
        struct sk_buff *skb;

        if (opcode != CPL_RX_PKT)
                goto out;

        skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
        if (skb == NULL)
                goto out;

        if (c4iw_handlers[opcode] == NULL) {
                pr_info("%s no handler opcode 0x%x...\n", __func__,
                        opcode);
                kfree_skb(skb);
                goto out;
        }
        c4iw_handlers[opcode](dev, skb);
        return 1;
out:
        return 0;
}

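/*
 * ULD rx entry point.  Four cases, matching the tests below: no gather
 * list (the CPL fits in the rspq descriptor), an asynchronous
 * notification (CXGB4_MSG_AN), a CPL_RX_PKT that must be recast as a
 * synthesized pass_accept_req, and the common case of copying the
 * gather list into an skb for the matching c4iw handler.
 */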
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
                               const struct pkt_gl *gl)
{
        struct uld_ctx *ctx = handle;
        struct c4iw_dev *dev = ctx->dev;
        struct sk_buff *skb;
        u8 opcode;

        if (gl == NULL) {
                /* omit RSS and rsp_ctrl at end of descriptor */
                unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

                skb = alloc_skb(256, GFP_ATOMIC);
                if (!skb)
                        goto nomem;
                __skb_put(skb, len);
                skb_copy_to_linear_data(skb, &rsp[1], len);
        } else if (gl == CXGB4_MSG_AN) {
                const struct rsp_ctrl *rc = (void *)rsp;

                u32 qid = be32_to_cpu(rc->pldbuflen_qid);
                c4iw_ev_handler(dev, qid);
                return 0;
        } else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
                if (recv_rx_pkt(dev, gl, rsp))
                        return 0;

                pr_info("%s: unexpected FL contents at %p, " \
                        "RSS %#llx, FL %#llx, len %u\n",
                        pci_name(ctx->lldi.pdev), gl->va,
                        (unsigned long long)be64_to_cpu(*rsp),
                        (unsigned long long)be64_to_cpu(
                        *(__force __be64 *)gl->va),
                        gl->tot_len);

                return 0;
        } else {
                skb = cxgb4_pktgl_to_skb(gl, 128, 128);
                if (unlikely(!skb))
                        goto nomem;
        }

        opcode = *(u8 *)rsp;
        if (c4iw_handlers[opcode])
                c4iw_handlers[opcode](dev, skb);
        else
                pr_info("%s no handler opcode 0x%x...\n", __func__,
                        opcode);

        return 0;
nomem:
        return -1;
}

static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
        struct uld_ctx *ctx = handle;

        PDBG("%s new_state %u\n", __func__, new_state);
        switch (new_state) {
        case CXGB4_STATE_UP:
                printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
                if (!ctx->dev) {
                        int ret;

                        ctx->dev = c4iw_alloc(&ctx->lldi);
                        if (IS_ERR(ctx->dev)) {
                                printk(KERN_ERR MOD
                                       "%s: initialization failed: %ld\n",
                                       pci_name(ctx->lldi.pdev),
                                       PTR_ERR(ctx->dev));
                                ctx->dev = NULL;
                                break;
                        }
                        ret = c4iw_register_device(ctx->dev);
                        if (ret) {
                                printk(KERN_ERR MOD
                                       "%s: RDMA registration failed: %d\n",
                                       pci_name(ctx->lldi.pdev), ret);
                                c4iw_dealloc(ctx);
                        }
                }
                break;
        case CXGB4_STATE_DOWN:
                printk(KERN_INFO MOD "%s: Down\n",
                       pci_name(ctx->lldi.pdev));
                if (ctx->dev)
                        c4iw_remove(ctx);
                break;
        case CXGB4_STATE_START_RECOVERY:
                printk(KERN_INFO MOD "%s: Fatal Error\n",
                       pci_name(ctx->lldi.pdev));
                if (ctx->dev) {
                        struct ib_event event;

                        ctx->dev->rdev.flags |= T4_FATAL_ERROR;
                        memset(&event, 0, sizeof event);
                        event.event  = IB_EVENT_DEVICE_FATAL;
                        event.device = &ctx->dev->ibdev;
                        ib_dispatch_event(&event);
                        c4iw_remove(ctx);
                }
                break;
        case CXGB4_STATE_DETACH:
                printk(KERN_INFO MOD "%s: Detach\n",
                       pci_name(ctx->lldi.pdev));
                if (ctx->dev)
                        c4iw_remove(ctx);
                break;
        }
        return 0;
}

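/*
 * Doorbell flow control.  The SGE doorbell FIFO can overflow, so the
 * LLD signals FULL/EMPTY/DROP events (see c4iw_uld_control() below).
 * On FULL we disable QP doorbells and enter FLOW_CONTROL; on DROP,
 * doorbells were lost and we enter RECOVERY to resubmit the host
 * producer indexes for every QP.
 */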
static int disable_qp_db(int id, void *p, void *data)
{
        struct c4iw_qp *qp = p;

        t4_disable_wq_db(&qp->wq);
        return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
        spin_lock_irq(&ctx->dev->lock);
        if (ctx->dev->db_state == NORMAL) {
                ctx->dev->rdev.stats.db_state_transitions++;
                ctx->dev->db_state = FLOW_CONTROL;
                idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
        }
        spin_unlock_irq(&ctx->dev->lock);
}

static int enable_qp_db(int id, void *p, void *data)
{
        struct c4iw_qp *qp = p;

        t4_enable_wq_db(&qp->wq);
        return 0;
}

static void resume_queues(struct uld_ctx *ctx)
{
        spin_lock_irq(&ctx->dev->lock);
        if (ctx->dev->qpcnt <= db_fc_threshold &&
            ctx->dev->db_state == FLOW_CONTROL) {
                ctx->dev->db_state = NORMAL;
                ctx->dev->rdev.stats.db_state_transitions++;
                idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
        }
        spin_unlock_irq(&ctx->dev->lock);
}

struct qp_list {
        unsigned idx;
        struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
        struct qp_list *qp_listp = data;
        struct c4iw_qp *qp = p;

        c4iw_qp_add_ref(&qp->ibqp);
        qp_listp->qps[qp_listp->idx++] = qp;
        return 0;
}

static int count_qps(int id, void *p, void *data)
{
        unsigned *countp = data;

        (*countp)++;
        return 0;
}

static void deref_qps(struct qp_list qp_list)
{
        int idx;

        for (idx = 0; idx < qp_list.idx; idx++)
                c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp);
}

static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
        int idx;
        int ret;

        for (idx = 0; idx < qp_list->idx; idx++) {
                struct c4iw_qp *qp = qp_list->qps[idx];

                ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
                                          qp->wq.sq.qid,
                                          t4_sq_host_wq_pidx(&qp->wq),
                                          t4_sq_wq_size(&qp->wq));
                if (ret) {
                        printk(KERN_ERR MOD "%s: Fatal error - "
                               "DB overflow recovery failed - "
                               "error syncing SQ qid %u\n",
                               pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
                        return;
                }

                ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
                                          qp->wq.rq.qid,
                                          t4_rq_host_wq_pidx(&qp->wq),
                                          t4_rq_wq_size(&qp->wq));
                if (ret) {
                        printk(KERN_ERR MOD "%s: Fatal error - "
                               "DB overflow recovery failed - "
                               "error syncing RQ qid %u\n",
                               pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
                        return;
                }

                /* Wait for the dbfifo to drain */
                while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(usecs_to_jiffies(10));
                }
        }
}

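/*
 * Full DB-drop recovery: freeze all doorbell ringers, let the doorbell
 * FIFO drain, flush the SGE EQ context cache, then replay the host
 * producer indexes for every QP via recover_lost_dbs() before
 * re-enabling doorbells.
 */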
static void recover_queues(struct uld_ctx *ctx)
{
        int count = 0;
        struct qp_list qp_list;
        int ret;

        /* lock out kernel db ringers */
        mutex_lock(&ctx->dev->db_mutex);

        /* put all queues in to recovery mode */
        spin_lock_irq(&ctx->dev->lock);
        ctx->dev->db_state = RECOVERY;
        ctx->dev->rdev.stats.db_state_transitions++;
        idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
        spin_unlock_irq(&ctx->dev->lock);

        /* slow everybody down */
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(usecs_to_jiffies(1000));

        /* Wait for the dbfifo to completely drain. */
        while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(usecs_to_jiffies(10));
        }

        /* flush the SGE contexts */
        ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
        if (ret) {
                printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
                       pci_name(ctx->lldi.pdev));
                goto out;
        }

        /* Count active queues so we can build a list of queues to recover */
        spin_lock_irq(&ctx->dev->lock);
        idr_for_each(&ctx->dev->qpidr, count_qps, &count);

        qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
        if (!qp_list.qps) {
                printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
                       pci_name(ctx->lldi.pdev));
                spin_unlock_irq(&ctx->dev->lock);
                goto out;
        }
        qp_list.idx = 0;

        /* add and ref each qp so it doesn't get freed */
        idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

        spin_unlock_irq(&ctx->dev->lock);

        /* now traverse the list in a safe context to recover the db state */
        recover_lost_dbs(ctx, &qp_list);

        /* we're almost done!  deref the qps and clean up */
        deref_qps(qp_list);
        kfree(qp_list.qps);

        /* Wait for the dbfifo to completely drain again */
        while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(usecs_to_jiffies(10));
        }

        /* resume the queues */
        spin_lock_irq(&ctx->dev->lock);
        if (ctx->dev->qpcnt > db_fc_threshold)
                ctx->dev->db_state = FLOW_CONTROL;
        else {
                ctx->dev->db_state = NORMAL;
                idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
        }
        ctx->dev->rdev.stats.db_state_transitions++;
        spin_unlock_irq(&ctx->dev->lock);

out:
        /* start up kernel db ringers again */
        mutex_unlock(&ctx->dev->db_mutex);
}

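/*
 * Control events from the LLD: DB_FULL and DB_EMPTY drive the
 * FLOW_CONTROL state machine above; DB_DROP means doorbells were
 * actually lost and triggers full recovery.
 */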
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
        struct uld_ctx *ctx = handle;

        switch (control) {
        case CXGB4_CONTROL_DB_FULL:
                stop_queues(ctx);
                mutex_lock(&ctx->dev->rdev.stats.lock);
                ctx->dev->rdev.stats.db_full++;
                mutex_unlock(&ctx->dev->rdev.stats.lock);
                break;
        case CXGB4_CONTROL_DB_EMPTY:
                resume_queues(ctx);
                mutex_lock(&ctx->dev->rdev.stats.lock);
                ctx->dev->rdev.stats.db_empty++;
                mutex_unlock(&ctx->dev->rdev.stats.lock);
                break;
        case CXGB4_CONTROL_DB_DROP:
                recover_queues(ctx);
                mutex_lock(&ctx->dev->rdev.stats.lock);
                ctx->dev->rdev.stats.db_drop++;
                mutex_unlock(&ctx->dev->rdev.stats.lock);
                break;
        default:
                printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
                       pci_name(ctx->lldi.pdev), control);
                break;
        }
        return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
        .name = DRV_NAME,
        .add = c4iw_uld_add,
        .rx_handler = c4iw_uld_rx_handler,
        .state_change = c4iw_uld_state_change,
        .control = c4iw_uld_control,
};

static int __init c4iw_init_module(void)
{
        int err;

        err = c4iw_cm_init();
        if (err)
                return err;

        c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
        if (!c4iw_debugfs_root)
                printk(KERN_WARNING MOD
                       "could not create debugfs entry, continuing\n");

        cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

        return 0;
}

static void __exit c4iw_exit_module(void)
{
        struct uld_ctx *ctx, *tmp;

        mutex_lock(&dev_mutex);
        list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
                if (ctx->dev)
                        c4iw_remove(ctx);
                kfree(ctx);
        }
        mutex_unlock(&dev_mutex);
        cxgb4_unregister_uld(CXGB4_ULD_RDMA);
        c4iw_cm_term();
        debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);