/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

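/*
 * Usage note (assuming the module is built under its usual name,
 * iw_cxgb4): both knobs default to off and, since the permissions are
 * 0644, can be set at load time, e.g.
 *
 *	modprobe iw_cxgb4 allow_db_fc_on_t5=1
 *
 * or changed afterwards via /sys/module/iw_cxgb4/parameters/.
 */
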
struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0

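/*
 * Interpretation of the tunables above, based on how resume_queues()
 * uses them below: after a DB_EMPTY event, at most DB_FC_RESUME_SIZE
 * stalled QPs are resumed per chunk, the thread sleeps
 * DB_FC_RESUME_DELAY jiffies between chunks, and a chunk is only
 * resumed while the doorbell fifo holds fewer than
 * (dbfifo_int_thresh << DB_FC_DRAIN_THRESH) entries.
 */
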
static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		if (qp->ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin = (struct sockaddr_in *)
				&qp->ep->com.local_addr;
			struct sockaddr_in *rsin = (struct sockaddr_in *)
				&qp->ep->com.remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u->%pI4:%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.local_addr;
			struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u->%pI6:%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;
	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int count = 1;

	qpd = kmalloc(sizeof(*qpd), GFP_KERNEL);
	if (!qpd)
		return -ENOMEM;
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		kfree(qpd);
		return -ENOMEM;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	return 0;
}

static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;
	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	kfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int count = 1;

	stagd = kmalloc(sizeof(*stagd), GFP_KERNEL);
	if (!stagd)
		return -ENOMEM;
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * sizeof("0x12345678\n");
	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
	if (!stagd->buf) {
		kfree(stagd);
		return -ENOMEM;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	return 0;
}

static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

/*
 * Human-readable names for the device doorbell state, indexed by the
 * db_state values used throughout this file (NORMAL, FLOW_CONTROL,
 * RECOVERY, STOPPED); the enum itself is assumed to be declared in
 * iw_cxgb4.h, so keep the two in step.
 */
static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};

static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
			dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
			dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
			dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
			dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
			dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
			dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};

static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *rsin = (struct sockaddr_in *)
			&ep->com.remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI4:%d <-> %pI4:%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
			&ep->com.remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI6:%d <-> %pI6:%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;
	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd)
		return -ENOMEM;
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 160;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		kfree(epd);
		return -ENOMEM;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	return 0;
}

static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
};

static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;

	if (!devp->debugfs_root)
		return -1;

	de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	return 0;
}

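/*
 * Usage note: with debugfs mounted in the usual place, the files created
 * above appear as /sys/kernel/debug/<DRV_NAME>/<pci-device>/{qps, stags,
 * stats, eps}. "cat" any of them to dump a snapshot; writing anything to
 * "stats" clears the counters via stats_clear().
 */
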
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;
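	/*
	 * Worked example (illustrative, not from the original source):
	 * with udb_density == 1 (one user doorbell page per QP), qpshift
	 * is PAGE_SHIFT and qpmask is 0, so qid N's doorbell sits at
	 * udb_base + (N << PAGE_SHIFT). With a density of 2, qpshift
	 * drops by one bit and qids N and N+1 (N even) share a page,
	 * which is why (qid & qpmask) identifies the first qid of a page.
	 */
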
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (u64)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg,
	     rdev->lldi.gts_reg,
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		pr_err(MOD "error allocating status page\n");
		err = -ENOMEM;
		goto err4;
	}
	return 0;
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}

static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
		(pci_resource_len(devp->rdev.lldi.pdev, 2) -
		 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
					  devp->rdev.lldi.vr->ocq.size);

	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	return devp;
}

static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
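		/*
		 * Layout being unpacked (as implied by the arithmetic
		 * above): a 64-byte response descriptor carries an 8-byte
		 * RSS header, then the inline CPL message, then a trailing
		 * struct rsp_ctrl. Only the CPL bytes are copied, starting
		 * at &rsp[1] to skip the RSS header.
		 */
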
		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, "
			"RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			(unsigned long long)be64_to_cpu(*rsp),
			(unsigned long long)be64_to_cpu(
				*(__force __be64 *)gl->va),
			gl->tot_len);

		goto nomem;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}

static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof(event));
			event.event  = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}

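/*
 * Interpretation: while a QP is parked on db_fc_list its doorbell rings
 * are deferred and only accumulated in wq_pidx_inc (the posting path is
 * assumed to live in qp.c); resume_rc_qp() above replays the pending
 * increments to the hardware and clears them.
 */
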
static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}

static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}

struct qp_list {
	struct c4iw_qp **qps;
	unsigned idx;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	*countp = *countp + 1;
	return 0;
}

static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}

static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}

static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof(*qp_list.qps), GFP_ATOMIC);
	if (!qp_list.qps) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done! deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}

static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);