/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>

#include "qib.h"
#include "qib_common.h"
/*
 * min buffers we want to have per context, after driver
 */
#define QIB_MIN_USER_CTXT_BUFCNT 7

#define QLOGIC_IB_R_SOFTWARE_MASK 0xFF
#define QLOGIC_IB_R_SOFTWARE_SHIFT 24
#define QLOGIC_IB_R_EMULATOR_MASK (1ULL<<62)
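/*
 * Usage sketch (for illustration): the chip software version is bits
 * 31:24 of dd->revision, i.e.
 *	swver = (dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
 *		QLOGIC_IB_R_SOFTWARE_MASK;
 * loadtime_init() below rejects a chip whose swver does not match
 * QIB_CHIP_SWVERSION; QLOGIC_IB_R_EMULATOR_MASK (bit 62) marks an
 * emulator build.
 */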
/*
 * Number of ctxts we are configured to use (to allow for more pio
 * buffers per ctxt, etc.)  Zero means use chip value.
 */
ushort qib_cfgctxts;
module_param_named(cfgctxts, qib_cfgctxts, ushort, S_IRUGO);
MODULE_PARM_DESC(cfgctxts, "Set max number of contexts to use");
/*
 * If set, do not write to any regs if avoidable, hack to allow
 * check for deranged default register values.
 */
ushort qib_mini_init;
module_param_named(mini_init, qib_mini_init, ushort, S_IRUGO);
MODULE_PARM_DESC(mini_init, "If set, do minimal diag init");
unsigned qib_n_krcv_queues;
module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
/*
 * qib_wc_pat parameter:
 *      0 is WC via MTRR
 *      1 is WC via PAT
 *      If PAT initialization fails, code reverts back to MTRR
 */
unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
struct workqueue_struct *qib_cq_wq;

static void verify_interrupt(unsigned long);

static struct idr qib_unit_table;
u32 qib_cpulist_count;
unsigned long *qib_cpulist;
/* set number of contexts we'll actually use */
void qib_set_ctxtcnt(struct qib_devdata *dd)
{
	if (!qib_cfgctxts) {
		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
		if (dd->cfgctxts > dd->ctxtcnt)
			dd->cfgctxts = dd->ctxtcnt;
	} else if (qib_cfgctxts < dd->num_pports)
		dd->cfgctxts = dd->ctxtcnt;
	else if (qib_cfgctxts <= dd->ctxtcnt)
		dd->cfgctxts = qib_cfgctxts;
	else
		dd->cfgctxts = dd->ctxtcnt;
	dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
		dd->cfgctxts - dd->first_user_ctxt;
}
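/*
 * Worked example with hypothetical numbers: qib_cfgctxts == 0,
 * first_user_ctxt == 2, 8 CPUs online and a chip ctxtcnt of 18 give
 * cfgctxts = min(2 + 8, 18) = 10 and freectxts = 10 - 2 = 8 contexts
 * available to user processes.
 */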
/*
 * Common code for creating the receive context array.
 */
int qib_create_ctxts(struct qib_devdata *dd)
{
	unsigned i;
	int ret;

	/*
	 * Allocate full ctxtcnt array, rather than just cfgctxts, because
	 * cleanup iterates across all possible ctxts.
	 */
	dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
	if (!dd->rcd) {
		qib_dev_err(dd, "Unable to allocate ctxtdata array, "
			    "failing\n");
		ret = -ENOMEM;
		goto done;
	}

	/* create (one or more) kctxt */
	for (i = 0; i < dd->first_user_ctxt; ++i) {
		struct qib_pportdata *ppd;
		struct qib_ctxtdata *rcd;

		if (dd->skip_kctxt_mask & (1 << i))
			continue;

		ppd = dd->pport + (i % dd->num_pports);
		rcd = qib_create_ctxtdata(ppd, i);
		if (!rcd) {
			qib_dev_err(dd, "Unable to allocate ctxtdata"
				    " for Kernel ctxt, failing\n");
			ret = -ENOMEM;
			goto done;
		}
		rcd->pkeys[0] = QIB_DEFAULT_P_KEY;
		rcd->seq_cnt = 1;
	}
	ret = 0;
done:
	return ret;
}
/*
 * Common code for user and kernel context setup.
 */
struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
{
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;

	rcd = kzalloc(sizeof(*rcd), GFP_KERNEL);
	if (rcd) {
		INIT_LIST_HEAD(&rcd->qp_wait_list);
		rcd->ppd = ppd;
		rcd->dd = dd;
		rcd->cnt = 1;
		rcd->ctxt = ctxt;
		dd->rcd[ctxt] = rcd;

		dd->f_init_ctxt(rcd);

		/*
		 * To avoid wasting a lot of memory, we allocate 32KB chunks
		 * of physically contiguous memory, advance through it until
		 * used up and then allocate more.  Of course, we need
		 * memory to store those extra pointers, now.  32KB seems to
		 * be the most that is "safe" under memory pressure
		 * (creating large files and then copying them over
		 * NFS while doing lots of MPI jobs).  The OOM killer can
		 * get invoked, even though we say we can sleep and this can
		 * cause significant system problems....
		 */
		rcd->rcvegrbuf_size = 0x8000;
		rcd->rcvegrbufs_perchunk =
			rcd->rcvegrbuf_size / dd->rcvegrbufsize;
		rcd->rcvegrbuf_chunks = (rcd->rcvegrcnt +
			rcd->rcvegrbufs_perchunk - 1) /
			rcd->rcvegrbufs_perchunk;
		BUG_ON(!is_power_of_2(rcd->rcvegrbufs_perchunk));
		rcd->rcvegrbufs_perchunk_shift =
			ilog2(rcd->rcvegrbufs_perchunk);
	}
	return rcd;
}
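/*
 * Worked example with hypothetical numbers: a 32KB (0x8000) chunk and
 * 4KB eager buffers give rcvegrbufs_perchunk = 8 (shift 3), so an
 * rcvegrcnt of 100 rounds up to (100 + 7) / 8 = 13 chunks.
 */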
/*
 * Common code for initializing the physical port structure.
 */
void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
			u8 hw_pidx, u8 port)
{
	ppd->dd = dd;
	ppd->hw_pidx = hw_pidx;
	ppd->port = port; /* IB port number, not index */

	spin_lock_init(&ppd->sdma_lock);
	spin_lock_init(&ppd->lflags_lock);
	init_waitqueue_head(&ppd->state_wait);

	init_timer(&ppd->symerr_clear_timer);
	ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup;
	ppd->symerr_clear_timer.data = (unsigned long)ppd;
}
static int init_pioavailregs(struct qib_devdata *dd)
{
	int ret, pidx;
	u64 *status_page;

	dd->pioavailregs_dma = dma_alloc_coherent(
		&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
		GFP_KERNEL);
	if (!dd->pioavailregs_dma) {
		qib_dev_err(dd, "failed to allocate PIOavail reg area "
			    "in memory\n");
		ret = -ENOMEM;
		goto done;
	}

	/*
	 * We really want L2 cache aligned, but for current CPUs of
	 * interest, they are the same.
	 */
	status_page = (u64 *)
		((char *) dd->pioavailregs_dma +
		 ((2 * L1_CACHE_BYTES +
		   dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
	/* device status comes first, for backwards compatibility */
	dd->devstatusp = status_page;
	*status_page++ = 0;
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		dd->pport[pidx].statusp = status_page;
		*status_page++ = 0;
	}

	/*
	 * Setup buffer to hold freeze and other messages, accessible to
	 * apps, following statusp.  This is per-unit, not per port.
	 */
	dd->freezemsg = (char *) status_page;
	/* length of msg buffer is "whatever is left" */
	ret = (char *) status_page - (char *) dd->pioavailregs_dma;
	dd->freezelen = PAGE_SIZE - ret;

	ret = 0;
done:
	return ret;
}
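/*
 * For reference: the single PAGE_SIZE allocation above is laid out as
 * the DMA'ed pioavail registers first, then (cache-line aligned) one
 * u64 device status word, one u64 status word per port, and finally
 * the freeze-message buffer, which gets "whatever is left" of the page.
 */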
/**
 * init_shadow_tids - allocate the shadow TID array
 * @dd: the qlogic_ib device
 *
 * allocate the shadow TID array, so we can qib_munlock previous
 * entries.  It may make more sense to move the pageshadow to the
 * ctxt data structure, so we only allocate memory for ctxts actually
 * in use, since we're at 8k per ctxt now.
 * We don't want failures here to prevent use of the driver/chip,
 * so no return value.
 */
static void init_shadow_tids(struct qib_devdata *dd)
{
	struct page **pages;
	dma_addr_t *addrs;

	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
	if (!pages) {
		qib_dev_err(dd, "failed to allocate shadow page * "
			    "array, no expected sends!\n");
		goto bail;
	}

	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
	if (!addrs) {
		qib_dev_err(dd, "failed to allocate shadow dma handle "
			    "array, no expected sends!\n");
		goto bail_free;
	}

	dd->pageshadow = pages;
	dd->physshadow = addrs;
	return;

bail_free:
	vfree(pages);
bail:
	dd->pageshadow = NULL;
}
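/*
 * Sizing example with hypothetical numbers: cfgctxts == 10 and
 * rcvtidcnt == 512 make each shadow array 10 * 512 * 8 bytes (40KB)
 * on a 64-bit kernel; vzalloc() keeps those allocations from needing
 * physically contiguous pages.
 */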
/*
 * Do initialization for device that is only needed on
 * first detect, not on resets.
 */
static int loadtime_init(struct qib_devdata *dd)
{
	int ret = 0;

	if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
	     QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
		qib_dev_err(dd, "Driver only handles version %d, "
			    "chip swversion is %d (%llx), failing\n",
			    QIB_CHIP_SWVERSION,
			    (int)(dd->revision >>
				  QLOGIC_IB_R_SOFTWARE_SHIFT) &
				  QLOGIC_IB_R_SOFTWARE_MASK,
			    (unsigned long long) dd->revision);
		ret = -ENOSYS;
		goto done;
	}

	if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
		qib_devinfo(dd->pcidev, "%s", dd->boardversion);

	spin_lock_init(&dd->pioavail_lock);
	spin_lock_init(&dd->sendctrl_lock);
	spin_lock_init(&dd->uctxt_lock);
	spin_lock_init(&dd->qib_diag_trans_lock);
	spin_lock_init(&dd->eep_st_lock);
	mutex_init(&dd->eep_lock);

	if (qib_mini_init)
		goto done;

	ret = init_pioavailregs(dd);
	init_shadow_tids(dd);

	qib_get_eeprom_info(dd);

	/* setup time (don't start yet) to verify we got interrupt */
	init_timer(&dd->intrchk_timer);
	dd->intrchk_timer.function = verify_interrupt;
	dd->intrchk_timer.data = (unsigned long) dd;

done:
	return ret;
}
/**
 * init_after_reset - re-initialize after a reset
 * @dd: the qlogic_ib device
 *
 * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explicitly, in case reset failed)
 */
static int init_after_reset(struct qib_devdata *dd)
{
	int i;

	/*
	 * Ensure chip does no sends or receives, tail updates, or
	 * pioavail updates while we re-initialize.  This is mostly
	 * for the driver data structures, not chip registers.
	 */
	for (i = 0; i < dd->num_pports; ++i) {
		/*
		 * ctxt == -1 means "all contexts". Only really safe for
		 * _dis_abling things, as here.
		 */
		dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
			      QIB_RCVCTRL_INTRAVAIL_DIS |
			      QIB_RCVCTRL_TAILUPD_DIS, -1);
		/* Redundant across ports for some, but no big deal. */
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
			       QIB_SENDCTRL_AVAIL_DIS);
	}

	return 0;
}
static void enable_chip(struct qib_devdata *dd)
{
	u64 rcvmask;
	int i;

	/*
	 * Enable PIO send, and update of PIOavail regs to memory.
	 */
	for (i = 0; i < dd->num_pports; ++i)
		dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
			       QIB_SENDCTRL_AVAIL_ENB);
	/*
	 * Enable kernel ctxts' receive and receive interrupt.
	 * Other ctxts done as user opens and inits them.
	 */
	rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB;
	rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
		  QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		struct qib_ctxtdata *rcd = dd->rcd[i];

		if (rcd)
			dd->f_rcvctrl(rcd->ppd, rcvmask, i);
	}
}
static void verify_interrupt(unsigned long opaque)
{
	struct qib_devdata *dd = (struct qib_devdata *) opaque;

	if (!dd)
		return; /* being torn down */

	/*
	 * If we don't have a lid or any interrupts, let the user know and
	 * don't bother checking again.
	 */
	if (dd->int_counter == 0) {
		if (!dd->f_intr_fallback(dd))
			dev_err(&dd->pcidev->dev, "No interrupts detected, "
				"not usable.\n");
		else /* re-arm the timer to see if fallback works */
			mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
	}
}
static void init_piobuf_state(struct qib_devdata *dd)
{
	int i, pidx;
	u32 uctxts;

	/*
	 * Ensure all buffers are free, and fifos empty.  Buffers
	 * are common, so only do once for port 0.
	 *
	 * After enable and qib_chg_pioavailkernel so we can safely
	 * enable pioavail updates and PIOENABLE.  After this, packets
	 * are ready and able to go out.
	 */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL);
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH);

	/*
	 * If not all sendbufs are used, add the one to each of the lower
	 * numbered contexts.  pbufsctxt and lastctxt_piobuf are
	 * calculated in chip-specific code because it may cause some
	 * chip-specific adjustments to be made.
	 */
	uctxts = dd->cfgctxts - dd->first_user_ctxt;
	dd->ctxts_extrabuf = dd->pbufsctxt ?
		dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0;
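	/*
	 * Worked example with hypothetical numbers: 4 user ctxts,
	 * pbufsctxt == 16 and lastctxt_piobuf == 70 leave
	 * 70 - 4 * 16 = 6 extra buffers, handed one each to the six
	 * lowest-numbered contexts.
	 */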
	/*
	 * Set up the shadow copies of the piobufavail registers,
	 * which we compare against the chip registers for now, and
	 * the in memory DMA'ed copies of the registers.
	 * By now pioavail updates to memory should have occurred, so
	 * copy them into our working/shadow registers; this is in
	 * case something went wrong with abort, but mostly to get the
	 * initial values of the generation bit correct.
	 */
	for (i = 0; i < dd->pioavregs; i++) {
		__le64 tmp;

		tmp = dd->pioavailregs_dma[i];
		/*
		 * Don't need to worry about pioavailkernel here
		 * because we will call qib_chg_pioavailkernel() later
		 * in initialization, to busy out buffers as needed.
		 */
		dd->pioavailshadow[i] = le64_to_cpu(tmp);
	}
	while (i < ARRAY_SIZE(dd->pioavailshadow))
		dd->pioavailshadow[i++] = 0; /* for debugging sanity */

	/* after pioavailshadow is setup */
	qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k,
			       TXCHK_CHG_TYPE_KERN, NULL);
	dd->f_initvl15_bufs(dd);
}
/**
 * qib_init - do the actual initialization sequence on the chip
 * @dd: the qlogic_ib device
 * @reinit: reinitializing, so don't allocate new memory
 *
 * Do the actual initialization sequence on the chip.  This is done
 * both from the init routine called from the PCI infrastructure, and
 * when we reset the chip, or detect that it was reset internally,
 * or it's administratively re-enabled.
 *
 * Memory allocation here and in called routines is only done in
 * the first case (reinit == 0).  We have to be careful, because even
 * without memory allocation, we need to re-write all the chip registers,
 * TIDs, etc. after the reset or enable has completed.
 */
int qib_init(struct qib_devdata *dd, int reinit)
{
	int ret = 0, pidx, lastfail = 0;
	u32 portok = 0;
	unsigned i;
	struct qib_ctxtdata *rcd;
	struct qib_pportdata *ppd;
	unsigned long flags;

	/* Set linkstate to unknown, so we can watch for a transition. */
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags &= ~(QIBL_LINKACTIVE | QIBL_LINKARMED |
				 QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKV);
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
	}

	if (reinit)
		ret = init_after_reset(dd);
	else
		ret = loadtime_init(dd);
	if (ret)
		goto done;

	/* Bypass most chip-init, to get to device creation */
	if (qib_mini_init)
		return 0;

	ret = dd->f_late_initreg(dd);
	if (ret)
		goto done;

	/* dd->rcd can be NULL if early init failed */
	for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
		/*
		 * Set up the (kernel) rcvhdr queue and egr TIDs.  If doing
		 * re-init, the simplest way to handle this is to free
		 * existing, and re-allocate.
		 * Need to re-create rest of ctxt 0 ctxtdata as well.
		 */
		rcd = dd->rcd[i];
		if (!rcd)
			continue;

		lastfail = qib_create_rcvhdrq(dd, rcd);
		if (!lastfail)
			lastfail = qib_setup_eagerbufs(rcd);
		if (lastfail)
			qib_dev_err(dd, "failed to allocate kernel ctxt's "
				    "rcvhdrq and/or egr bufs\n");
	}

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		int mtu;

		if (lastfail)
			ret = lastfail;
		ppd = dd->pport + pidx;
		mtu = ib_mtu_enum_to_int(qib_ibmtu);
		if (mtu == -1) {
			mtu = QIB_DEFAULT_MTU;
			qib_ibmtu = 0; /* don't leave invalid value */
		}
		/* set max we can ever have for this driver load */
		ppd->init_ibmaxlen = min(mtu > 2048 ?
					 dd->piosize4k : dd->piosize2k,
					 dd->rcvegrbufsize +
					 (dd->rcvhdrentsize << 2));
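		/*
		 * Worked example with hypothetical sizes: mtu == 4096,
		 * piosize4k == 4128, rcvegrbufsize == 4096 and
		 * rcvhdrentsize == 32 give
		 * init_ibmaxlen = min(4128, 4096 + 128) = 4128.
		 */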
		/*
		 * Have to initialize ibmaxlen, but this will normally
		 * change immediately in qib_set_mtu().
		 */
		ppd->ibmaxlen = ppd->init_ibmaxlen;
		qib_set_mtu(ppd, mtu);

		spin_lock_irqsave(&ppd->lflags_lock, flags);
		ppd->lflags |= QIBL_IB_LINK_DISABLED;
		spin_unlock_irqrestore(&ppd->lflags_lock, flags);

		lastfail = dd->f_bringup_serdes(ppd);
		if (lastfail) {
			qib_devinfo(dd->pcidev,
				    "Failed to bringup IB port %u\n",
				    ppd->port);
			lastfail = -ENETDOWN;
			continue;
		}

		portok++;
	}

	if (!portok) {
		/* none of the ports initialized */
		if (!ret && lastfail)
			ret = lastfail;
		else if (!ret)
			ret = -ENETDOWN;
		/* but continue on, so we can debug cause */
	}

	enable_chip(dd);

	init_piobuf_state(dd);

done:
	if (!ret) {
		/* chip is OK for user apps; mark it as initialized */
		for (pidx = 0; pidx < dd->num_pports; ++pidx) {
			ppd = dd->pport + pidx;
			/*
			 * Set status even if port serdes is not initialized
			 * so that diags will work.
			 */
			*ppd->statusp |= QIB_STATUS_CHIP_PRESENT |
				QIB_STATUS_INITTED;
			if (!ppd->link_speed_enabled)
				continue;
			if (dd->flags & QIB_HAS_SEND_DMA)
				ret = qib_setup_sdma(ppd);
			init_timer(&ppd->hol_timer);
			ppd->hol_timer.function = qib_hol_event;
			ppd->hol_timer.data = (unsigned long)ppd;
			ppd->hol_state = QIB_HOL_UP;
		}

		/* now we can enable all interrupts from the chip */
		dd->f_set_intr_state(dd, 1);

		/*
		 * Setup to verify we get an interrupt, and fallback
		 * to an alternate if necessary and possible.
		 */
		mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
		/* start stats retrieval timer */
		mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
	}

	/* if ret is non-zero, we probably should do some cleanup here... */
	return ret;
}
/*
 * These next two routines are placeholders in case we don't have per-arch
 * code for controlling write combining.  If explicit control of write
 * combining is not available, performance will probably be awful.
 */
int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
{
	return -EOPNOTSUPP;
}

void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
{
}
static inline struct qib_devdata *__qib_lookup(int unit)
{
	return idr_find(&qib_unit_table, unit);
}

struct qib_devdata *qib_lookup(int unit)
{
	struct qib_devdata *dd;
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	dd = __qib_lookup(unit);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	return dd;
}
/*
 * Stop the timers during unit shutdown, or after an error late
 * in initialization.
 */
static void qib_stop_timers(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	int pidx;

	if (dd->stats_timer.data) {
		del_timer_sync(&dd->stats_timer);
		dd->stats_timer.data = 0;
	}
	if (dd->intrchk_timer.data) {
		del_timer_sync(&dd->intrchk_timer);
		dd->intrchk_timer.data = 0;
	}
	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		if (ppd->hol_timer.data)
			del_timer_sync(&ppd->hol_timer);
		if (ppd->led_override_timer.data) {
			del_timer_sync(&ppd->led_override_timer);
			atomic_set(&ppd->led_override_timer_active, 0);
		}
		if (ppd->symerr_clear_timer.data)
			del_timer_sync(&ppd->symerr_clear_timer);
	}
}
/**
 * qib_shutdown_device - shut down a device
 * @dd: the qlogic_ib device
 *
 * This is called to make the device quiet when we are about to
 * unload the driver, and also when the device is administratively
 * disabled.  It does not free any data structures.
 * Everything it does has to be set up again by qib_init(dd, 1)
 */
static void qib_shutdown_device(struct qib_devdata *dd)
{
	struct qib_pportdata *ppd;
	unsigned pidx;

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;

		spin_lock_irq(&ppd->lflags_lock);
		ppd->lflags &= ~(QIBL_LINKDOWN | QIBL_LINKINIT |
				 QIBL_LINKARMED | QIBL_LINKACTIVE |
				 QIBL_LINKV);
		spin_unlock_irq(&ppd->lflags_lock);
		*ppd->statusp &= ~(QIB_STATUS_IB_CONF | QIB_STATUS_IB_READY);
	}
	dd->flags &= ~QIB_INITTED;

	/* mask interrupts, but not errors */
	dd->f_set_intr_state(dd, 0);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS |
			      QIB_RCVCTRL_CTXT_DIS |
			      QIB_RCVCTRL_INTRAVAIL_DIS |
			      QIB_RCVCTRL_PKEY_ENB, -1);
		/*
		 * Gracefully stop all sends allowing any in progress to
		 * trickle out first.
		 */
		dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR);
	}

	/*
	 * Enough for anything that's going to trickle out to have actually
	 * done so.
	 */
	udelay(20);

	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
		ppd = dd->pport + pidx;
		dd->f_setextled(ppd, 0); /* make sure LEDs are off */

		if (dd->flags & QIB_HAS_SEND_DMA)
			qib_teardown_sdma(ppd);

		dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS |
			       QIB_SENDCTRL_SEND_DIS);
		/*
		 * Clear SerdesEnable.
		 * We can't count on interrupts since we are stopping.
		 */
		dd->f_quiet_serdes(ppd);
	}

	qib_update_eeprom_log(dd);
}
/**
 * qib_free_ctxtdata - free a context's allocated data
 * @dd: the qlogic_ib device
 * @rcd: the ctxtdata structure
 *
 * free up any allocated data for a context
 * This should not touch anything that would affect a simultaneous
 * re-allocation of context data, because it is called after qib_mutex
 * is released (and can be called from reinit as well).
 * It should never change any chip state, or global driver state.
 */
void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	if (!rcd)
		return;

	if (rcd->rcvhdrq) {
		dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
				  rcd->rcvhdrq, rcd->rcvhdrq_phys);
		rcd->rcvhdrq = NULL;
		if (rcd->rcvhdrtail_kvaddr) {
			dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
					  rcd->rcvhdrtail_kvaddr,
					  rcd->rcvhdrqtailaddr_phys);
			rcd->rcvhdrtail_kvaddr = NULL;
		}
	}
	if (rcd->rcvegrbuf) {
		unsigned e;

		for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
			void *base = rcd->rcvegrbuf[e];
			size_t size = rcd->rcvegrbuf_size;

			dma_free_coherent(&dd->pcidev->dev, size,
					  base, rcd->rcvegrbuf_phys[e]);
		}
		kfree(rcd->rcvegrbuf);
		rcd->rcvegrbuf = NULL;
		kfree(rcd->rcvegrbuf_phys);
		rcd->rcvegrbuf_phys = NULL;
		rcd->rcvegrbuf_chunks = 0;
	}

	kfree(rcd->tid_pg_list);
	vfree(rcd->user_event_mask);
	vfree(rcd->subctxt_uregbase);
	vfree(rcd->subctxt_rcvegrbuf);
	vfree(rcd->subctxt_rcvhdr_base);
	kfree(rcd);
}
/*
 * Perform a PIO buffer bandwidth write test, to verify proper system
 * configuration.  Even when all the setup calls work, occasionally
 * BIOS or other issues can prevent write combining from working, or
 * can cause other bandwidth problems to the chip.
 *
 * This test simply writes the same buffer over and over again, and
 * measures close to the peak bandwidth to the chip (not testing
 * data bandwidth to the wire).  On chips that use an address-based
 * trigger to send packets to the wire, this is easy.  On chips that
 * use a count to trigger, we want to make sure that the packet doesn't
 * go out on the wire, or trigger flow control checks.
 */
static void qib_verify_pioperf(struct qib_devdata *dd)
{
	u32 pbnum, cnt, lcnt;
	u32 __iomem *piobuf;
	u64 *addr;
	u64 msecs, emsecs;

	piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum);
	if (!piobuf) {
		qib_devinfo(dd->pcidev,
			    "No PIObufs for checking perf, skipping\n");
		return;
	}

	/*
	 * Enough to give us a reasonable test, less than piobuf size, and
	 * likely multiple of store buffer length.
	 */
	cnt = 1024;

	addr = vmalloc(cnt);
	if (!addr) {
		qib_devinfo(dd->pcidev,
			    "Couldn't get memory for checking PIO perf,"
			    " skipping\n");
		goto done;
	}

	preempt_disable();  /* we want reasonably accurate elapsed time */
	msecs = 1 + jiffies_to_msecs(jiffies);
	for (lcnt = 0; lcnt < 10000U; lcnt++) {
		/* wait until we cross msec boundary */
		if (jiffies_to_msecs(jiffies) >= msecs)
			break;
		udelay(1);
	}

	dd->f_set_armlaunch(dd, 0);

	/*
	 * length 0, no dwords actually sent
	 */
	writeq(0, piobuf);
	qib_flush_wc();

	/*
	 * This is only roughly accurate, since even with preempt we
	 * still take interrupts that could take a while.  Running for
	 * >= 5 msec seems to get us "close enough" to accurate values.
	 */
	msecs = jiffies_to_msecs(jiffies);
	for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
		qib_pio_copy(piobuf + 64, addr, cnt >> 2);
		emsecs = jiffies_to_msecs(jiffies) - msecs;
	}

	/* 1 GiB/sec, slightly over IB SDR line rate */
	if (lcnt < (emsecs * 1024U))
		qib_dev_err(dd,
			    "Performance problem: bandwidth to PIO buffers is "
			    "only %u MiB/sec\n",
			    lcnt / (u32) emsecs);
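	/*
	 * For reference: each iteration copies cnt (1024) bytes, so
	 * lcnt / emsecs is KiB per msec, roughly MiB/sec; the 1024U
	 * threshold above therefore corresponds to about 1 GiB/sec.
	 */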
	preempt_enable();
	vfree(addr);

done:
	/* disarm piobuf, so it's available again */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum));
	qib_sendbuf_done(dd, pbnum);
	dd->f_set_armlaunch(dd, 1);
}
void qib_free_devdata(struct qib_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&qib_devs_lock, flags);
	idr_remove(&qib_unit_table, dd->unit);
	list_del(&dd->list);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	ib_dealloc_device(&dd->verbs_dev.ibdev);
}
/*
 * Allocate our primary per-unit data structure.  Must be done via verbs
 * allocator, because the verbs cleanup process both does cleanup and
 * free of the data structure.
 * "extra" is for chip-specific data.
 *
 * Use the idr mechanism to get a unit number for this unit.
 */
struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
{
	unsigned long flags;
	struct qib_devdata *dd;
	int ret;

	if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
	if (!dd) {
		dd = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock_irqsave(&qib_devs_lock, flags);
	ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
	if (ret >= 0)
		list_add(&dd->list, &qib_dev_list);
	spin_unlock_irqrestore(&qib_devs_lock, flags);

	if (ret < 0) {
		qib_early_err(&pdev->dev,
			      "Could not allocate unit ID: error %d\n", -ret);
		ib_dealloc_device(&dd->verbs_dev.ibdev);
		dd = ERR_PTR(ret);
		goto bail;
	}

	if (!qib_cpulist_count) {
		u32 count = num_online_cpus();
		qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
				      sizeof(long), GFP_KERNEL);
		if (qib_cpulist)
			qib_cpulist_count = count;
		else
			qib_early_err(&pdev->dev, "Could not alloc cpulist "
				      "info, cpu affinity might be wrong\n");
	}

bail:
	return dd;
}
/*
 * Called from freeze mode handlers, and from PCI error
 * reporting code.  Should be paranoid about state of
 * system and data structures.
 */
void qib_disable_after_error(struct qib_devdata *dd)
{
	if (dd->flags & QIB_INITTED) {
		u32 pidx;

		dd->flags &= ~QIB_INITTED;
		if (dd->pport)
			for (pidx = 0; pidx < dd->num_pports; ++pidx) {
				struct qib_pportdata *ppd;

				ppd = dd->pport + pidx;
				if (dd->flags & QIB_PRESENT) {
					qib_set_linkstate(ppd,
						QIB_IB_LINKDOWN_DISABLE);
					dd->f_setextled(ppd, 0);
				}
				*ppd->statusp &= ~QIB_STATUS_IB_READY;
			}
	}

	/*
	 * Mark as having had an error for driver, and also
	 * for /sys and status word mapped to user programs.
	 * This marks unit as not usable, until reset.
	 */
	if (dd->devstatusp)
		*dd->devstatusp |= QIB_STATUS_HWERROR;
}
static void __devexit qib_remove_one(struct pci_dev *);
static int __devinit qib_init_one(struct pci_dev *,
				  const struct pci_device_id *);

#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "

static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, qib_pci_tbl);

struct pci_driver qib_driver = {
	.name = QIB_DRV_NAME,
	.probe = qib_init_one,
	.remove = __devexit_p(qib_remove_one),
	.id_table = qib_pci_tbl,
	.err_handler = &qib_pci_err_handler,
};
/*
 * Do all the generic driver unit- and chip-independent memory
 * allocation and initialization.
 */
static int __init qlogic_ib_init(void)
{
	int ret;

	ret = qib_dev_init();
	if (ret)
		goto bail;

	qib_cq_wq = create_singlethread_workqueue("qib_cq");
	if (!qib_cq_wq) {
		ret = -ENOMEM;
		goto bail_dev;
	}

	/*
	 * These must be called before the driver is registered with
	 * the PCI subsystem.
	 */
	idr_init(&qib_unit_table);
	if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
		printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n");
		ret = -ENOMEM;
		goto bail_cq_wq;
	}

	ret = pci_register_driver(&qib_driver);
	if (ret < 0) {
		printk(KERN_ERR QIB_DRV_NAME
		       ": Unable to register driver: error %d\n", -ret);
		goto bail_unit;
	}

	/* not fatal if it doesn't work */
	if (qib_init_qibfs())
		printk(KERN_ERR QIB_DRV_NAME ": Unable to register ipathfs\n");
	goto bail; /* all OK */

bail_unit:
	idr_destroy(&qib_unit_table);
bail_cq_wq:
	destroy_workqueue(qib_cq_wq);
bail_dev:
	qib_dev_cleanup();
bail:
	return ret;
}

module_init(qlogic_ib_init);
/*
 * Do the non-unit driver cleanup, memory free, etc. at unload.
 */
static void __exit qlogic_ib_cleanup(void)
{
	int ret;

	ret = qib_exit_qibfs();
	if (ret)
		printk(KERN_ERR QIB_DRV_NAME ": "
		       "Unable to cleanup counter filesystem: "
		       "error %d\n", -ret);

	pci_unregister_driver(&qib_driver);

	destroy_workqueue(qib_cq_wq);

	qib_cpulist_count = 0;
	kfree(qib_cpulist);

	idr_destroy(&qib_unit_table);
	qib_dev_cleanup();
}

module_exit(qlogic_ib_cleanup);
/* this can only be called after a successful initialization */
static void cleanup_device_data(struct qib_devdata *dd)
{
	int ctxt;
	int pidx;
	struct qib_ctxtdata **tmp;
	unsigned long flags;

	/* users can't do anything more with chip */
	for (pidx = 0; pidx < dd->num_pports; ++pidx)
		if (dd->pport[pidx].statusp)
			*dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;

	if (!qib_wc_pat)
		qib_disable_wc(dd);

	if (dd->pioavailregs_dma) {
		dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
				  (void *) dd->pioavailregs_dma,
				  dd->pioavailregs_phys);
		dd->pioavailregs_dma = NULL;
	}

	if (dd->pageshadow) {
		struct page **tmpp = dd->pageshadow;
		dma_addr_t *tmpd = dd->physshadow;
		int i, cnt = 0;

		for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) {
			int ctxt_tidbase = ctxt * dd->rcvtidcnt;
			int maxtid = ctxt_tidbase + dd->rcvtidcnt;

			for (i = ctxt_tidbase; i < maxtid; i++) {
				if (!tmpp[i])
					continue;
				pci_unmap_page(dd->pcidev, tmpd[i],
					       PAGE_SIZE, PCI_DMA_FROMDEVICE);
				qib_release_user_pages(&tmpp[i], 1);
				tmpp[i] = NULL;
				cnt++;
			}
		}

		tmpp = dd->pageshadow;
		dd->pageshadow = NULL;
		vfree(tmpp);
	}

	/*
	 * Free any resources still in use (usually just kernel contexts)
	 * at unload; we iterate over ctxtcnt, because that's what we
	 * allocate.  We acquire lock to be really paranoid that rcd isn't
	 * being accessed from some interrupt-related code (that should not
	 * happen, but best to be sure).
	 */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	tmp = dd->rcd;
	dd->rcd = NULL;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);
	for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) {
		struct qib_ctxtdata *rcd = tmp[ctxt];

		tmp[ctxt] = NULL; /* debugging paranoia */
		qib_free_ctxtdata(dd, rcd);
	}
	kfree(tmp);
	kfree(dd->boardname);
}
/*
 * Clean up on unit shutdown, or error during unit load after
 * successful initialization.
 */
static void qib_postinit_cleanup(struct qib_devdata *dd)
{
	/*
	 * Clean up chip-specific stuff.
	 * We check for NULL here, because it's outside
	 * the kregbase check, and we need to call it
	 * after the free_irq.  Thus it's possible that
	 * the function pointers were never initialized.
	 */
	if (dd->f_cleanup)
		dd->f_cleanup(dd);

	qib_pcie_ddcleanup(dd);

	cleanup_device_data(dd);

	qib_free_devdata(dd);
}
static int __devinit qib_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	int ret, j, pidx, initfail;
	struct qib_devdata *dd = NULL;

	ret = qib_pcie_init(pdev, ent);
	if (ret)
		goto bail;

	/*
	 * Do device-specific initialization, function table setup, dd
	 * allocation, etc.
	 */
	switch (ent->device) {
	case PCI_DEVICE_ID_QLOGIC_IB_6120:
#ifdef CONFIG_PCI_MSI
		dd = qib_init_iba6120_funcs(pdev, ent);
#else
		qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
			      "work if CONFIG_PCI_MSI is not enabled\n",
			      ent->device);
		dd = ERR_PTR(-ENODEV);
#endif
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7220:
		dd = qib_init_iba7220_funcs(pdev, ent);
		break;

	case PCI_DEVICE_ID_QLOGIC_IB_7322:
		dd = qib_init_iba7322_funcs(pdev, ent);
		break;

	default:
		qib_early_err(&pdev->dev, "Failing on unknown QLogic "
			      "deviceid 0x%x\n", ent->device);
		ret = -ENODEV;
	}

	if (IS_ERR(dd))
		ret = PTR_ERR(dd);
	if (ret)
		goto bail; /* error already printed */

	/* do the generic initialization */
	initfail = qib_init(dd, 0);

	ret = qib_register_ib_device(dd);

	/*
	 * Now ready for use.  This should be cleared whenever we
	 * detect a reset, or initiate one.  If earlier failure,
	 * we still create devices, so diags, etc. can be used
	 * to determine cause of problem.
	 */
	if (!qib_mini_init && !initfail && !ret)
		dd->flags |= QIB_INITTED;

	j = qib_device_create(dd);
	if (j)
		qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
	j = qibfs_add(dd);
	if (j)
		qib_dev_err(dd, "Failed filesystem setup for counters: %d\n",
			    -j);

	if (qib_mini_init || initfail || ret) {
		qib_stop_timers(dd);
		flush_workqueue(ib_wq);
		for (pidx = 0; pidx < dd->num_pports; ++pidx)
			dd->f_quiet_serdes(dd->pport + pidx);
		if (qib_mini_init)
			goto bail;
		if (!j) {
			(void) qibfs_remove(dd);
			qib_device_remove(dd);
		}
		if (!ret)
			qib_unregister_ib_device(dd);
		qib_postinit_cleanup(dd);
		if (initfail)
			ret = initfail;
		goto bail;
	}

	if (!qib_wc_pat) {
		ret = qib_enable_wc(dd);
		if (ret) {
			qib_dev_err(dd, "Write combining not enabled "
				    "(err %d): performance may be poor\n",
				    -ret);
			ret = 0;
		}
	}

	qib_verify_pioperf(dd);
bail:
	return ret;
}
static void __devexit qib_remove_one(struct pci_dev *pdev)
{
	struct qib_devdata *dd = pci_get_drvdata(pdev);
	int ret;

	/* unregister from IB core */
	qib_unregister_ib_device(dd);

	/*
	 * Disable the IB link, disable interrupts on the device,
	 * clear dma engines, etc.
	 */
	if (!qib_mini_init)
		qib_shutdown_device(dd);

	qib_stop_timers(dd);

	/* wait until all of our (qsfp) queue_work() calls complete */
	flush_workqueue(ib_wq);

	ret = qibfs_remove(dd);
	if (ret)
		qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n",
			    -ret);

	qib_device_remove(dd);

	qib_postinit_cleanup(dd);
}
/**
 * qib_create_rcvhdrq - create a receive header queue
 * @dd: the qlogic_ib device
 * @rcd: the context data
 *
 * This must be contiguous memory (from an i/o perspective), and must be
 * DMA'able (which means for some systems, it will go through an IOMMU,
 * or be forced into a low address range).
 */
int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
{
	unsigned amt;

	if (!rcd->rcvhdrq) {
		dma_addr_t phys_hdrqtail;
		gfp_t gfp_flags;

		amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
			    sizeof(u32), PAGE_SIZE);
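		/*
		 * Worked example with hypothetical numbers: rcvhdrcnt ==
		 * 512 and rcvhdrentsize == 32 give 512 * 32 * 4 = 64KB,
		 * already PAGE_SIZE aligned, so ALIGN() leaves it as-is.
		 */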
		gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
			GFP_USER : GFP_KERNEL;
		rcd->rcvhdrq = dma_alloc_coherent(
			&dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
			gfp_flags | __GFP_COMP);

		if (!rcd->rcvhdrq) {
			qib_dev_err(dd, "attempt to allocate %d bytes "
				    "for ctxt %u rcvhdrq failed\n",
				    amt, rcd->ctxt);
			goto bail;
		}

		if (rcd->ctxt >= dd->first_user_ctxt) {
			rcd->user_event_mask = vmalloc_user(PAGE_SIZE);
			if (!rcd->user_event_mask)
				goto bail_free_hdrq;
		}

		if (!(dd->flags & QIB_NODMA_RTAIL)) {
			rcd->rcvhdrtail_kvaddr = dma_alloc_coherent(
				&dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
				gfp_flags);
			if (!rcd->rcvhdrtail_kvaddr)
				goto bail_free;
			rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
		}

		rcd->rcvhdrq_size = amt;
	}

	/* clear for security and sanity on each use */
	memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
	if (rcd->rcvhdrtail_kvaddr)
		memset(rcd->rcvhdrtail_kvaddr, 0, PAGE_SIZE);
	return 0;

bail_free:
	qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u "
		    "rcvhdrqtailaddr failed\n", rcd->ctxt);
	vfree(rcd->user_event_mask);
	rcd->user_event_mask = NULL;
bail_free_hdrq:
	dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
			  rcd->rcvhdrq_phys);
	rcd->rcvhdrq = NULL;
bail:
	return -ENOMEM;
}
/**
 * qib_setup_eagerbufs - allocate eager buffers, both kernel and user contexts.
 * @rcd: the context we are setting up.
 *
 * Allocate the eager TID buffers and program them into chip.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.  Otherwise we get the OOM code involved, by asking for too
 * much per call, with disastrous results on some kernels.
 */
int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	gfp_t gfp_flags;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;

	egrcnt = rcd->rcvegrcnt;
	egroff = rcd->rcvegr_tid_base;
	egrsize = dd->rcvegrbufsize;

	chunk = rcd->rcvegrbuf_chunks;
	egrperchunk = rcd->rcvegrbufs_perchunk;
	size = rcd->rcvegrbuf_size;
	if (!rcd->rcvegrbuf) {
		rcd->rcvegrbuf =
			kzalloc(chunk * sizeof(rcd->rcvegrbuf[0]),
				GFP_KERNEL);
		if (!rcd->rcvegrbuf)
			goto bail;
	}
	if (!rcd->rcvegrbuf_phys) {
		rcd->rcvegrbuf_phys =
			kmalloc(chunk * sizeof(rcd->rcvegrbuf_phys[0]),
				GFP_KERNEL);
		if (!rcd->rcvegrbuf_phys)
			goto bail_rcvegrbuf;
	}
	for (e = 0; e < rcd->rcvegrbuf_chunks; e++) {
		if (rcd->rcvegrbuf[e])
			continue;
		rcd->rcvegrbuf[e] =
			dma_alloc_coherent(&dd->pcidev->dev, size,
					   &rcd->rcvegrbuf_phys[e],
					   gfp_flags);
		if (!rcd->rcvegrbuf[e])
			goto bail_rcvegrbuf_phys;
	}

	rcd->rcvegr_phys = rcd->rcvegrbuf_phys[0];

	for (e = chunk = 0; chunk < rcd->rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = rcd->rcvegrbuf_phys[chunk];
		unsigned i;

		/* clear for security and sanity on each use */
		memset(rcd->rcvegrbuf[chunk], 0, size);

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->f_put_tid(dd, e + egroff +
				      (u64 __iomem *)
				      ((char __iomem *)
				       dd->kregbase +
				       dd->rcvegrbase),
				      RCVHQ_RCV_TYPE_EAGER, pa);
			pa += egrsize;
		}
		cond_resched(); /* don't hog the cpu */
	}

	return 0;

bail_rcvegrbuf_phys:
	for (e = 0; e < rcd->rcvegrbuf_chunks && rcd->rcvegrbuf[e]; e++)
		dma_free_coherent(&dd->pcidev->dev, size,
				  rcd->rcvegrbuf[e], rcd->rcvegrbuf_phys[e]);
	kfree(rcd->rcvegrbuf_phys);
	rcd->rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(rcd->rcvegrbuf);
	rcd->rcvegrbuf = NULL;
bail:
	return -ENOMEM;
}
/*
 * Note: Changes to this routine should be mirrored
 * for the diagnostics routine qib_remap_ioaddr32().
 * There is also related code for VL15 buffers in qib_init_7322_variables().
 * The teardown code that unmaps is in qib_pcie_ddcleanup()
 */
int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen)
{
	u64 __iomem *qib_kregbase = NULL;
	void __iomem *qib_piobase = NULL;
	u64 __iomem *qib_userbase = NULL;
	u64 qib_kreglen;
	u64 qib_pio2koffset = dd->piobufbase & 0xffffffff;
	u64 qib_pio4koffset = dd->piobufbase >> 32;
	u64 qib_pio2klen = dd->piobcnt2k * dd->palign;
	u64 qib_pio4klen = dd->piobcnt4k * dd->align4k;
	u64 qib_physaddr = dd->physaddr;
	u64 qib_piolen;
	u64 qib_userlen = 0;
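	/*
	 * For reference: dd->piobufbase packs the 2K-buffer offset in its
	 * low 32 bits and the 4K-buffer offset in its high 32 bits; the
	 * lengths above are buffer count times per-buffer stride (palign
	 * for 2K buffers, align4k for 4K buffers).
	 */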
	/*
	 * Free the old mapping because the kernel will try to reuse the
	 * old mapping and not create a new mapping with the
	 * write combining attribute.
	 */
	iounmap(dd->kregbase);
	dd->kregbase = NULL;

	/*
	 * Assumes chip address space looks like:
	 *	- kregs + sregs + cregs + uregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 * or:
	 *	- kregs + sregs + cregs (in any order)
	 *	- piobufs (2K and 4K bufs in either order)
	 *	- uregs
	 */
	if (dd->piobcnt4k == 0) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio2klen;
	} else if (qib_pio2koffset < qib_pio4koffset) {
		qib_kreglen = qib_pio2koffset;
		qib_piolen = qib_pio4koffset + qib_pio4klen - qib_kreglen;
	} else {
		qib_kreglen = qib_pio4koffset;
		qib_piolen = qib_pio2koffset + qib_pio2klen - qib_kreglen;
	}
	qib_piolen += vl15buflen;
	/* Map just the configured ports (not all hw ports) */
	if (dd->uregbase > qib_kreglen)
		qib_userlen = dd->ureg_align * dd->cfgctxts;

	/* Sanity checks passed, now create the new mappings */
	qib_kregbase = ioremap_nocache(qib_physaddr, qib_kreglen);
	if (!qib_kregbase)
		goto bail;

	qib_piobase = ioremap_wc(qib_physaddr + qib_kreglen, qib_piolen);
	if (!qib_piobase)
		goto bail_kregbase;

	if (qib_userlen) {
		qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase,
					       qib_userlen);
		if (!qib_userbase)
			goto bail_piobase;
	}

	dd->kregbase = qib_kregbase;
	dd->kregend = (u64 __iomem *)
		((char __iomem *) qib_kregbase + qib_kreglen);
	dd->piobase = qib_piobase;
	dd->pio2kbase = (void __iomem *)
		(((char __iomem *) dd->piobase) +
		 qib_pio2koffset - qib_kreglen);
	if (dd->piobcnt4k)
		dd->pio4kbase = (void __iomem *)
			(((char __iomem *) dd->piobase) +
			 qib_pio4koffset - qib_kreglen);
	if (qib_userlen)
		/* ureg will now be accessed relative to dd->userbase */
		dd->userbase = qib_userbase;
	return 0;

bail_piobase:
	iounmap(qib_piobase);
bail_kregbase:
	iounmap(qib_kregbase);
bail:
	return -ENOMEM;
}