/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * These are the routines used by layered drivers, currently just the
 * layered ethernet driver and verbs layer.
 */
#include <linux/pci.h>
#include <asm/byteorder.h>

#include "ipath_kernel.h"
#include "ipath_layer.h"
#include "ipath_verbs.h"
#include "ipath_common.h"
/* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex);

u16 ipath_layer_rcv_opcode;

static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *);

static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *);
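/*
 * Note: only one layered driver can be hooked up at a time; the
 * callback pointers above, as well as each dd->ipath_layer.l_arg, are
 * installed by ipath_layer_register() and serialized by
 * ipath_layer_mutex.
 */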
int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_intr)
		ret = layer_intr(dd->ipath_layer.l_arg, arg);

	return ret;
}
int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
{
	int ret;

	mutex_lock(&ipath_layer_mutex);

	ret = __ipath_layer_intr(dd, arg);

	mutex_unlock(&ipath_layer_mutex);

	return ret;
}
int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
		      struct sk_buff *skb)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv)
		ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);

	return ret;
}
int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
{
	int ret = -ENODEV;

	if (dd->ipath_layer.l_arg && layer_rcv_lid)
		ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);

	return ret;
}
int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
{
	u32 lstate;
	int ret;

	switch (newstate) {
	case IPATH_IB_LINKDOWN:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_SLEEP:
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKDOWN_DISABLE:
		ipath_set_ib_lstate(dd,
				    INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
		/* don't wait */
		ret = 0;
		goto bail;

	case IPATH_IB_LINKINIT:
		if (dd->ipath_flags & IPATH_LINKINIT) {
			ret = 0;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKINIT;
		break;

	case IPATH_IB_LINKARM:
		if (dd->ipath_flags & IPATH_LINKARMED) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags &
		      (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		/*
		 * Since the port can transition to ACTIVE by receiving
		 * a non VL 15 packet, wait for either state.
		 */
		lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
		break;

	case IPATH_IB_LINKACTIVE:
		if (dd->ipath_flags & IPATH_LINKACTIVE) {
			ret = 0;
			goto bail;
		}
		if (!(dd->ipath_flags & IPATH_LINKARMED)) {
			ret = -EINVAL;
			goto bail;
		}
		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
				    INFINIPATH_IBCC_LINKCMD_SHIFT);
		lstate = IPATH_LINKACTIVE;
		break;

	default:
		ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_wait_linkstate(dd, lstate, 2000);

bail:
	return ret;
}
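/*
 * Illustrative usage (not part of the driver): a caller bringing the
 * link all the way up steps through the states, since ACTIVE is only
 * reachable from ARMED (see the IPATH_LINKARMED check above):
 *
 *	if (!ipath_layer_set_linkstate(dd, IPATH_IB_LINKARM))
 *		ipath_layer_set_linkstate(dd, IPATH_IB_LINKACTIVE);
 */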
/**
 * ipath_layer_set_mtu - set the MTU
 * @dd: the infinipath device
 * @arg: the new MTU
 *
 * We can handle "any" incoming size, the issue here is whether we
 * need to restrict our outgoing size.  For now, we don't do any
 * sanity checking on this, and we don't deal with what happens to
 * programs that are already running when the size changes.
 * NOTE: changing the MTU will usually cause the IBC to go back to
 * link initialize (IPATH_IBSTATE_INIT) state...
 */
int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
{
	u32 piosize;
	int changed = 0;
	int ret;

	/*
	 * mtu is IB data payload max.  It's the largest power of 2 less
	 * than piosize (or even larger, since it only really controls the
	 * largest we can receive; we can send the max of the mtu and
	 * piosize).  We check that it's one of the valid IB sizes.
	 */
	if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
	    arg != 4096) {
		ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
		ret = -EINVAL;
		goto bail;
	}
	if (dd->ipath_ibmtu == arg) {
		ret = 0;	/* same as current */
		goto bail;
	}

	piosize = dd->ipath_ibmaxlen;
	dd->ipath_ibmtu = arg;

	if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
		/* Only if it's not the initial value (or reset to it) */
		if (piosize != dd->ipath_init_ibmaxlen) {
			dd->ipath_ibmaxlen = piosize;
			changed = 1;
		}
	} else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
		piosize = arg + IPATH_PIO_MAXIBHDR;
		ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
			   "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
			   arg);
		dd->ipath_ibmaxlen = piosize;
		changed = 1;
	}

	if (changed) {
		/*
		 * set the IBC maxpktlength to the size of our pio
		 * buffers in words
		 */
		u64 ibc = dd->ipath_ibcctrl;
		ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
			 INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
		piosize = piosize - 2 * sizeof(u32);	/* ignore pbc */
		dd->ipath_ibmaxlen = piosize;
		piosize /= sizeof(u32);	/* in words */
		/*
		 * for ICRC, which we only send in diag test pkt mode, and
		 * we don't need to worry about that for mtu
		 */
		piosize += 1;
		ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
		dd->ipath_ibcctrl = ibc;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
		dd->ipath_f_tidtemplate(dd);
	}

	ret = 0;

bail:
	return ret;
}
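/*
 * Usage note (illustrative): only the IB-defined MTUs pass the check
 * above, so ipath_layer_set_mtu(dd, 2048) is accepted while an
 * Ethernet-style ipath_layer_set_mtu(dd, 1500) fails with -EINVAL.
 */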
int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
{
	dd->ipath_lid = arg;
	dd->ipath_lmc = lmc;

	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_intr)
		layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);

	mutex_unlock(&ipath_layer_mutex);

	return 0;
}
int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
{
	/* XXX - need to inform anyone who cares this just happened. */
	dd->ipath_guid = guid;
	return 0;
}
__be64 ipath_layer_get_guid(struct ipath_devdata *dd)
{
	return dd->ipath_guid;
}

u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
{
	return dd->ipath_majrev;
}

u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
{
	return dd->ipath_minrev;
}

u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
{
	return dd->ipath_pcirev;
}

u32 ipath_layer_get_flags(struct ipath_devdata *dd)
{
	return dd->ipath_flags;
}

struct device *ipath_layer_get_device(struct ipath_devdata *dd)
{
	return &dd->pcidev->dev;
}

u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
{
	return dd->ipath_deviceid;
}

u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
{
	return dd->ipath_vendorid;
}

u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
{
	return dd->ipath_lastibcstat;
}

u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
{
	return dd->ipath_ibmtu;
}
void ipath_layer_add(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (layer_add_one)
		dd->ipath_layer.l_arg =
			layer_add_one(dd->ipath_unit, dd);

	mutex_unlock(&ipath_layer_mutex);
}
void ipath_layer_remove(struct ipath_devdata *dd)
{
	mutex_lock(&ipath_layer_mutex);

	if (dd->ipath_layer.l_arg && layer_remove_one) {
		layer_remove_one(dd->ipath_layer.l_arg);
		dd->ipath_layer.l_arg = NULL;
	}

	mutex_unlock(&ipath_layer_mutex);
}
int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
			 void (*l_remove)(void *),
			 int (*l_intr)(void *, u32),
			 int (*l_rcv)(void *, void *, struct sk_buff *),
			 u16 l_rcv_opcode,
			 int (*l_rcv_lid)(void *, void *))
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);

	layer_add_one = l_add;
	layer_remove_one = l_remove;
	layer_intr = l_intr;
	layer_rcv = l_rcv;
	layer_rcv_lid = l_rcv_lid;
	ipath_layer_rcv_opcode = l_rcv_opcode;

	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (!(dd->ipath_flags & IPATH_INITTED))
			continue;

		if (dd->ipath_layer.l_arg)
			continue;

		if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
			*dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;

		spin_unlock_irqrestore(&ipath_devs_lock, flags);
		dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
		spin_lock_irqsave(&ipath_devs_lock, flags);
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);
	mutex_unlock(&ipath_layer_mutex);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_register);
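/*
 * Registration sketch (illustrative; the my_* names are hypothetical):
 * a layered driver hooks itself up at module init with
 *
 *	ret = ipath_layer_register(my_add_one, my_remove_one, my_intr,
 *				   my_rcv, MY_RCV_OPCODE, my_rcv_lid);
 *
 * after which my_add_one() has been called for every device that is
 * already INITTED, its return value stored in dd->ipath_layer.l_arg
 * for use in later callbacks.
 */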
void ipath_layer_unregister(void)
{
	struct ipath_devdata *dd, *tmp;
	unsigned long flags;

	mutex_lock(&ipath_layer_mutex);
	spin_lock_irqsave(&ipath_devs_lock, flags);

	list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
		if (dd->ipath_layer.l_arg && layer_remove_one) {
			spin_unlock_irqrestore(&ipath_devs_lock, flags);
			layer_remove_one(dd->ipath_layer.l_arg);
			spin_lock_irqsave(&ipath_devs_lock, flags);
			dd->ipath_layer.l_arg = NULL;
		}
	}

	spin_unlock_irqrestore(&ipath_devs_lock, flags);

	layer_add_one = NULL;
	layer_remove_one = NULL;
	layer_intr = NULL;
	layer_rcv = NULL;
	layer_rcv_lid = NULL;

	mutex_unlock(&ipath_layer_mutex);
}

EXPORT_SYMBOL_GPL(ipath_layer_unregister);
static void __ipath_verbs_timer(unsigned long arg)
{
	struct ipath_devdata *dd = (struct ipath_devdata *) arg;

	/*
	 * If port 0 receive packet interrupts are not available, or
	 * can be missed, poll the receive queue
	 */
	if (dd->ipath_flags & IPATH_POLL_RX_INTR)
		ipath_kreceive(dd);

	/* Handle verbs layer timeouts. */
	ipath_ib_timer(dd->verbs_dev);

	mod_timer(&dd->verbs_timer, jiffies + 1);
}
int ipath_layer_open(struct ipath_devdata *dd, u32 *pktmax)
{
	int ret;
	u32 intval = 0;

	mutex_lock(&ipath_layer_mutex);

	if (!dd->ipath_layer.l_arg) {
		ret = -EINVAL;
		goto bail;
	}

	ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);
	if (ret < 0)
		goto bail;

	*pktmax = dd->ipath_ibmaxlen;

	if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
		intval |= IPATH_LAYER_INT_IF_UP;
	if (dd->ipath_lid)
		intval |= IPATH_LAYER_INT_LID;
	if (dd->ipath_mlid)
		intval |= IPATH_LAYER_INT_BCAST;
	/*
	 * do this on open, in case low level is already up and
	 * just layered driver was reloaded, etc.
	 */
	if (intval)
		layer_intr(dd->ipath_layer.l_arg, intval);

	ret = 0;
bail:
	mutex_unlock(&ipath_layer_mutex);

	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_open);
u16 ipath_layer_get_lid(struct ipath_devdata *dd)
{
	return dd->ipath_lid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
/**
 * ipath_layer_get_mac - get the MAC address
 * @dd: the infinipath device
 * @mac: the MAC is put here
 *
 * This is the EUID-64 OUI octets (top 3), then
 * skip the next 2 (which should both be zero or 0xff).
 * The returned MAC is in network order.
 * mac points to at least 6 bytes of buffer.
 * We assume that by the time the LID is set, that the GUID is as valid
 * as it's ever going to be, rather than adding yet another status bit.
 */
int ipath_layer_get_mac(struct ipath_devdata *dd, u8 *mac)
{
	u8 *guid;

	guid = (u8 *) &dd->ipath_guid;

	mac[0] = guid[0];
	mac[1] = guid[1];
	mac[2] = guid[2];
	mac[3] = guid[5];
	mac[4] = guid[6];
	mac[5] = guid[7];
	if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
		ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
			  "%x %x\n", guid[3], guid[4]);
	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
{
	return dd->ipath_mlid;
}

EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
{
	return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
}
static void update_sge(struct ipath_sge_state *ss, u32 length)
{
	struct ipath_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}
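/*
 * The endian helpers below hide the byte-order details of packing
 * arbitrarily aligned source bytes into whole 32-bit words for the PIO
 * buffer.  "Upper" means the bytes that come later in memory within a
 * word: the high-order bits on little-endian, the low-order bits on
 * big-endian.
 */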
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif
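/*
 * copy_io() streams an arbitrarily aligned, multi-segment SGE list into
 * a PIO buffer 32 bits at a time.  A partially filled word is staged in
 * 'data' ('extra' bytes already valid); complete words are written with
 * __raw_writel(), and the final word is held back in 'last' so the
 * trigger word is only written after everything else has been flushed.
 */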
static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
		    u32 length)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		BUG_ON(len == 0);
		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l,
								  extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			__iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			__iowrite32_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;

			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	/* must flush early everything before trigger word */
	ipath_flush_wc();
	__raw_writel(last, piobuf);
	/* be sure trigger word is written */
	ipath_flush_wc();
}
/**
 * ipath_verbs_send - send a packet from the verbs layer
 * @dd: the infinipath device
 * @hdrwords: the number of words in the header
 * @hdr: the packet header
 * @len: the length of the packet in bytes
 * @ss: the SGE to send
 *
 * This is like ipath_sma_send_pkt() in that we need to be able to send
 * packets after the chip is initialized (MADs) but also like
 * ipath_layer_send_hdr() since it's used by the verbs layer.
 */
int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
		     u32 *hdr, u32 len, struct ipath_sge_state *ss)
{
	u32 __iomem *piobuf;
	u32 plen;
	int ret;

	/* +1 is for the qword padding of pbc */
	plen = hdrwords + ((len + 3) >> 2) + 1;
	if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
		ipath_dbg("packet len 0x%x too long, failing\n", plen);
		ret = -EINVAL;
		goto bail;
	}

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (unlikely(piobuf == NULL)) {
		ret = -EBUSY;
		goto bail;
	}

	/*
	 * Write len to control qword, no flags.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(plen, piobuf);
	ipath_flush_wc();
	piobuf += 2;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		__iowrite32_copy(piobuf, hdr, hdrwords - 1);
		ipath_flush_wc();
		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}

	__iowrite32_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment. */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 w;
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		/* Need to round up for the last dword in the packet. */
		w = (len + 3) >> 2;
		__iowrite32_copy(piobuf, addr, w - 1);
		/* must flush early everything before trigger word */
		ipath_flush_wc();
		__raw_writel(addr[w - 1], piobuf + w - 1);
		/* be sure trigger word is written */
		ipath_flush_wc();
		ret = 0;
		goto bail;
	}
	copy_io(piobuf, ss, len);
	ret = 0;

bail:
	return ret;
}
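/*
 * Worked example (illustrative numbers): a 40-byte header (hdrwords ==
 * 10) with a 256-byte payload gives plen = 10 + ((256 + 3) >> 2) + 1 =
 * 75 words; the +1 is the qword padding for the PBC noted above.
 */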
int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
				  u64 *rwords, u64 *spkts, u64 *rpkts,
				  u64 *xmit_wait)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	*swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	*rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	*spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	*rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	*xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);

	ret = 0;

bail:
	return ret;
}
/**
 * ipath_layer_get_counters - get various chip counters
 * @dd: the infinipath device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int ipath_layer_get_counters(struct ipath_devdata *dd,
			     struct ipath_layer_counters *cntrs)
{
	int ret;

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_dbg("unit %u not usable\n", dd->ipath_unit);
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
	cntrs->link_error_recovery_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection.  We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
	cntrs->port_rcv_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
	cntrs->port_rcv_remphys_errors =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
	cntrs->port_xmit_discards =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
	cntrs->port_xmit_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	cntrs->port_rcv_data =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	cntrs->port_xmit_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
	cntrs->port_rcv_packets =
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
	cntrs->excessive_buffer_overrun_errors = 0; /* XXX */

	ret = 0;

bail:
	return ret;
}
int ipath_layer_want_buffer(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}
int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
{
	int ret = 0;
	u32 __iomem *piobuf;
	u32 plen, *uhdr;
	size_t count;
	__be16 vlsllnh;

	if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
		ipath_dbg("send while not open\n");
		ret = -EINVAL;
	} else if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
		   dd->ipath_lid == 0) {
		/*
		 * lid check is for when sma hasn't yet configured it
		 */
		ret = -ENETDOWN;
		ipath_cdbg(VERBOSE, "send while not ready, "
			   "mylid=%u, flags=0x%x\n",
			   dd->ipath_lid, dd->ipath_flags);
	}

	vlsllnh = *((__be16 *) hdr);
	if (vlsllnh != htons(IPATH_LRH_BTH)) {
		ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
			  "not sending\n", be16_to_cpu(vlsllnh),
			  IPATH_LRH_BTH);
		ret = -EINVAL;
	}
	if (ret)
		goto done;

	/* Get a PIO buffer to use. */
	piobuf = ipath_getpiobuf(dd, NULL);
	if (piobuf == NULL) {
		ret = -EBUSY;
		goto done;
	}

	plen = (sizeof(*hdr) >> 2); /* actual length */
	ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);

	writeq(plen + 1, piobuf); /* len (+1 for pad) to pbc, no flags */
	ipath_flush_wc();
	piobuf += 2;
	uhdr = (u32 *) hdr;
	count = plen - 1; /* amount we can copy before trigger word */
	__iowrite32_copy(piobuf, uhdr, count);
	ipath_flush_wc();
	__raw_writel(uhdr[count], piobuf + count);
	ipath_flush_wc(); /* ensure it's sent, now */

	ipath_stats.sps_ether_spkts++; /* ether packet sent */

done:
	return ret;
}

EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
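/*
 * Sketch (illustrative): the layered ethernet driver's transmit path
 * fills in a struct ether_header, with the LRH first so that lrh[0]
 * matches IPATH_LRH_BTH, then calls
 *
 *	ret = ipath_layer_send_hdr(dd, hdr);
 *
 * and on -EBUSY arms the PIO-available interrupt (see below) so it can
 * retry once a buffer frees up.
 */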
int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);

	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
int ipath_layer_enable_timer(struct ipath_devdata *dd)
{
	/*
	 * HT-400 has a design flaw where the chip and kernel idea
	 * of the tail register don't always agree, and therefore we won't
	 * get an interrupt on the next packet received.
	 * If the board supports per packet receive interrupts, use it.
	 * Otherwise, the timer function periodically checks for packets
	 * to cover this case.
	 * Either way, the timer is needed for verbs layer related
	 * processing.
	 */
	if (dd->ipath_flags & IPATH_GPIO_INTR) {
		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
				 0x2074076542310ULL);
		/* Enable GPIO bit 2 interrupt */
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
				 (u64) (1 << 2));
	}

	init_timer(&dd->verbs_timer);
	dd->verbs_timer.function = __ipath_verbs_timer;
	dd->verbs_timer.data = (unsigned long)dd;
	dd->verbs_timer.expires = jiffies + 1;
	add_timer(&dd->verbs_timer);

	return 0;
}
int ipath_layer_disable_timer(struct ipath_devdata *dd)
{
	/* Disable GPIO bit 2 interrupt */
	if (dd->ipath_flags & IPATH_GPIO_INTR)
		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);

	del_timer_sync(&dd->verbs_timer);

	return 0;
}
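/*
 * ipath_layer_enable_timer() / ipath_layer_disable_timer() are meant to
 * bracket verbs operation: enable once the device is up, disable before
 * teardown so that __ipath_verbs_timer() (which re-arms itself every
 * jiffy) can no longer fire.
 */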
/**
 * ipath_layer_set_verbs_flags - set the verbs layer flags
 * @dd: the infinipath device
 * @flags: the flags to set
 */
int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
{
	struct ipath_devdata *ss;
	unsigned long lflags;

	spin_lock_irqsave(&ipath_devs_lock, lflags);

	list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
		if (!(ss->ipath_flags & IPATH_INITTED))
			continue;
		if ((flags & IPATH_VERBS_KERNEL_SMA) &&
		    !(*ss->ipath_statusp & IPATH_STATUS_SMA))
			*ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
		else
			*ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
	}

	spin_unlock_irqrestore(&ipath_devs_lock, lflags);

	return 0;
}
/**
 * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
 * @dd: the infinipath device
 */
unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
{
	return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
}
/**
 * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
 * @dd: the infinipath device
 * @index: the PKEY index
 */
unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
{
	unsigned ret;

	if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
		ret = 0;
	else
		ret = dd->ipath_pd[0]->port_pkeys[index];

	return ret;
}
/**
 * ipath_layer_get_pkeys - return the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the pkey table is placed here
 */
int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 *pkeys)
{
	struct ipath_portdata *pd = dd->ipath_pd[0];

	memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));

	return 0;
}
/**
 * rm_pkey - decrement the reference count for the given PKEY
 * @dd: the infinipath device
 * @key: the PKEY index
 *
 * Return true if this was the last reference and the hardware table entry
 * needs to be changed.
 */
static int rm_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (dd->ipath_pkeys[i] != key)
			continue;
		if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
			dd->ipath_pkeys[i] = 0;
			ret = 1;
			goto bail;
		}
		ret = 0;
		goto bail;
	}

	ret = -ENOENT;

bail:
	return ret;
}
/**
 * add_pkey - add the given PKEY to the hardware table
 * @dd: the infinipath device
 * @key: the PKEY
 *
 * Return an error code if unable to add the entry, zero if no change,
 * or 1 if the hardware PKEY register needs to be updated.
 */
static int add_pkey(struct ipath_devdata *dd, u16 key)
{
	int i;
	u16 lkey = key & 0x7FFF;
	int any = 0;
	int ret;

	if (lkey == 0x7FFF) {
		ret = 0;
		goto bail;
	}

	/* Look for an empty slot or a matching PKEY. */
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		/* If it matches exactly, try to increment the ref count */
		if (dd->ipath_pkeys[i] == key) {
			if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
				ret = 0;
				goto bail;
			}
			/* Lost the race. Look for an empty slot below. */
			atomic_dec(&dd->ipath_pkeyrefs[i]);
			any++;
		}
		/*
		 * It makes no sense to have both the limited and unlimited
		 * PKEY set at the same time since the unlimited one will
		 * disable the limited one.
		 */
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			dd->ipath_pkeys[i] = key;
			ret = 1;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}
/**
 * ipath_layer_set_pkeys - set the PKEY table for port 0
 * @dd: the infinipath device
 * @pkeys: the PKEY table
 */
int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
{
	struct ipath_portdata *pd;
	int i;
	int changed = 0;

	pd = dd->ipath_pd[0];

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		u16 key = pkeys[i];
		u16 okey = pd->port_pkeys[i];

		if (key == okey)
			continue;
		/*
		 * The value of this PKEY table entry is changing.
		 * Remove the old entry in the hardware's array of PKEYs.
		 */
		if (okey & 0x7FFF)
			changed |= rm_pkey(dd, okey);
		if (key & 0x7FFF) {
			int ret = add_pkey(dd, key);

			if (ret < 0)
				key = 0;
			else
				changed |= ret;
		}
		pd->port_pkeys[i] = key;
	}
	if (changed) {
		u64 pkey;

		pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
	return 0;
}
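/*
 * Usage sketch (hypothetical values): a subnet-management consumer
 * rewrites the whole port-0 table at once, e.g.
 *
 *	u16 pkeys[4];	(size is really ipath_layer_get_npkeys(dd))
 *	ipath_layer_get_pkeys(dd, pkeys);
 *	pkeys[1] = 0x8001;	(a full-membership PKEY)
 *	ipath_layer_set_pkeys(dd, pkeys);
 *
 * Only entries whose value actually changes touch the hardware table
 * reference counts via rm_pkey()/add_pkey() above.
 */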
/**
 * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
 * @dd: the infinipath device
 *
 * Returns zero if the default is POLL, 1 if the default is SLEEP.
 */
int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
{
	return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
}
/**
 * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
 * @dd: the infinipath device
 * @sleep: the new state
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
					 int sleep)
{
	if (sleep)
		dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	else
		dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
			 dd->ipath_ibcctrl);

	return 0;
}
int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
}
/**
 * ipath_layer_set_phyerrthreshold - set the physical error threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}

	return 0;
}
int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
{
	return (dd->ipath_ibcctrl >>
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
}
/**
 * ipath_layer_set_overrunthreshold - set the overrun threshold
 * @dd: the infinipath device
 * @n: the new threshold
 *
 * Note that this will only take effect when the link state changes.
 */
int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
{
	unsigned v;

	v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
		INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
	if (v != n) {
		dd->ipath_ibcctrl &=
			~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
			  INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
		dd->ipath_ibcctrl |=
			(u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
				 dd->ipath_ibcctrl);
	}

	return 0;
}
int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
			      size_t namelen)
{
	return dd->ipath_f_get_boardname(dd, name, namelen);
}
u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
{
	return dd->ipath_rcvhdrentsize;
}