1 /*
2  * Copyright (c) 2006 QLogic, Inc. All rights reserved.
3  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 /*
35  * These are the routines used by layered drivers, currently just the
36  * layered ethernet driver and verbs layer.
37  */
38
39 #include <linux/io.h>
40 #include <linux/pci.h>
41 #include <asm/byteorder.h>
42
43 #include "ipath_kernel.h"
44 #include "ipath_layer.h"
45 #include "ipath_verbs.h"
46 #include "ipath_common.h"
47
48 /* Acquire before ipath_devs_lock. */
49 static DEFINE_MUTEX(ipath_layer_mutex);
50
51 u16 ipath_layer_rcv_opcode;
52
53 static int (*layer_intr)(void *, u32);
54 static int (*layer_rcv)(void *, void *, struct sk_buff *);
55 static int (*layer_rcv_lid)(void *, void *);
56
57 static void *(*layer_add_one)(int, struct ipath_devdata *);
58 static void (*layer_remove_one)(void *);
59
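/*
 * Dispatch helpers: forward to the registered layered driver's callbacks,
 * if one has attached to this unit; return -ENODEV otherwise.
 */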
60 int __ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
61 {
62         int ret = -ENODEV;
63
64         if (dd->ipath_layer.l_arg && layer_intr)
65                 ret = layer_intr(dd->ipath_layer.l_arg, arg);
66
67         return ret;
68 }
69
70 int ipath_layer_intr(struct ipath_devdata *dd, u32 arg)
71 {
72         int ret;
73
74         mutex_lock(&ipath_layer_mutex);
75
76         ret = __ipath_layer_intr(dd, arg);
77
78         mutex_unlock(&ipath_layer_mutex);
79
80         return ret;
81 }
82
83 int __ipath_layer_rcv(struct ipath_devdata *dd, void *hdr,
84                       struct sk_buff *skb)
85 {
86         int ret = -ENODEV;
87
88         if (dd->ipath_layer.l_arg && layer_rcv)
89                 ret = layer_rcv(dd->ipath_layer.l_arg, hdr, skb);
90
91         return ret;
92 }
93
94 int __ipath_layer_rcv_lid(struct ipath_devdata *dd, void *hdr)
95 {
96         int ret = -ENODEV;
97
98         if (dd->ipath_layer.l_arg && layer_rcv_lid)
99                 ret = layer_rcv_lid(dd->ipath_layer.l_arg, hdr);
100
101         return ret;
102 }
103
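/*
 * ipath_layer_set_linkstate - request an IB link state change.
 * For the LINKDOWN variants the command is issued without waiting;
 * for INIT, ARM and ACTIVE we wait up to 2 seconds for the link to
 * reach the requested state (or ACTIVE, in the ARM case).
 */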
104 int ipath_layer_set_linkstate(struct ipath_devdata *dd, u8 newstate)
105 {
106         u32 lstate;
107         int ret;
108
109         switch (newstate) {
110         case IPATH_IB_LINKDOWN:
111                 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
112                                     INFINIPATH_IBCC_LINKINITCMD_SHIFT);
113                 /* don't wait */
114                 ret = 0;
115                 goto bail;
116
117         case IPATH_IB_LINKDOWN_SLEEP:
118                 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_SLEEP <<
119                                     INFINIPATH_IBCC_LINKINITCMD_SHIFT);
120                 /* don't wait */
121                 ret = 0;
122                 goto bail;
123
124         case IPATH_IB_LINKDOWN_DISABLE:
125                 ipath_set_ib_lstate(dd,
126                                     INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
127                                     INFINIPATH_IBCC_LINKINITCMD_SHIFT);
128                 /* don't wait */
129                 ret = 0;
130                 goto bail;
131
132         case IPATH_IB_LINKINIT:
133                 if (dd->ipath_flags & IPATH_LINKINIT) {
134                         ret = 0;
135                         goto bail;
136                 }
137                 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
138                                     INFINIPATH_IBCC_LINKCMD_SHIFT);
139                 lstate = IPATH_LINKINIT;
140                 break;
141
142         case IPATH_IB_LINKARM:
143                 if (dd->ipath_flags & IPATH_LINKARMED) {
144                         ret = 0;
145                         goto bail;
146                 }
147                 if (!(dd->ipath_flags &
148                       (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
149                         ret = -EINVAL;
150                         goto bail;
151                 }
152                 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED <<
153                                     INFINIPATH_IBCC_LINKCMD_SHIFT);
154                 /*
155                  * Since the port can transition to ACTIVE by receiving
156                  * a non VL 15 packet, wait for either state.
157                  */
158                 lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
159                 break;
160
161         case IPATH_IB_LINKACTIVE:
162                 if (dd->ipath_flags & IPATH_LINKACTIVE) {
163                         ret = 0;
164                         goto bail;
165                 }
166                 if (!(dd->ipath_flags & IPATH_LINKARMED)) {
167                         ret = -EINVAL;
168                         goto bail;
169                 }
170                 ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE <<
171                                     INFINIPATH_IBCC_LINKCMD_SHIFT);
172                 lstate = IPATH_LINKACTIVE;
173                 break;
174
175         default:
176                 ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
177                 ret = -EINVAL;
178                 goto bail;
179         }
180         ret = ipath_wait_linkstate(dd, lstate, 2000);
181
182 bail:
183         return ret;
184 }
185
186 /**
187  * ipath_layer_set_mtu - set the MTU
188  * @dd: the infinipath device
189  * @arg: the new MTU
190  *
191  * We can handle "any" incoming size; the issue here is whether we
192  * need to restrict our outgoing size.  For now, we don't do any
193  * sanity checking on this, and we don't deal with what happens to
194  * programs that are already running when the size changes.
195  * NOTE: changing the MTU will usually cause the IBC to go back to
196  * link initialize (IPATH_IBSTATE_INIT) state...
197  */
198 int ipath_layer_set_mtu(struct ipath_devdata *dd, u16 arg)
199 {
200         u32 piosize;
201         int changed = 0;
202         int ret;
203
204         /*
205          * mtu is IB data payload max.  It's the largest power of 2 less
206          * than piosize (or even larger, since it only really controls the
207          * largest we can receive; we can send the max of the mtu and
208          * piosize).  We check that it's one of the valid IB sizes.
209          */
210         if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
211             arg != 4096) {
212                 ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
213                 ret = -EINVAL;
214                 goto bail;
215         }
216         if (dd->ipath_ibmtu == arg) {
217                 ret = 0;        /* same as current */
218                 goto bail;
219         }
220
221         piosize = dd->ipath_ibmaxlen;
222         dd->ipath_ibmtu = arg;
223
224         if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
225                 /* Only if it's not the initial value (or reset to it) */
226                 if (piosize != dd->ipath_init_ibmaxlen) {
227                         dd->ipath_ibmaxlen = piosize;
228                         changed = 1;
229                 }
230         } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
231                 piosize = arg + IPATH_PIO_MAXIBHDR;
232                 ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
233                            "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
234                            arg);
235                 dd->ipath_ibmaxlen = piosize;
236                 changed = 1;
237         }
238
239         if (changed) {
240                 /*
241                  * set the IBC maxpktlength to the size of our pio
242                  * buffers in words
243                  */
244                 u64 ibc = dd->ipath_ibcctrl;
245                 ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
246                          INFINIPATH_IBCC_MAXPKTLEN_SHIFT);
247
248                 piosize = piosize - 2 * sizeof(u32);    /* ignore pbc */
249                 dd->ipath_ibmaxlen = piosize;
250                 piosize /= sizeof(u32); /* in words */
251                 /*
252                  * add one word for the ICRC, which we only send in diag
253                  * test pkt mode, so it doesn't matter for the mtu
254                  */
255                 piosize += 1;
256
257                 ibc |= piosize << INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
258                 dd->ipath_ibcctrl = ibc;
259                 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
260                                  dd->ipath_ibcctrl);
261                 dd->ipath_f_tidtemplate(dd);
262         }
263
264         ret = 0;
265
266 bail:
267         return ret;
268 }
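/*
 * Illustrative note on the register programming above: when the new
 * ibmaxlen is arg + IPATH_PIO_MAXIBHDR (the second branch), the value
 * written to the IBC MAXPKTLEN field is (arg + IPATH_PIO_MAXIBHDR - 8) / 4
 * + 1 dwords, i.e. the pbc qword is excluded and one dword is added for
 * the ICRC.
 */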
269
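/*
 * ipath_set_lid - set the local LID and LMC, then notify the attached
 * layered driver (if any) via its interrupt callback.
 */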
270 int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
271 {
272         dd->ipath_lid = arg;
273         dd->ipath_lmc = lmc;
274
275         mutex_lock(&ipath_layer_mutex);
276
277         if (dd->ipath_layer.l_arg && layer_intr)
278                 layer_intr(dd->ipath_layer.l_arg, IPATH_LAYER_INT_LID);
279
280         mutex_unlock(&ipath_layer_mutex);
281
282         return 0;
283 }
284
285 int ipath_layer_set_guid(struct ipath_devdata *dd, __be64 guid)
286 {
287         /* XXX - need to inform anyone who cares this just happened. */
288         dd->ipath_guid = guid;
289         return 0;
290 }
291
292 __be64 ipath_layer_get_guid(struct ipath_devdata *dd)
293 {
294         return dd->ipath_guid;
295 }
296
297 u32 ipath_layer_get_majrev(struct ipath_devdata *dd)
298 {
299         return dd->ipath_majrev;
300 }
301
302 u32 ipath_layer_get_minrev(struct ipath_devdata *dd)
303 {
304         return dd->ipath_minrev;
305 }
306
307 u32 ipath_layer_get_pcirev(struct ipath_devdata *dd)
308 {
309         return dd->ipath_pcirev;
310 }
311
312 u32 ipath_layer_get_flags(struct ipath_devdata *dd)
313 {
314         return dd->ipath_flags;
315 }
316
317 struct device *ipath_layer_get_device(struct ipath_devdata *dd)
318 {
319         return &dd->pcidev->dev;
320 }
321
322 u16 ipath_layer_get_deviceid(struct ipath_devdata *dd)
323 {
324         return dd->ipath_deviceid;
325 }
326
327 u32 ipath_layer_get_vendorid(struct ipath_devdata *dd)
328 {
329         return dd->ipath_vendorid;
330 }
331
332 u64 ipath_layer_get_lastibcstat(struct ipath_devdata *dd)
333 {
334         return dd->ipath_lastibcstat;
335 }
336
337 u32 ipath_layer_get_ibmtu(struct ipath_devdata *dd)
338 {
339         return dd->ipath_ibmtu;
340 }
341
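/*
 * Called when a device is added to or removed from the driver; hand the
 * unit to the registered layered driver, or tear down its per-unit state.
 */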
342 void ipath_layer_add(struct ipath_devdata *dd)
343 {
344         mutex_lock(&ipath_layer_mutex);
345
346         if (layer_add_one)
347                 dd->ipath_layer.l_arg =
348                         layer_add_one(dd->ipath_unit, dd);
349
350         mutex_unlock(&ipath_layer_mutex);
351 }
352
353 void ipath_layer_remove(struct ipath_devdata *dd)
354 {
355         mutex_lock(&ipath_layer_mutex);
356
357         if (dd->ipath_layer.l_arg && layer_remove_one) {
358                 layer_remove_one(dd->ipath_layer.l_arg);
359                 dd->ipath_layer.l_arg = NULL;
360         }
361
362         mutex_unlock(&ipath_layer_mutex);
363 }
364
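/*
 * ipath_layer_register - record the layered driver's callbacks and call
 * its add routine for every device that is already initialized and not
 * yet attached.
 */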
365 int ipath_layer_register(void *(*l_add)(int, struct ipath_devdata *),
366                          void (*l_remove)(void *),
367                          int (*l_intr)(void *, u32),
368                          int (*l_rcv)(void *, void *, struct sk_buff *),
369                          u16 l_rcv_opcode,
370                          int (*l_rcv_lid)(void *, void *))
371 {
372         struct ipath_devdata *dd, *tmp;
373         unsigned long flags;
374
375         mutex_lock(&ipath_layer_mutex);
376
377         layer_add_one = l_add;
378         layer_remove_one = l_remove;
379         layer_intr = l_intr;
380         layer_rcv = l_rcv;
381         layer_rcv_lid = l_rcv_lid;
382         ipath_layer_rcv_opcode = l_rcv_opcode;
383
384         spin_lock_irqsave(&ipath_devs_lock, flags);
385
386         list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
387                 if (!(dd->ipath_flags & IPATH_INITTED))
388                         continue;
389
390                 if (dd->ipath_layer.l_arg)
391                         continue;
392
393                 if (!(*dd->ipath_statusp & IPATH_STATUS_SMA))
394                         *dd->ipath_statusp |= IPATH_STATUS_OIB_SMA;
395
396                 spin_unlock_irqrestore(&ipath_devs_lock, flags);
397                 dd->ipath_layer.l_arg = l_add(dd->ipath_unit, dd);
398                 spin_lock_irqsave(&ipath_devs_lock, flags);
399         }
400
401         spin_unlock_irqrestore(&ipath_devs_lock, flags);
402         mutex_unlock(&ipath_layer_mutex);
403
404         return 0;
405 }
406
407 EXPORT_SYMBOL_GPL(ipath_layer_register);
408
409 void ipath_layer_unregister(void)
410 {
411         struct ipath_devdata *dd, *tmp;
412         unsigned long flags;
413
414         mutex_lock(&ipath_layer_mutex);
415         spin_lock_irqsave(&ipath_devs_lock, flags);
416
417         list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
418                 if (dd->ipath_layer.l_arg && layer_remove_one) {
419                         spin_unlock_irqrestore(&ipath_devs_lock, flags);
420                         layer_remove_one(dd->ipath_layer.l_arg);
421                         spin_lock_irqsave(&ipath_devs_lock, flags);
422                         dd->ipath_layer.l_arg = NULL;
423                 }
424         }
425
426         spin_unlock_irqrestore(&ipath_devs_lock, flags);
427
428         layer_add_one = NULL;
429         layer_remove_one = NULL;
430         layer_intr = NULL;
431         layer_rcv = NULL;
432         layer_rcv_lid = NULL;
433
434         mutex_unlock(&ipath_layer_mutex);
435 }
436
437 EXPORT_SYMBOL_GPL(ipath_layer_unregister);
438
439 static void __ipath_verbs_timer(unsigned long arg)
440 {
441         struct ipath_devdata *dd = (struct ipath_devdata *) arg;
442
443         /*
444          * If port 0 receive packet interrupts are not available, or
445          * can be missed, poll the receive queue
446          */
447         if (dd->ipath_flags & IPATH_POLL_RX_INTR)
448                 ipath_kreceive(dd);
449
450         /* Handle verbs layer timeouts. */
451         ipath_ib_timer(dd->verbs_dev);
452         mod_timer(&dd->verbs_timer, jiffies + 1);
453 }
454
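/*
 * ipath_layer_open - called when the layered driver opens a unit; set the
 * receive header queue entry size, report the maximum packet length, and
 * replay any link-up/LID/MLID events the driver may have missed.
 */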
455 int ipath_layer_open(struct ipath_devdata *dd, u32 * pktmax)
456 {
457         int ret;
458         u32 intval = 0;
459
460         mutex_lock(&ipath_layer_mutex);
461
462         if (!dd->ipath_layer.l_arg) {
463                 ret = -EINVAL;
464                 goto bail;
465         }
466
467         ret = ipath_setrcvhdrsize(dd, IPATH_HEADER_QUEUE_WORDS);
468
469         if (ret < 0)
470                 goto bail;
471
472         *pktmax = dd->ipath_ibmaxlen;
473
474         if (*dd->ipath_statusp & IPATH_STATUS_IB_READY)
475                 intval |= IPATH_LAYER_INT_IF_UP;
476         if (dd->ipath_lid)
477                 intval |= IPATH_LAYER_INT_LID;
478         if (dd->ipath_mlid)
479                 intval |= IPATH_LAYER_INT_BCAST;
480         /*
481          * do this on open, in case the low level is already up and
482          * just the layered driver was reloaded, etc.
483          */
484         if (intval)
485                 layer_intr(dd->ipath_layer.l_arg, intval);
486
487         ret = 0;
488 bail:
489         mutex_unlock(&ipath_layer_mutex);
490
491         return ret;
492 }
493
494 EXPORT_SYMBOL_GPL(ipath_layer_open);
495
496 u16 ipath_layer_get_lid(struct ipath_devdata *dd)
497 {
498         return dd->ipath_lid;
499 }
500
501 EXPORT_SYMBOL_GPL(ipath_layer_get_lid);
502
503 /**
504  * ipath_layer_get_mac - get the MAC address
505  * @dd: the infinipath device
506  * @mac: the MAC is put here
507  *
508  * This is the EUI-64 OUI octets (top 3), then
509  * skip the next 2 (which should both be zero or 0xff).
510  * The returned MAC is in network order.
511  * mac points to at least 6 bytes of buffer.
512  * We assume that by the time the LID is set, the GUID is as valid
513  * as it's ever going to be, rather than adding yet another status bit.
514  */
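/*
 * Illustrative example: a GUID of aa:bb:cc:00:00:01:02:03 yields the MAC
 * aa:bb:cc:01:02:03; bytes 3 and 4 of the GUID (00:00 here) are skipped.
 */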
515
516 int ipath_layer_get_mac(struct ipath_devdata *dd, u8 * mac)
517 {
518         u8 *guid;
519
520         guid = (u8 *) &dd->ipath_guid;
521
522         mac[0] = guid[0];
523         mac[1] = guid[1];
524         mac[2] = guid[2];
525         mac[3] = guid[5];
526         mac[4] = guid[6];
527         mac[5] = guid[7];
528         if ((guid[3] || guid[4]) && !(guid[3] == 0xff && guid[4] == 0xff))
529                 ipath_dbg("Warning, guid bytes 3 and 4 not 0 or 0xffff: "
530                           "%x %x\n", guid[3], guid[4]);
531         return 0;
532 }
533
534 EXPORT_SYMBOL_GPL(ipath_layer_get_mac);
535
536 u16 ipath_layer_get_bcast(struct ipath_devdata *dd)
537 {
538         return dd->ipath_mlid;
539 }
540
541 EXPORT_SYMBOL_GPL(ipath_layer_get_bcast);
542
543 u32 ipath_layer_get_cr_errpkey(struct ipath_devdata *dd)
544 {
545         return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
546 }
547
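/*
 * Advance the SGE state by 'length' bytes: move to the next SGE in the
 * list when the current one is exhausted, or to the next memory-region
 * segment when the current segment is consumed.
 */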
548 static void update_sge(struct ipath_sge_state *ss, u32 length)
549 {
550         struct ipath_sge *sge = &ss->sge;
551
552         sge->vaddr += length;
553         sge->length -= length;
554         sge->sge_length -= length;
555         if (sge->sge_length == 0) {
556                 if (--ss->num_sge)
557                         *sge = *ss->sg_list++;
558         } else if (sge->length == 0 && sge->mr != NULL) {
559                 if (++sge->n >= IPATH_SEGSZ) {
560                         if (++sge->m >= sge->mr->mapsz)
561                                 return;
562                         sge->n = 0;
563                 }
564                 sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
565                 sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
566         }
567 }
568
569 #ifdef __LITTLE_ENDIAN
570 static inline u32 get_upper_bits(u32 data, u32 shift)
571 {
572         return data >> shift;
573 }
574
575 static inline u32 set_upper_bits(u32 data, u32 shift)
576 {
577         return data << shift;
578 }
579
580 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
581 {
582         data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
583         data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
584         return data;
585 }
586 #else
587 static inline u32 get_upper_bits(u32 data, u32 shift)
588 {
589         return data << shift;
590 }
591
592 static inline u32 set_upper_bits(u32 data, u32 shift)
593 {
594         return data >> shift;
595 }
596
597 static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
598 {
599         data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
600         data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
601         return data;
602 }
603 #endif
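/*
 * Worked example (little-endian, illustrative): clear_upper_bytes(
 * 0x04030201, 2, 1) keeps the two low-order source bytes and places them
 * at byte offset 1 of the destination word, giving 0x00020100.
 */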
604
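/*
 * copy_io - gather 'length' bytes from the SGE list into a PIO buffer.
 * Unaligned source data is accumulated into whole dwords before being
 * written; the final dword is held back and written after a
 * write-combining flush so that it serves as the trigger word.
 */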
605 static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
606                     u32 length)
607 {
608         u32 extra = 0;
609         u32 data = 0;
610         u32 last;
611
612         while (1) {
613                 u32 len = ss->sge.length;
614                 u32 off;
615
616                 BUG_ON(len == 0);
617                 if (len > length)
618                         len = length;
619                 if (len > ss->sge.sge_length)
620                         len = ss->sge.sge_length;
621                 /* If the source address is not aligned, try to align it. */
622                 off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
623                 if (off) {
624                         u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
625                                             ~(sizeof(u32) - 1));
626                         u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
627                         u32 y;
628
629                         y = sizeof(u32) - off;
630                         if (len > y)
631                                 len = y;
632                         if (len + extra >= sizeof(u32)) {
633                                 data |= set_upper_bits(v, extra *
634                                                        BITS_PER_BYTE);
635                                 len = sizeof(u32) - extra;
636                                 if (len == length) {
637                                         last = data;
638                                         break;
639                                 }
640                                 __raw_writel(data, piobuf);
641                                 piobuf++;
642                                 extra = 0;
643                                 data = 0;
644                         } else {
645                                 /* Clear unused upper bytes */
646                                 data |= clear_upper_bytes(v, len, extra);
647                                 if (len == length) {
648                                         last = data;
649                                         break;
650                                 }
651                                 extra += len;
652                         }
653                 } else if (extra) {
654                         /* Source address is aligned. */
655                         u32 *addr = (u32 *) ss->sge.vaddr;
656                         int shift = extra * BITS_PER_BYTE;
657                         int ushift = 32 - shift;
658                         u32 l = len;
659
660                         while (l >= sizeof(u32)) {
661                                 u32 v = *addr;
662
663                                 data |= set_upper_bits(v, shift);
664                                 __raw_writel(data, piobuf);
665                                 data = get_upper_bits(v, ushift);
666                                 piobuf++;
667                                 addr++;
668                                 l -= sizeof(u32);
669                         }
670                         /*
671                          * We still have 'extra' number of bytes leftover.
672                          */
673                         if (l) {
674                                 u32 v = *addr;
675
676                                 if (l + extra >= sizeof(u32)) {
677                                         data |= set_upper_bits(v, shift);
678                                         len -= l + extra - sizeof(u32);
679                                         if (len == length) {
680                                                 last = data;
681                                                 break;
682                                         }
683                                         __raw_writel(data, piobuf);
684                                         piobuf++;
685                                         extra = 0;
686                                         data = 0;
687                                 } else {
688                                         /* Clear unused upper bytes */
689                                         data |= clear_upper_bytes(v, l,
690                                                                   extra);
691                                         if (len == length) {
692                                                 last = data;
693                                                 break;
694                                         }
695                                         extra += l;
696                                 }
697                         } else if (len == length) {
698                                 last = data;
699                                 break;
700                         }
701                 } else if (len == length) {
702                         u32 w;
703
704                         /*
705                          * Need to round up for the last dword in the
706                          * packet.
707                          */
708                         w = (len + 3) >> 2;
709                         __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
710                         piobuf += w - 1;
711                         last = ((u32 *) ss->sge.vaddr)[w - 1];
712                         break;
713                 } else {
714                         u32 w = len >> 2;
715
716                         __iowrite32_copy(piobuf, ss->sge.vaddr, w);
717                         piobuf += w;
718
719                         extra = len & (sizeof(u32) - 1);
720                         if (extra) {
721                                 u32 v = ((u32 *) ss->sge.vaddr)[w];
722
723                                 /* Clear unused upper bytes */
724                                 data = clear_upper_bytes(v, extra, 0);
725                         }
726                 }
727                 update_sge(ss, len);
728                 length -= len;
729         }
730         /* Update address before sending packet. */
731         update_sge(ss, length);
732         /* must flush early everything before trigger word */
733         ipath_flush_wc();
734         __raw_writel(last, piobuf);
735         /* be sure trigger word is written */
736         ipath_flush_wc();
737 }
738
739 /**
740  * ipath_verbs_send - send a packet from the verbs layer
741  * @dd: the infinipath device
742  * @hdrwords: the number of words in the header
743  * @hdr: the packet header
744  * @len: the length of the packet in bytes
745  * @ss: the SGE to send
746  *
747  * This is like ipath_sma_send_pkt() in that we need to be able to send
748  * packets after the chip is initialized (MADs) but also like
749  * ipath_layer_send_hdr() since it's used by the verbs layer.
750  */
751 int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
752                      u32 *hdr, u32 len, struct ipath_sge_state *ss)
753 {
754         u32 __iomem *piobuf;
755         u32 plen;
756         int ret;
757
758         /* +1 is for the qword padding of pbc */
759         plen = hdrwords + ((len + 3) >> 2) + 1;
760         if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
761                 ipath_dbg("packet len 0x%x too long, failing\n", plen);
762                 ret = -EINVAL;
763                 goto bail;
764         }
765
766         /* Get a PIO buffer to use. */
767         piobuf = ipath_getpiobuf(dd, NULL);
768         if (unlikely(piobuf == NULL)) {
769                 ret = -EBUSY;
770                 goto bail;
771         }
772
773         /*
774          * Write len to control qword, no flags.
775          * We have to flush after the PBC for correctness on some cpus
776          * or WC buffer can be written out of order.
777          */
778         writeq(plen, piobuf);
779         ipath_flush_wc();
780         piobuf += 2;
781         if (len == 0) {
782                 /*
783                  * If there is just the header portion, must flush before
784                  * writing last word of header for correctness, and after
785                  * the last header word (trigger word).
786                  */
787                 __iowrite32_copy(piobuf, hdr, hdrwords - 1);
788                 ipath_flush_wc();
789                 __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
790                 ipath_flush_wc();
791                 ret = 0;
792                 goto bail;
793         }
794
795         __iowrite32_copy(piobuf, hdr, hdrwords);
796         piobuf += hdrwords;
797
798         /* The common case is aligned and contained in one segment. */
799         if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
800                    !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
801                 u32 w;
802                 u32 *addr = (u32 *) ss->sge.vaddr;
803
804                 /* Update address before sending packet. */
805                 update_sge(ss, len);
806                 /* Need to round up for the last dword in the packet. */
807                 w = (len + 3) >> 2;
808                 __iowrite32_copy(piobuf, addr, w - 1);
809                 /* must flush early everything before trigger word */
810                 ipath_flush_wc();
811                 __raw_writel(addr[w - 1], piobuf + w - 1);
812                 /* be sure trigger word is written */
813                 ipath_flush_wc();
814                 ret = 0;
815                 goto bail;
816         }
817         copy_io(piobuf, ss, len);
818         ret = 0;
819
820 bail:
821         return ret;
822 }
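/*
 * Illustrative example of the length check above: for hdrwords == 10 and a
 * 256 byte payload, plen = 10 + 64 + 1 = 75 dwords (the +1 covers the pbc
 * qword padding), which must not exceed ipath_ibmaxlen when scaled to bytes.
 */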
823
824 int ipath_layer_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
825                                   u64 *rwords, u64 *spkts, u64 *rpkts,
826                                   u64 *xmit_wait)
827 {
828         int ret;
829
830         if (!(dd->ipath_flags & IPATH_INITTED)) {
831                 /* no hardware, freeze, etc. */
832                 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
833                 ret = -EINVAL;
834                 goto bail;
835         }
836         *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
837         *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
838         *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
839         *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
840         *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
841
842         ret = 0;
843
844 bail:
845         return ret;
846 }
847
848 /**
849  * ipath_layer_get_counters - get various chip counters
850  * @dd: the infinipath device
851  * @cntrs: counters are placed here
852  *
853  * Return the counters needed by recv_pma_get_portcounters().
854  */
855 int ipath_layer_get_counters(struct ipath_devdata *dd,
856                               struct ipath_layer_counters *cntrs)
857 {
858         int ret;
859
860         if (!(dd->ipath_flags & IPATH_INITTED)) {
861                 /* no hardware, freeze, etc. */
862                 ipath_dbg("unit %u not usable\n", dd->ipath_unit);
863                 ret = -EINVAL;
864                 goto bail;
865         }
866         cntrs->symbol_error_counter =
867                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_ibsymbolerrcnt);
868         cntrs->link_error_recovery_counter =
869                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt);
870         /*
871          * The link downed counter counts when the other side downs the
872          * connection.  We add in the number of times we downed the link
873          * due to local link integrity errors to compensate.
874          */
875         cntrs->link_downed_counter =
876                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_iblinkdowncnt);
877         cntrs->port_rcv_errors =
878                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rxdroppktcnt) +
879                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvovflcnt) +
880                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_portovflcnt) +
881                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_err_rlencnt) +
882                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_invalidrlencnt) +
883                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_erricrccnt) +
884                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errvcrccnt) +
885                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_errlpcrccnt) +
886                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_badformatcnt);
887         cntrs->port_rcv_remphys_errors =
888                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_rcvebpcnt);
889         cntrs->port_xmit_discards =
890                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_unsupvlcnt);
891         cntrs->port_xmit_data =
892                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
893         cntrs->port_rcv_data =
894                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
895         cntrs->port_xmit_packets =
896                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
897         cntrs->port_rcv_packets =
898                 ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
899         cntrs->local_link_integrity_errors = dd->ipath_lli_errors;
900         cntrs->excessive_buffer_overrun_errors = 0; /* XXX */
901
902         ret = 0;
903
904 bail:
905         return ret;
906 }
907
908 int ipath_layer_want_buffer(struct ipath_devdata *dd)
909 {
910         set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
911         ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
912                          dd->ipath_sendctrl);
913
914         return 0;
915 }
916
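/*
 * ipath_layer_send_hdr - send a header-only layered ethernet packet.
 * Validates that the unit is open and the link is up, checks the LRH
 * word, then copies the header into a PIO buffer with the usual
 * trigger-word flush sequence.
 */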
917 int ipath_layer_send_hdr(struct ipath_devdata *dd, struct ether_header *hdr)
918 {
919         int ret = 0;
920         u32 __iomem *piobuf;
921         u32 plen, *uhdr;
922         size_t count;
923         __be16 vlsllnh;
924
925         if (!(dd->ipath_flags & IPATH_RCVHDRSZ_SET)) {
926                 ipath_dbg("send while not open\n");
927                 ret = -EINVAL;
928         } else
929                 if ((dd->ipath_flags & (IPATH_LINKUNK | IPATH_LINKDOWN)) ||
930                     dd->ipath_lid == 0) {
931                         /*
932                          * lid check is for when sma hasn't yet configured
933                          */
934                         ret = -ENETDOWN;
935                         ipath_cdbg(VERBOSE, "send while not ready, "
936                                    "mylid=%u, flags=0x%x\n",
937                                    dd->ipath_lid, dd->ipath_flags);
938                 }
939
940         vlsllnh = *((__be16 *) hdr);
941         if (vlsllnh != htons(IPATH_LRH_BTH)) {
942                 ipath_dbg("Warning: lrh[0] wrong (%x, not %x); "
943                           "not sending\n", be16_to_cpu(vlsllnh),
944                           IPATH_LRH_BTH);
945                 ret = -EINVAL;
946         }
947         if (ret)
948                 goto done;
949
950         /* Get a PIO buffer to use. */
951         piobuf = ipath_getpiobuf(dd, NULL);
952         if (piobuf == NULL) {
953                 ret = -EBUSY;
954                 goto done;
955         }
956
957         plen = (sizeof(*hdr) >> 2); /* actual length */
958         ipath_cdbg(EPKT, "0x%x+1w pio %p\n", plen, piobuf);
959
960         writeq(plen+1, piobuf); /* len (+1 for pad) to pbc, no flags */
961         ipath_flush_wc();
962         piobuf += 2;
963         uhdr = (u32 *)hdr;
964         count = plen-1; /* amount we can copy before trigger word */
965         __iowrite32_copy(piobuf, uhdr, count);
966         ipath_flush_wc();
967         __raw_writel(uhdr[count], piobuf + count);
968         ipath_flush_wc(); /* ensure it's sent, now */
969
970         ipath_stats.sps_ether_spkts++;  /* ether packet sent */
971
972 done:
973         return ret;
974 }
975
976 EXPORT_SYMBOL_GPL(ipath_layer_send_hdr);
977
978 int ipath_layer_set_piointbufavail_int(struct ipath_devdata *dd)
979 {
980         set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
981
982         ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
983                          dd->ipath_sendctrl);
984         return 0;
985 }
986
987 EXPORT_SYMBOL_GPL(ipath_layer_set_piointbufavail_int);
988
989 int ipath_layer_enable_timer(struct ipath_devdata *dd)
990 {
991         /*
992          * HT-400 has a design flaw where the chip's and the kernel's ideas
993          * of the tail register don't always agree, and therefore we won't
994          * get an interrupt on the next packet received.
995          * If the board supports per packet receive interrupts, use it.
996          * Otherwise, the timer function periodically checks for packets
997          * to cover this case.
998          * Either way, the timer is needed for verbs layer related
999          * processing.
1000          */
1001         if (dd->ipath_flags & IPATH_GPIO_INTR) {
1002                 ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
1003                                  0x2074076542310ULL);
1004                 /* Enable GPIO bit 2 interrupt */
1005                 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
1006                                  (u64) (1 << 2));
1007         }
1008
1009         init_timer(&dd->verbs_timer);
1010         dd->verbs_timer.function = __ipath_verbs_timer;
1011         dd->verbs_timer.data = (unsigned long)dd;
1012         dd->verbs_timer.expires = jiffies + 1;
1013         add_timer(&dd->verbs_timer);
1014
1015         return 0;
1016 }
1017
1018 int ipath_layer_disable_timer(struct ipath_devdata *dd)
1019 {
1020         /* Disable GPIO bit 2 interrupt */
1021         if (dd->ipath_flags & IPATH_GPIO_INTR)
1022                 ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, 0);
1023
1024         del_timer_sync(&dd->verbs_timer);
1025
1026         return 0;
1027 }
1028
1029 /**
1030  * ipath_layer_set_verbs_flags - set the verbs layer flags
1031  * @dd: the infinipath device
1032  * @flags: the flags to set
1033  */
1034 int ipath_layer_set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
1035 {
1036         struct ipath_devdata *ss;
1037         unsigned long lflags;
1038
1039         spin_lock_irqsave(&ipath_devs_lock, lflags);
1040
1041         list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
1042                 if (!(ss->ipath_flags & IPATH_INITTED))
1043                         continue;
1044                 if ((flags & IPATH_VERBS_KERNEL_SMA) &&
1045                     !(*ss->ipath_statusp & IPATH_STATUS_SMA))
1046                         *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
1047                 else
1048                         *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
1049         }
1050
1051         spin_unlock_irqrestore(&ipath_devs_lock, lflags);
1052
1053         return 0;
1054 }
1055
1056 /**
1057  * ipath_layer_get_npkeys - return the size of the PKEY table for port 0
1058  * @dd: the infinipath device
1059  */
1060 unsigned ipath_layer_get_npkeys(struct ipath_devdata *dd)
1061 {
1062         return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
1063 }
1064
1065 /**
1066  * ipath_layer_get_pkey - return the indexed PKEY from the port 0 PKEY table
1067  * @dd: the infinipath device
1068  * @index: the PKEY index
1069  */
1070 unsigned ipath_layer_get_pkey(struct ipath_devdata *dd, unsigned index)
1071 {
1072         unsigned ret;
1073
1074         if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
1075                 ret = 0;
1076         else
1077                 ret = dd->ipath_pd[0]->port_pkeys[index];
1078
1079         return ret;
1080 }
1081
1082 /**
1083  * ipath_layer_get_pkeys - return the PKEY table for port 0
1084  * @dd: the infinipath device
1085  * @pkeys: the pkey table is placed here
1086  */
1087 int ipath_layer_get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1088 {
1089         struct ipath_portdata *pd = dd->ipath_pd[0];
1090
1091         memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
1092
1093         return 0;
1094 }
1095
1096 /**
1097  * rm_pkey - decrement the reference count for the given PKEY
1098  * @dd: the infinipath device
1099  * @key: the PKEY index
1100  *
1101  * Return true if this was the last reference and the hardware table entry
1102  * needs to be changed.
1103  */
1104 static int rm_pkey(struct ipath_devdata *dd, u16 key)
1105 {
1106         int i;
1107         int ret;
1108
1109         for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1110                 if (dd->ipath_pkeys[i] != key)
1111                         continue;
1112                 if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
1113                         dd->ipath_pkeys[i] = 0;
1114                         ret = 1;
1115                         goto bail;
1116                 }
1117                 break;
1118         }
1119
1120         ret = 0;
1121
1122 bail:
1123         return ret;
1124 }
1125
1126 /**
1127  * add_pkey - add the given PKEY to the hardware table
1128  * @dd: the infinipath device
1129  * @key: the PKEY
1130  *
1131  * Return an error code if unable to add the entry, zero if no change,
1132  * or 1 if the hardware PKEY register needs to be updated.
1133  */
1134 static int add_pkey(struct ipath_devdata *dd, u16 key)
1135 {
1136         int i;
1137         u16 lkey = key & 0x7FFF;
1138         int any = 0;
1139         int ret;
1140
1141         if (lkey == 0x7FFF) {
1142                 ret = 0;
1143                 goto bail;
1144         }
1145
1146         /* Look for an empty slot or a matching PKEY. */
1147         for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1148                 if (!dd->ipath_pkeys[i]) {
1149                         any++;
1150                         continue;
1151                 }
1152                 /* If it matches exactly, try to increment the ref count */
1153                 if (dd->ipath_pkeys[i] == key) {
1154                         if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
1155                                 ret = 0;
1156                                 goto bail;
1157                         }
1158                         /* Lost the race. Look for an empty slot below. */
1159                         atomic_dec(&dd->ipath_pkeyrefs[i]);
1160                         any++;
1161                 }
1162                 /*
1163                  * It makes no sense to have both the limited and unlimited
1164                  * PKEY set at the same time since the unlimited one will
1165                  * disable the limited one.
1166                  */
1167                 if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
1168                         ret = -EEXIST;
1169                         goto bail;
1170                 }
1171         }
1172         if (!any) {
1173                 ret = -EBUSY;
1174                 goto bail;
1175         }
1176         for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
1177                 if (!dd->ipath_pkeys[i] &&
1178                     atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
1179                         /* for ipathstats, etc. */
1180                         ipath_stats.sps_pkeys[i] = lkey;
1181                         dd->ipath_pkeys[i] = key;
1182                         ret = 1;
1183                         goto bail;
1184                 }
1185         }
1186         ret = -EBUSY;
1187
1188 bail:
1189         return ret;
1190 }
1191
1192 /**
1193  * ipath_layer_set_pkeys - set the PKEY table for port 0
1194  * @dd: the infinipath device
1195  * @pkeys: the PKEY table
1196  */
1197 int ipath_layer_set_pkeys(struct ipath_devdata *dd, u16 * pkeys)
1198 {
1199         struct ipath_portdata *pd;
1200         int i;
1201         int changed = 0;
1202
1203         pd = dd->ipath_pd[0];
1204
1205         for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
1206                 u16 key = pkeys[i];
1207                 u16 okey = pd->port_pkeys[i];
1208
1209                 if (key == okey)
1210                         continue;
1211                 /*
1212                  * The value of this PKEY table entry is changing.
1213                  * Remove the old entry in the hardware's array of PKEYs.
1214                  */
1215                 if (okey & 0x7FFF)
1216                         changed |= rm_pkey(dd, okey);
1217                 if (key & 0x7FFF) {
1218                         int ret = add_pkey(dd, key);
1219
1220                         if (ret < 0)
1221                                 key = 0;
1222                         else
1223                                 changed |= ret;
1224                 }
1225                 pd->port_pkeys[i] = key;
1226         }
1227         if (changed) {
1228                 u64 pkey;
1229
1230                 pkey = (u64) dd->ipath_pkeys[0] |
1231                         ((u64) dd->ipath_pkeys[1] << 16) |
1232                         ((u64) dd->ipath_pkeys[2] << 32) |
1233                         ((u64) dd->ipath_pkeys[3] << 48);
1234                 ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
1235                            (unsigned long long) pkey);
1236                 ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
1237                                  pkey);
1238         }
1239         return 0;
1240 }
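/*
 * Illustrative example of the register layout above: with ipath_pkeys of
 * { 0xffff, 0x8001, 0, 0 }, the value written to kr_partitionkey is
 * 0x000000008001ffff (entry 0 in the low 16 bits, entry 1 next, and so on).
 */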
1241
1242 /**
1243  * ipath_layer_get_linkdowndefaultstate - get the default linkdown state
1244  * @dd: the infinipath device
1245  *
1246  * Returns zero if the default is POLL, 1 if the default is SLEEP.
1247  */
1248 int ipath_layer_get_linkdowndefaultstate(struct ipath_devdata *dd)
1249 {
1250         return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
1251 }
1252
1253 /**
1254  * ipath_layer_set_linkdowndefaultstate - set the default linkdown state
1255  * @dd: the infinipath device
1256  * @sleep: the new state
1257  *
1258  * Note that this will only take effect when the link state changes.
1259  */
1260 int ipath_layer_set_linkdowndefaultstate(struct ipath_devdata *dd,
1261                                          int sleep)
1262 {
1263         if (sleep)
1264                 dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1265         else
1266                 dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
1267         ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1268                          dd->ipath_ibcctrl);
1269         return 0;
1270 }
1271
1272 int ipath_layer_get_phyerrthreshold(struct ipath_devdata *dd)
1273 {
1274         return (dd->ipath_ibcctrl >>
1275                 INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1276                 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1277 }
1278
1279 /**
1280  * ipath_layer_set_phyerrthreshold - set the physical error threshold
1281  * @dd: the infinipath device
1282  * @n: the new threshold
1283  *
1284  * Note that this will only take effect when the link state changes.
1285  */
1286 int ipath_layer_set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
1287 {
1288         unsigned v;
1289
1290         v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
1291                 INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
1292         if (v != n) {
1293                 dd->ipath_ibcctrl &=
1294                         ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
1295                           INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
1296                 dd->ipath_ibcctrl |=
1297                         (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
1298                 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1299                                  dd->ipath_ibcctrl);
1300         }
1301         return 0;
1302 }
1303
1304 int ipath_layer_get_overrunthreshold(struct ipath_devdata *dd)
1305 {
1306         return (dd->ipath_ibcctrl >>
1307                 INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1308                 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1309 }
1310
1311 /**
1312  * ipath_layer_set_overrunthreshold - set the overrun threshold
1313  * @dd: the infinipath device
1314  * @n: the new threshold
1315  *
1316  * Note that this will only take effect when the link state changes.
1317  */
1318 int ipath_layer_set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
1319 {
1320         unsigned v;
1321
1322         v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
1323                 INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
1324         if (v != n) {
1325                 dd->ipath_ibcctrl &=
1326                         ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
1327                           INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
1328                 dd->ipath_ibcctrl |=
1329                         (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
1330                 ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
1331                                  dd->ipath_ibcctrl);
1332         }
1333         return 0;
1334 }
1335
1336 int ipath_layer_get_boardname(struct ipath_devdata *dd, char *name,
1337                               size_t namelen)
1338 {
1339         return dd->ipath_f_get_boardname(dd, name, namelen);
1340 }
1341
1342 u32 ipath_layer_get_rcvhdrentsize(struct ipath_devdata *dd)
1343 {
1344         return dd->ipath_rcvhdrentsize;
1345 }