Merge branch 'ixp4xx' of git://git.kernel.org/pub/scm/linux/kernel/git/chris/linux-2.6
[firefly-linux-kernel-4.4.55.git] / drivers / staging / et131x / et1310_rx.c
1 /*
2  * Agere Systems Inc.
3  * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs
4  *
5  * Copyright © 2005 Agere Systems Inc.
6  * All rights reserved.
7  *   http://www.agere.com
8  *
9  *------------------------------------------------------------------------------
10  *
11  * et1310_rx.c - Routines used to perform data reception
12  *
13  *------------------------------------------------------------------------------
14  *
15  * SOFTWARE LICENSE
16  *
17  * This software is provided subject to the following terms and conditions,
18  * which you should read carefully before using the software.  Using this
19  * software indicates your acceptance of these terms and conditions.  If you do
20  * not agree with these terms and conditions, do not use the software.
21  *
22  * Copyright © 2005 Agere Systems Inc.
23  * All rights reserved.
24  *
25  * Redistribution and use in source or binary forms, with or without
26  * modifications, are permitted provided that the following conditions are met:
27  *
28  * . Redistributions of source code must retain the above copyright notice, this
29  *    list of conditions and the following Disclaimer as comments in the code as
30  *    well as in the documentation and/or other materials provided with the
31  *    distribution.
32  *
33  * . Redistributions in binary form must reproduce the above copyright notice,
34  *    this list of conditions and the following Disclaimer in the documentation
35  *    and/or other materials provided with the distribution.
36  *
37  * . Neither the name of Agere Systems Inc. nor the names of the contributors
38  *    may be used to endorse or promote products derived from this software
39  *    without specific prior written permission.
40  *
41  * Disclaimer
42  *
43  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
44  * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
45  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  ANY
46  * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
47  * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY
48  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
49  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
50  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
51  * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT
52  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
53  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
54  * DAMAGE.
55  *
56  */
57
58 #include "et131x_version.h"
59 #include "et131x_defs.h"
60
61 #include <linux/pci.h>
62 #include <linux/init.h>
63 #include <linux/module.h>
64 #include <linux/types.h>
65 #include <linux/kernel.h>
66
67 #include <linux/sched.h>
68 #include <linux/ptrace.h>
69 #include <linux/slab.h>
70 #include <linux/ctype.h>
71 #include <linux/string.h>
72 #include <linux/timer.h>
73 #include <linux/interrupt.h>
74 #include <linux/in.h>
75 #include <linux/delay.h>
76 #include <linux/io.h>
77 #include <linux/bitops.h>
78 #include <asm/system.h>
79
80 #include <linux/netdevice.h>
81 #include <linux/etherdevice.h>
82 #include <linux/skbuff.h>
83 #include <linux/if_arp.h>
84 #include <linux/ioport.h>
85
86 #include "et1310_phy.h"
87 #include "et1310_pm.h"
88 #include "et1310_jagcore.h"
89
90 #include "et131x_adapter.h"
91 #include "et131x_initpci.h"
92
93 #include "et1310_rx.h"
94
95
96 void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD pMpRfd);
97
98 /**
99  * et131x_rx_dma_memory_alloc
100  * @adapter: pointer to our private adapter structure
101  *
102  * Returns 0 on success and errno on failure (as defined in errno.h)
103  *
104  * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
105  * and the Packet Status Ring.
106  */
107 int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
108 {
109         u32 i, j;
110         u32 bufsize;
111         u32 pktStatRingSize, FBRChunkSize;
112         RX_RING_t *rx_ring;
113
114         /* Setup some convenience pointers */
115         rx_ring = (RX_RING_t *) &adapter->RxRing;
116
117         /* Alloc memory for the lookup table */
118 #ifdef USE_FBR0
119         rx_ring->Fbr[0] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
120 #endif
121
122         rx_ring->Fbr[1] = kmalloc(sizeof(FBRLOOKUPTABLE), GFP_KERNEL);
123
124         /* The first thing we will do is configure the sizes of the buffer
125          * rings. These will change based on jumbo packet support.  Larger
126          * jumbo packets increases the size of each entry in FBR0, and the
127          * number of entries in FBR0, while at the same time decreasing the
128          * number of entries in FBR1.
129          *
130          * FBR1 holds "large" frames, FBR0 holds "small" frames.  If FBR1
131          * entries are huge in order to accomodate a "jumbo" frame, then it
132          * will have less entries.  Conversely, FBR1 will now be relied upon
133          * to carry more "normal" frames, thus it's entry size also increases
134          * and the number of entries goes up too (since it now carries
135          * "small" + "regular" packets.
136          *
137          * In this scheme, we try to maintain 512 entries between the two
138          * rings. Also, FBR1 remains a constant size - when it's size doubles
139          * the number of entries halves.  FBR0 increases in size, however.
140          */
141
142         if (adapter->RegistryJumboPacket < 2048) {
143 #ifdef USE_FBR0
144                 rx_ring->Fbr0BufferSize = 256;
145                 rx_ring->Fbr0NumEntries = 512;
146 #endif
147                 rx_ring->Fbr1BufferSize = 2048;
148                 rx_ring->Fbr1NumEntries = 512;
149         } else if (adapter->RegistryJumboPacket < 4096) {
150 #ifdef USE_FBR0
151                 rx_ring->Fbr0BufferSize = 512;
152                 rx_ring->Fbr0NumEntries = 1024;
153 #endif
154                 rx_ring->Fbr1BufferSize = 4096;
155                 rx_ring->Fbr1NumEntries = 512;
156         } else {
157 #ifdef USE_FBR0
158                 rx_ring->Fbr0BufferSize = 1024;
159                 rx_ring->Fbr0NumEntries = 768;
160 #endif
161                 rx_ring->Fbr1BufferSize = 16384;
162                 rx_ring->Fbr1NumEntries = 128;
163         }
164
165 #ifdef USE_FBR0
166         adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr0NumEntries +
167             adapter->RxRing.Fbr1NumEntries;
168 #else
169         adapter->RxRing.PsrNumEntries = adapter->RxRing.Fbr1NumEntries;
170 #endif
171
172         /* Allocate an area of memory for Free Buffer Ring 1 */
173         bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff;
174         rx_ring->pFbr1RingVa = pci_alloc_consistent(adapter->pdev,
175                                                     bufsize,
176                                                     &rx_ring->pFbr1RingPa);
177         if (!rx_ring->pFbr1RingVa) {
178                 dev_err(&adapter->pdev->dev,
179                           "Cannot alloc memory for Free Buffer Ring 1\n");
180                 return -ENOMEM;
181         }
182
183         /* Save physical address
184          *
185          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
186          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
187          * are ever returned, make sure the high part is retrieved here
188          * before storing the adjusted address.
189          */
190         rx_ring->Fbr1Realpa = rx_ring->pFbr1RingPa;
191
192         /* Align Free Buffer Ring 1 on a 4K boundary */
193         et131x_align_allocated_memory(adapter,
194                                       &rx_ring->Fbr1Realpa,
195                                       &rx_ring->Fbr1offset, 0x0FFF);
196
197         rx_ring->pFbr1RingVa = (void *)((uint8_t *) rx_ring->pFbr1RingVa +
198                                         rx_ring->Fbr1offset);
199
200 #ifdef USE_FBR0
201         /* Allocate an area of memory for Free Buffer Ring 0 */
202         bufsize = (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff;
203         rx_ring->pFbr0RingVa = pci_alloc_consistent(adapter->pdev,
204                                                     bufsize,
205                                                     &rx_ring->pFbr0RingPa);
206         if (!rx_ring->pFbr0RingVa) {
207                 dev_err(&adapter->pdev->dev,
208                           "Cannot alloc memory for Free Buffer Ring 0\n");
209                 return -ENOMEM;
210         }
211
212         /* Save physical address
213          *
214          * NOTE: pci_alloc_consistent(), used above to alloc DMA regions,
215          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
216          * are ever returned, make sure the high part is retrieved here before
217          * storing the adjusted address.
218          */
219         rx_ring->Fbr0Realpa = rx_ring->pFbr0RingPa;
220
221         /* Align Free Buffer Ring 0 on a 4K boundary */
222         et131x_align_allocated_memory(adapter,
223                                       &rx_ring->Fbr0Realpa,
224                                       &rx_ring->Fbr0offset, 0x0FFF);
225
226         rx_ring->pFbr0RingVa = (void *)((uint8_t *) rx_ring->pFbr0RingVa +
227                                         rx_ring->Fbr0offset);
228 #endif
229
230         for (i = 0; i < (rx_ring->Fbr1NumEntries / FBR_CHUNKS);
231              i++) {
232                 u64 Fbr1Offset;
233                 u64 Fbr1TempPa;
234                 u32 Fbr1Align;
235
236                 /* This code allocates an area of memory big enough for N
237                  * free buffers + (buffer_size - 1) so that the buffers can
238                  * be aligned on 4k boundaries.  If each buffer were aligned
239                  * to a buffer_size boundary, the effect would be to double
240                  * the size of FBR0.  By allocating N buffers at once, we
241                  * reduce this overhead.
242                  */
243                 if (rx_ring->Fbr1BufferSize > 4096)
244                         Fbr1Align = 4096;
245                 else
246                         Fbr1Align = rx_ring->Fbr1BufferSize;
247
248                 FBRChunkSize =
249                     (FBR_CHUNKS * rx_ring->Fbr1BufferSize) + Fbr1Align - 1;
250                 rx_ring->Fbr1MemVa[i] =
251                     pci_alloc_consistent(adapter->pdev, FBRChunkSize,
252                                          &rx_ring->Fbr1MemPa[i]);
253
254                 if (!rx_ring->Fbr1MemVa[i]) {
255                 dev_err(&adapter->pdev->dev,
256                                 "Could not alloc memory\n");
257                         return -ENOMEM;
258                 }
259
260                 /* See NOTE in "Save Physical Address" comment above */
261                 Fbr1TempPa = rx_ring->Fbr1MemPa[i];
262
263                 et131x_align_allocated_memory(adapter,
264                                               &Fbr1TempPa,
265                                               &Fbr1Offset, (Fbr1Align - 1));
266
267                 for (j = 0; j < FBR_CHUNKS; j++) {
268                         u32 index = (i * FBR_CHUNKS) + j;
269
270                         /* Save the Virtual address of this index for quick
271                          * access later
272                          */
273                         rx_ring->Fbr[1]->Va[index] =
274                             (uint8_t *) rx_ring->Fbr1MemVa[i] +
275                             (j * rx_ring->Fbr1BufferSize) + Fbr1Offset;
276
277                         /* now store the physical address in the descriptor
278                          * so the device can access it
279                          */
280                         rx_ring->Fbr[1]->PAHigh[index] =
281                             (u32) (Fbr1TempPa >> 32);
282                         rx_ring->Fbr[1]->PALow[index] = (u32) Fbr1TempPa;
283
284                         Fbr1TempPa += rx_ring->Fbr1BufferSize;
285
286                         rx_ring->Fbr[1]->Buffer1[index] =
287                             rx_ring->Fbr[1]->Va[index];
288                         rx_ring->Fbr[1]->Buffer2[index] =
289                             rx_ring->Fbr[1]->Va[index] - 4;
290                 }
291         }
292
293 #ifdef USE_FBR0
294         /* Same for FBR0 (if in use) */
295         for (i = 0; i < (rx_ring->Fbr0NumEntries / FBR_CHUNKS);
296              i++) {
297                 u64 Fbr0Offset;
298                 u64 Fbr0TempPa;
299
300                 FBRChunkSize = ((FBR_CHUNKS + 1) * rx_ring->Fbr0BufferSize) - 1;
301                 rx_ring->Fbr0MemVa[i] =
302                     pci_alloc_consistent(adapter->pdev, FBRChunkSize,
303                                          &rx_ring->Fbr0MemPa[i]);
304
305                 if (!rx_ring->Fbr0MemVa[i]) {
306                         dev_err(&adapter->pdev->dev,
307                                 "Could not alloc memory\n");
308                         return -ENOMEM;
309                 }
310
311                 /* See NOTE in "Save Physical Address" comment above */
312                 Fbr0TempPa = rx_ring->Fbr0MemPa[i];
313
314                 et131x_align_allocated_memory(adapter,
315                                               &Fbr0TempPa,
316                                               &Fbr0Offset,
317                                               rx_ring->Fbr0BufferSize - 1);
318
319                 for (j = 0; j < FBR_CHUNKS; j++) {
320                         u32 index = (i * FBR_CHUNKS) + j;
321
322                         rx_ring->Fbr[0]->Va[index] =
323                             (uint8_t *) rx_ring->Fbr0MemVa[i] +
324                             (j * rx_ring->Fbr0BufferSize) + Fbr0Offset;
325
326                         rx_ring->Fbr[0]->PAHigh[index] =
327                             (u32) (Fbr0TempPa >> 32);
328                         rx_ring->Fbr[0]->PALow[index] = (u32) Fbr0TempPa;
329
330                         Fbr0TempPa += rx_ring->Fbr0BufferSize;
331
332                         rx_ring->Fbr[0]->Buffer1[index] =
333                             rx_ring->Fbr[0]->Va[index];
334                         rx_ring->Fbr[0]->Buffer2[index] =
335                             rx_ring->Fbr[0]->Va[index] - 4;
336                 }
337         }
338 #endif
339
340         /* Allocate an area of memory for FIFO of Packet Status ring entries */
341         pktStatRingSize =
342             sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;
343
344         rx_ring->pPSRingVa = pci_alloc_consistent(adapter->pdev,
345                                                   pktStatRingSize,
346                                                   &rx_ring->pPSRingPa);
347
348         if (!rx_ring->pPSRingVa) {
349                 dev_err(&adapter->pdev->dev,
350                           "Cannot alloc memory for Packet Status Ring\n");
351                 return -ENOMEM;
352         }
353         printk("PSR %lx\n", (unsigned long) rx_ring->pPSRingPa);
354
355         /*
356          * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
357          * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
358          * are ever returned, make sure the high part is retrieved here before
359          * storing the adjusted address.
360          */
361
362         /* Allocate an area of memory for writeback of status information */
363         rx_ring->pRxStatusVa = pci_alloc_consistent(adapter->pdev,
364                                                     sizeof(RX_STATUS_BLOCK_t),
365                                                     &rx_ring->pRxStatusPa);
366         if (!rx_ring->pRxStatusVa) {
367                 dev_err(&adapter->pdev->dev,
368                           "Cannot alloc memory for Status Block\n");
369                 return -ENOMEM;
370         }
371         rx_ring->NumRfd = NIC_DEFAULT_NUM_RFD;
372         printk("PRS %lx\n", (unsigned long)rx_ring->pRxStatusPa);
373
374         /* Recv
375          * pci_pool_create initializes a lookaside list. After successful
376          * creation, nonpaged fixed-size blocks can be allocated from and
377          * freed to the lookaside list.
378          * RFDs will be allocated from this pool.
379          */
380         rx_ring->RecvLookaside = kmem_cache_create(adapter->netdev->name,
381                                                    sizeof(MP_RFD),
382                                                    0,
383                                                    SLAB_CACHE_DMA |
384                                                    SLAB_HWCACHE_ALIGN,
385                                                    NULL);
386
387         adapter->Flags |= fMP_ADAPTER_RECV_LOOKASIDE;
388
389         /* The RFDs are going to be put on lists later on, so initialize the
390          * lists now.
391          */
392         INIT_LIST_HEAD(&rx_ring->RecvList);
393         return 0;
394 }
395
/**
 * et131x_rx_dma_memory_free - Free all memory allocated within this module.
 * @adapter: pointer to our private adapter structure
 *
 * Counterpart of et131x_rx_dma_memory_alloc(): every size computed here
 * must mirror the size used at allocation time, and the alignment offsets
 * applied there are undone here before the memory is handed back.  Each
 * resource is freed only if its pointer is non-NULL and the pointer is
 * cleared afterwards, so this is safe to call on a partially-allocated
 * receive path (e.g. after an -ENOMEM in the alloc routine).
 */
void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u32 index;
	u32 bufsize;
	u32 pktStatRingSize;
	PMP_RFD rfd;
	RX_RING_t *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = (RX_RING_t *) &adapter->RxRing;

	/* Free RFDs and associated packet descriptors.  If any RFDs are
	 * still outstanding (in use by the receive path) the counts will
	 * not match; warn rather than crash.
	 */
	WARN_ON(rx_ring->nReadyRecv != rx_ring->NumRfd);

	while (!list_empty(&rx_ring->RecvList)) {
		rfd = (MP_RFD *) list_entry(rx_ring->RecvList.next,
					       MP_RFD, list_node);

		list_del(&rfd->list_node);
		rfd->Packet = NULL;
		kmem_cache_free(adapter->RxRing.RecvLookaside, rfd);
	}

	/* Free Free Buffer Ring 1 */
	if (rx_ring->pFbr1RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr1NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr1MemVa[index]) {
				u32 Fbr1Align;

				/* Recompute the alignment used at alloc
				 * time so bufsize matches exactly what was
				 * passed to pci_alloc_consistent().
				 */
				if (rx_ring->Fbr1BufferSize > 4096)
					Fbr1Align = 4096;
				else
					Fbr1Align = rx_ring->Fbr1BufferSize;

				bufsize =
				    (rx_ring->Fbr1BufferSize * FBR_CHUNKS) +
				    Fbr1Align - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr1MemVa[index],
						    rx_ring->Fbr1MemPa[index]);

				rx_ring->Fbr1MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself.  Undo the 4K-alignment offset that
		 * was added to the VA at alloc time, so we hand back the
		 * pointer pci_alloc_consistent() originally returned.
		 */
		rx_ring->pFbr1RingVa = (void *)((uint8_t *)
				rx_ring->pFbr1RingVa - rx_ring->Fbr1offset);

		bufsize =
		    (sizeof(FBR_DESC_t) * rx_ring->Fbr1NumEntries) + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr1RingVa, rx_ring->pFbr1RingPa);

		rx_ring->pFbr1RingVa = NULL;
	}

#ifdef USE_FBR0
	/* Now the same for Free Buffer Ring 0 */
	if (rx_ring->pFbr0RingVa) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->Fbr0NumEntries / FBR_CHUNKS); index++) {
			if (rx_ring->Fbr0MemVa[index]) {
				/* Matches the FBR0 chunk size used at
				 * alloc time.
				 */
				bufsize =
				    (rx_ring->Fbr0BufferSize *
				     (FBR_CHUNKS + 1)) - 1;

				pci_free_consistent(adapter->pdev,
						    bufsize,
						    rx_ring->Fbr0MemVa[index],
						    rx_ring->Fbr0MemPa[index]);

				rx_ring->Fbr0MemVa[index] = NULL;
			}
		}

		/* Now the FIFO itself - again, un-apply the alignment
		 * offset before freeing.
		 */
		rx_ring->pFbr0RingVa = (void *)((uint8_t *)
				rx_ring->pFbr0RingVa - rx_ring->Fbr0offset);

		bufsize =
		    (sizeof(FBR_DESC_t) * rx_ring->Fbr0NumEntries) + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->pFbr0RingVa, rx_ring->pFbr0RingPa);

		rx_ring->pFbr0RingVa = NULL;
	}
#endif

	/* Free Packet Status Ring */
	if (rx_ring->pPSRingVa) {
		pktStatRingSize =
		    sizeof(PKT_STAT_DESC_t) * adapter->RxRing.PsrNumEntries;

		pci_free_consistent(adapter->pdev, pktStatRingSize,
				    rx_ring->pPSRingVa, rx_ring->pPSRingPa);

		rx_ring->pPSRingVa = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->pRxStatusVa) {
		pci_free_consistent(adapter->pdev,
				sizeof(RX_STATUS_BLOCK_t),
				rx_ring->pRxStatusVa, rx_ring->pRxStatusPa);

		rx_ring->pRxStatusVa = NULL;
	}

	/* Free receive buffer pool */

	/* Free receive packet pool */

	/* Destroy the lookaside (RFD) pool */
	if (adapter->Flags & fMP_ADAPTER_RECV_LOOKASIDE) {
		kmem_cache_destroy(rx_ring->RecvLookaside);
		adapter->Flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
	}

	/* Free the FBR Lookup Table (kfree(NULL) is a no-op, so these are
	 * safe even if the kmallocs in the alloc routine failed).
	 */
#ifdef USE_FBR0
	kfree(rx_ring->Fbr[0]);
#endif

	kfree(rx_ring->Fbr[1]);

	/* Reset Counters */
	rx_ring->nReadyRecv = 0;
}
538
539 /**
540  * et131x_init_recv - Initialize receive data structures.
541  * @adapter: pointer to our private adapter structure
542  *
543  * Returns 0 on success and errno on failure (as defined in errno.h)
544  */
545 int et131x_init_recv(struct et131x_adapter *adapter)
546 {
547         int status = -ENOMEM;
548         PMP_RFD rfd = NULL;
549         u32 rfdct;
550         u32 numrfd = 0;
551         RX_RING_t *rx_ring = NULL;
552
553         /* Setup some convenience pointers */
554         rx_ring = (RX_RING_t *) &adapter->RxRing;
555
556         /* Setup each RFD */
557         for (rfdct = 0; rfdct < rx_ring->NumRfd; rfdct++) {
558                 rfd = (MP_RFD *) kmem_cache_alloc(rx_ring->RecvLookaside,
559                                                      GFP_ATOMIC | GFP_DMA);
560
561                 if (!rfd) {
562                         dev_err(&adapter->pdev->dev,
563                                   "Couldn't alloc RFD out of kmem_cache\n");
564                         status = -ENOMEM;
565                         continue;
566                 }
567
568                 rfd->Packet = NULL;
569
570                 /* Add this RFD to the RecvList */
571                 list_add_tail(&rfd->list_node, &rx_ring->RecvList);
572
573                 /* Increment both the available RFD's, and the total RFD's. */
574                 rx_ring->nReadyRecv++;
575                 numrfd++;
576         }
577
578         if (numrfd > NIC_MIN_NUM_RFD)
579                 status = 0;
580
581         rx_ring->NumRfd = numrfd;
582
583         if (status != 0) {
584                 kmem_cache_free(rx_ring->RecvLookaside, rfd);
585                 dev_err(&adapter->pdev->dev,
586                           "Allocation problems in et131x_init_recv\n");
587         }
588         return status;
589 }
590
/**
 * ConfigRxDmaRegs - Start of Rx_DMA init sequence
 * @etdev: pointer to our adapter structure
 *
 * Halts the RXDMA engine, then programs the status-writeback block, the
 * Packet Status Ring, and Free Buffer Ring 1 (and 0, if compiled in) into
 * the device registers, initialising the host-side FBR descriptor FIFOs
 * and the driver's shadow copies of the ring positions along the way.
 * Caller is expected to re-enable RXDMA afterwards (et131x_rx_dma_enable).
 */
void ConfigRxDmaRegs(struct et131x_adapter *etdev)
{
	struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
	struct _rx_ring_t *rx_local = &etdev->RxRing;
	PFBR_DESC_t fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;

	/* Halt RXDMA to perform the reconfigure.  */
	et131x_rx_dma_disable(etdev);

	/* Load the completion writeback physical address
	 *
	 * NOTE : pci_alloc_consistent(), used above to alloc DMA regions,
	 * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses
	 * are ever returned, make sure the high part is retrieved here
	 * before storing the adjusted address.
	 */
	writel((u32) ((u64)rx_local->pRxStatusPa >> 32),
	       &rx_dma->dma_wb_base_hi);
	writel((u32) rx_local->pRxStatusPa, &rx_dma->dma_wb_base_lo);

	/* Clear the status block so stale contents are not mistaken for a
	 * fresh writeback from the device.
	 */
	memset(rx_local->pRxStatusVa, 0, sizeof(RX_STATUS_BLOCK_t));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel((u32) ((u64)rx_local->pPSRingPa >> 32),
	       &rx_dma->psr_base_hi);
	writel((u32) rx_local->pPSRingPa, &rx_dma->psr_base_lo);
	writel(rx_local->PsrNumEntries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	/* Read back the (12-bit) entry count and set the PSR low-water mark
	 * as a percentage of it.
	 */
	psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&etdev->RcvLock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	/* Now's the best time to initialize FBR1 contents: copy each
	 * buffer's physical address (split hi/lo) and its buffer index
	 * into the descriptor FIFO the device will read.
	 */
	fbr_entry = (PFBR_DESC_t) rx_local->pFbr1RingVa;
	for (entry = 0; entry < rx_local->Fbr1NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->Fbr[1]->PAHigh[entry];
		fbr_entry->addr_lo = rx_local->Fbr[1]->PALow[entry];
		fbr_entry->word2.bits.bi = entry;
		fbr_entry++;
	}

	/* Set the address and parameters of Free buffer ring 1 (and 0 if
	 * required) into the 1310's registers
	 */
	writel((u32) (rx_local->Fbr1Realpa >> 32), &rx_dma->fbr1_base_hi);
	writel((u32) rx_local->Fbr1Realpa, &rx_dma->fbr1_base_lo);
	writel(rx_local->Fbr1NumEntries - 1, &rx_dma->fbr1_num_des);
	/* NOTE(review): ET_DMA10_WRAP looks like the wrap/toggle bit of the
	 * 10-bit ring offset - confirm against the register definitions.
	 */
	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);

	/* This variable tracks the free buffer ring 1 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr1_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr1NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr1_min_des);

#ifdef USE_FBR0
	/* Now's the best time to initialize FBR0 contents */
	fbr_entry = (PFBR_DESC_t) rx_local->pFbr0RingVa;
	for (entry = 0; entry < rx_local->Fbr0NumEntries; entry++) {
		fbr_entry->addr_hi = rx_local->Fbr[0]->PAHigh[entry];
		fbr_entry->addr_lo = rx_local->Fbr[0]->PALow[entry];
		fbr_entry->word2.bits.bi = entry;
		fbr_entry++;
	}

	writel((u32) (rx_local->Fbr0Realpa >> 32), &rx_dma->fbr0_base_hi);
	writel((u32) rx_local->Fbr0Realpa, &rx_dma->fbr0_base_lo);
	writel(rx_local->Fbr0NumEntries - 1, &rx_dma->fbr0_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);

	/* This variable tracks the free buffer ring 0 full position, so it
	 * has to match the above.
	 */
	rx_local->local_Fbr0_full = ET_DMA10_WRAP;
	writel(((rx_local->Fbr0NumEntries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	       &rx_dma->fbr0_min_des);
#endif

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 *complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&etdev->RcvLock, flags);
}
701
702 /**
703  * SetRxDmaTimer - Set the heartbeat timer according to line rate.
704  * @etdev: pointer to our adapter structure
705  */
706 void SetRxDmaTimer(struct et131x_adapter *etdev)
707 {
708         /* For version B silicon, we do not use the RxDMA timer for 10 and 100
709          * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
710          */
711         if ((etdev->linkspeed == TRUEPHY_SPEED_100MBPS) ||
712             (etdev->linkspeed == TRUEPHY_SPEED_10MBPS)) {
713                 writel(0, &etdev->regs->rxdma.max_pkt_time);
714                 writel(1, &etdev->regs->rxdma.num_pkt_done);
715         }
716 }
717
718 /**
719  * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
720  * @etdev: pointer to our adapter structure
721  */
722 void et131x_rx_dma_disable(struct et131x_adapter *etdev)
723 {
724         RXDMA_CSR_t csr;
725
726         /* Setup the receive dma configuration register */
727         writel(0x00002001, &etdev->regs->rxdma.csr.value);
728         csr.value = readl(&etdev->regs->rxdma.csr.value);
729         if (csr.bits.halt_status != 1) {
730                 udelay(5);
731                 csr.value = readl(&etdev->regs->rxdma.csr.value);
732                 if (csr.bits.halt_status != 1)
733                         dev_err(&etdev->pdev->dev,
734                                 "RX Dma failed to enter halt state. CSR 0x%08x\n",
735                                 csr.value);
736         }
737 }
738
739 /**
740  * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
741  * @etdev: pointer to our adapter structure
742  */
743 void et131x_rx_dma_enable(struct et131x_adapter *etdev)
744 {
745         /* Setup the receive dma configuration register for normal operation */
746         RXDMA_CSR_t csr = { 0 };
747
748         csr.bits.fbr1_enable = 1;
749         if (etdev->RxRing.Fbr1BufferSize == 4096)
750                 csr.bits.fbr1_size = 1;
751         else if (etdev->RxRing.Fbr1BufferSize == 8192)
752                 csr.bits.fbr1_size = 2;
753         else if (etdev->RxRing.Fbr1BufferSize == 16384)
754                 csr.bits.fbr1_size = 3;
755 #ifdef USE_FBR0
756         csr.bits.fbr0_enable = 1;
757         if (etdev->RxRing.Fbr0BufferSize == 256)
758                 csr.bits.fbr0_size = 1;
759         else if (etdev->RxRing.Fbr0BufferSize == 512)
760                 csr.bits.fbr0_size = 2;
761         else if (etdev->RxRing.Fbr0BufferSize == 1024)
762                 csr.bits.fbr0_size = 3;
763 #endif
764         writel(csr.value, &etdev->regs->rxdma.csr.value);
765
766         csr.value = readl(&etdev->regs->rxdma.csr.value);
767         if (csr.bits.halt_status != 0) {
768                 udelay(5);
769                 csr.value = readl(&etdev->regs->rxdma.csr.value);
770                 if (csr.bits.halt_status != 0) {
771                         dev_err(&etdev->pdev->dev,
772                             "RX Dma failed to exit halt state.  CSR 0x%08x\n",
773                                 csr.value);
774                 }
775         }
776 }
777
778 /**
779  * nic_rx_pkts - Checks the hardware for available packets
780  * @etdev: pointer to our adapter
781  *
782  * Returns rfd, a pointer to our MPRFD.
783  *
784  * Checks the hardware for available packets, using completion ring
785  * If packets are available, it gets an RFD from the RecvList, attaches
786  * the packet to it, puts the RFD in the RecvPendList, and also returns
787  * the pointer to the RFD.
788  */
789 PMP_RFD nic_rx_pkts(struct et131x_adapter *etdev)
790 {
791         struct _rx_ring_t *rx_local = &etdev->RxRing;
792         PRX_STATUS_BLOCK_t status;
793         PPKT_STAT_DESC_t psr;
794         PMP_RFD rfd;
795         u32 i;
796         uint8_t *buf;
797         unsigned long flags;
798         struct list_head *element;
799         uint8_t rindex;
800         uint16_t bindex;
801         u32 len;
802         PKT_STAT_DESC_WORD0_t Word0;
803
804         /* RX Status block is written by the DMA engine prior to every
805          * interrupt. It contains the next to be used entry in the Packet
806          * Status Ring, and also the two Free Buffer rings.
807          */
808         status = (PRX_STATUS_BLOCK_t) rx_local->pRxStatusVa;
809
810         /* FIXME: tidy later when conversions complete */
811         if (status->Word1.bits.PSRoffset ==
812                         (rx_local->local_psr_full & 0xFFF) &&
813                         status->Word1.bits.PSRwrap ==
814                         ((rx_local->local_psr_full >> 12) & 1)) {
815                 /* Looks like this ring is not updated yet */
816                 return NULL;
817         }
818
819         /* The packet status ring indicates that data is available. */
820         psr = (PPKT_STAT_DESC_t) (rx_local->pPSRingVa) +
821                         (rx_local->local_psr_full & 0xFFF);
822
823         /* Grab any information that is required once the PSR is
824          * advanced, since we can no longer rely on the memory being
825          * accurate
826          */
827         len = psr->word1.bits.length;
828         rindex = (uint8_t) psr->word1.bits.ri;
829         bindex = (uint16_t) psr->word1.bits.bi;
830         Word0 = psr->word0;
831
832         /* Indicate that we have used this PSR entry. */
833         /* FIXME wrap 12 */
834         rx_local->local_psr_full = (rx_local->local_psr_full + 1) & 0xFFF;
835         if (rx_local->local_psr_full  > rx_local->PsrNumEntries - 1) {
836                 /* Clear psr full and toggle the wrap bit */
837                 rx_local->local_psr_full &=  0xFFF;
838                 rx_local->local_psr_full ^= 0x1000;
839         }
840
841         writel(rx_local->local_psr_full,
842                &etdev->regs->rxdma.psr_full_offset);
843
844 #ifndef USE_FBR0
845         if (rindex != 1) {
846                 return NULL;
847         }
848 #endif
849
850 #ifdef USE_FBR0
851         if (rindex > 1 ||
852                 (rindex == 0 &&
853                 bindex > rx_local->Fbr0NumEntries - 1) ||
854                 (rindex == 1 &&
855                 bindex > rx_local->Fbr1NumEntries - 1))
856 #else
857         if (rindex != 1 ||
858                 bindex > rx_local->Fbr1NumEntries - 1)
859 #endif
860         {
861                 /* Illegal buffer or ring index cannot be used by S/W*/
862                 dev_err(&etdev->pdev->dev,
863                           "NICRxPkts PSR Entry %d indicates "
864                           "length of %d and/or bad bi(%d)\n",
865                           rx_local->local_psr_full & 0xFFF,
866                           len, bindex);
867                 return NULL;
868         }
869
870         /* Get and fill the RFD. */
871         spin_lock_irqsave(&etdev->RcvLock, flags);
872
873         rfd = NULL;
874         element = rx_local->RecvList.next;
875         rfd = (PMP_RFD) list_entry(element, MP_RFD, list_node);
876
877         if (rfd == NULL) {
878                 spin_unlock_irqrestore(&etdev->RcvLock, flags);
879                 return NULL;
880         }
881
882         list_del(&rfd->list_node);
883         rx_local->nReadyRecv--;
884
885         spin_unlock_irqrestore(&etdev->RcvLock, flags);
886
887         rfd->bufferindex = bindex;
888         rfd->ringindex = rindex;
889
890         /* In V1 silicon, there is a bug which screws up filtering of
891          * runt packets.  Therefore runt packet filtering is disabled
892          * in the MAC and the packets are dropped here.  They are
893          * also counted here.
894          */
895         if (len < (NIC_MIN_PACKET_SIZE + 4)) {
896                 etdev->Stats.other_errors++;
897                 len = 0;
898         }
899
900         if (len) {
901                 if (etdev->ReplicaPhyLoopbk == 1) {
902                         buf = rx_local->Fbr[rindex]->Va[bindex];
903
904                         if (memcmp(&buf[6], &etdev->CurrentAddress[0],
905                                    ETH_ALEN) == 0) {
906                                 if (memcmp(&buf[42], "Replica packet",
907                                            ETH_HLEN)) {
908                                         etdev->ReplicaPhyLoopbkPF = 1;
909                                 }
910                         }
911                 }
912
913                 /* Determine if this is a multicast packet coming in */
914                 if ((Word0.value & ALCATEL_MULTICAST_PKT) &&
915                     !(Word0.value & ALCATEL_BROADCAST_PKT)) {
916                         /* Promiscuous mode and Multicast mode are
917                          * not mutually exclusive as was first
918                          * thought.  I guess Promiscuous is just
919                          * considered a super-set of the other
920                          * filters. Generally filter is 0x2b when in
921                          * promiscuous mode.
922                          */
923                         if ((etdev->PacketFilter & ET131X_PACKET_TYPE_MULTICAST)
924                             && !(etdev->PacketFilter & ET131X_PACKET_TYPE_PROMISCUOUS)
925                             && !(etdev->PacketFilter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
926                                 buf = rx_local->Fbr[rindex]->
927                                                 Va[bindex];
928
929                                 /* Loop through our list to see if the
930                                  * destination address of this packet
931                                  * matches one in our list.
932                                  */
933                                 for (i = 0;
934                                      i < etdev->MCAddressCount;
935                                      i++) {
936                                         if (buf[0] ==
937                                             etdev->MCList[i][0]
938                                             && buf[1] ==
939                                             etdev->MCList[i][1]
940                                             && buf[2] ==
941                                             etdev->MCList[i][2]
942                                             && buf[3] ==
943                                             etdev->MCList[i][3]
944                                             && buf[4] ==
945                                             etdev->MCList[i][4]
946                                             && buf[5] ==
947                                             etdev->MCList[i][5]) {
948                                                 break;
949                                         }
950                                 }
951
952                                 /* If our index is equal to the number
953                                  * of Multicast address we have, then
954                                  * this means we did not find this
955                                  * packet's matching address in our
956                                  * list.  Set the PacketSize to zero,
957                                  * so we free our RFD when we return
958                                  * from this function.
959                                  */
960                                 if (i == etdev->MCAddressCount)
961                                         len = 0;
962                         }
963
964                         if (len > 0)
965                                 etdev->Stats.multircv++;
966                 } else if (Word0.value & ALCATEL_BROADCAST_PKT)
967                         etdev->Stats.brdcstrcv++;
968                 else
969                         /* Not sure what this counter measures in
970                          * promiscuous mode. Perhaps we should check
971                          * the MAC address to see if it is directed
972                          * to us in promiscuous mode.
973                          */
974                         etdev->Stats.unircv++;
975         }
976
977         if (len > 0) {
978                 struct sk_buff *skb = NULL;
979
980                 /* rfd->PacketSize = len - 4; */
981                 rfd->PacketSize = len;
982
983                 skb = dev_alloc_skb(rfd->PacketSize + 2);
984                 if (!skb) {
985                         dev_err(&etdev->pdev->dev,
986                                   "Couldn't alloc an SKB for Rx\n");
987                         return NULL;
988                 }
989
990                 etdev->net_stats.rx_bytes += rfd->PacketSize;
991
992                 memcpy(skb_put(skb, rfd->PacketSize),
993                        rx_local->Fbr[rindex]->Va[bindex],
994                        rfd->PacketSize);
995
996                 skb->dev = etdev->netdev;
997                 skb->protocol = eth_type_trans(skb, etdev->netdev);
998                 skb->ip_summed = CHECKSUM_NONE;
999
1000                 netif_rx(skb);
1001         } else {
1002                 rfd->PacketSize = 0;
1003         }
1004
1005         nic_return_rfd(etdev, rfd);
1006         return rfd;
1007 }
1008
1009 /**
1010  * et131x_reset_recv - Reset the receive list
1011  * @etdev: pointer to our adapter
1012  *
1013  * Assumption, Rcv spinlock has been acquired.
1014  */
void et131x_reset_recv(struct et131x_adapter *etdev)
{
	/* Every RFD is recycled back onto RecvList after processing, so
	 * an empty list here means RFDs have been lost somewhere.
	 */
	WARN_ON(list_empty(&etdev->RxRing.RecvList));

}
1020
1021 /**
1022  * et131x_handle_recv_interrupt - Interrupt handler for receive processing
1023  * @etdev: pointer to our adapter
1024  *
1025  * Assumption, Rcv spinlock has been acquired.
1026  */
1027 void et131x_handle_recv_interrupt(struct et131x_adapter *etdev)
1028 {
1029         PMP_RFD rfd = NULL;
1030         u32 count = 0;
1031         bool done = true;
1032
1033         /* Process up to available RFD's */
1034         while (count < NUM_PACKETS_HANDLED) {
1035                 if (list_empty(&etdev->RxRing.RecvList)) {
1036                         WARN_ON(etdev->RxRing.nReadyRecv != 0);
1037                         done = false;
1038                         break;
1039                 }
1040
1041                 rfd = nic_rx_pkts(etdev);
1042
1043                 if (rfd == NULL)
1044                         break;
1045
1046                 /* Do not receive any packets until a filter has been set.
1047                  * Do not receive any packets until we have link.
1048                  * If length is zero, return the RFD in order to advance the
1049                  * Free buffer ring.
1050                  */
1051                 if (!etdev->PacketFilter ||
1052                     !(etdev->Flags & fMP_ADAPTER_LINK_DETECTION) ||
1053                     rfd->PacketSize == 0) {
1054                         continue;
1055                 }
1056
1057                 /* Increment the number of packets we received */
1058                 etdev->Stats.ipackets++;
1059
1060                 /* Set the status on the packet, either resources or success */
1061                 if (etdev->RxRing.nReadyRecv < RFD_LOW_WATER_MARK) {
1062                         dev_warn(&etdev->pdev->dev,
1063                                     "RFD's are running out\n");
1064                 }
1065                 count++;
1066         }
1067
1068         if (count == NUM_PACKETS_HANDLED || !done) {
1069                 etdev->RxRing.UnfinishedReceives = true;
1070                 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
1071                        &etdev->regs->global.watchdog_timer);
1072         } else
1073                 /* Watchdog timer will disable itself if appropriate. */
1074                 etdev->RxRing.UnfinishedReceives = false;
1075 }
1076
1077 static inline u32 bump_fbr(u32 *fbr, u32 limit)
1078 {
1079         u32 v = *fbr;
1080         v++;
1081         /* This works for all cases where limit < 1024. The 1023 case
1082            works because 1023++ is 1024 which means the if condition is not
1083            taken but the carry of the bit into the wrap bit toggles the wrap
1084            value correctly */
1085         if ((v & ET_DMA10_MASK) > limit) {
1086                 v &= ~ET_DMA10_MASK;
1087                 v ^= ET_DMA10_WRAP;
1088         }
1089         /* For the 1023 case */
1090         v &= (ET_DMA10_MASK|ET_DMA10_WRAP);
1091         *fbr = v;
1092         return v;
1093 }
1094
1095 /**
1096  * NICReturnRFD - Recycle a RFD and put it back onto the receive list
1097  * @etdev: pointer to our adapter
1098  * @rfd: pointer to the RFD
1099  */
void nic_return_rfd(struct et131x_adapter *etdev, PMP_RFD rfd)
{
	struct _rx_ring_t *rx_local = &etdev->RxRing;
	struct _RXDMA_t __iomem *rx_dma = &etdev->regs->rxdma;
	uint16_t bi = rfd->bufferindex;
	uint8_t ri = rfd->ringindex;
	unsigned long flags;

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */

	/* Validate the ring/buffer indices before indexing the Fbr arrays */
	if (
#ifdef USE_FBR0
	    (ri == 0 && bi < rx_local->Fbr0NumEntries) ||
#endif
	    (ri == 1 && bi < rx_local->Fbr1NumEntries)) {
		spin_lock_irqsave(&etdev->FbrLock, flags);

		if (ri == 1) {
			PFBR_DESC_t next =
			    (PFBR_DESC_t) (rx_local->pFbr1RingVa) +
			    INDEX10(rx_local->local_Fbr1_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed)FBR entry
			 */
			next->addr_hi = rx_local->Fbr[1]->PAHigh[bi];
			next->addr_lo = rx_local->Fbr[1]->PALow[bi];
			next->word2.value = bi;

			/* Advance the local full index (bump_fbr handles the
			 * 10-bit wrap) and publish it to the hardware so the
			 * buffer can be reused. Descriptor fields above must
			 * be written before this index update.
			 */
			writel(bump_fbr(&rx_local->local_Fbr1_full,
				rx_local->Fbr1NumEntries - 1),
				&rx_dma->fbr1_full_offset);
		}
#ifdef USE_FBR0
		else {
			PFBR_DESC_t next =
			    (PFBR_DESC_t) rx_local->pFbr0RingVa +
			    INDEX10(rx_local->local_Fbr0_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->Fbr[0]->PAHigh[bi];
			next->addr_lo = rx_local->Fbr[0]->PALow[bi];
			next->word2.value = bi;

			/* Same advance-and-publish sequence for ring 0 */
			writel(bump_fbr(&rx_local->local_Fbr0_full,
					rx_local->Fbr0NumEntries - 1),
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&etdev->FbrLock, flags);
	} else {
		dev_err(&etdev->pdev->dev,
			  "NICReturnRFD illegal Buffer Index returned\n");
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&etdev->RcvLock, flags);
	list_add_tail(&rfd->list_node, &rx_local->RecvList);
	rx_local->nReadyRecv++;
	spin_unlock_irqrestore(&etdev->RcvLock, flags);

	WARN_ON(rx_local->nReadyRecv > rx_local->NumRfd);
}