1 /*
2  * Driver for (BCM4706)? GBit MAC core on BCMA bus.
3  *
4  * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
5  *
6  * Licensed under the GNU/GPL. See COPYING for details.
7  */
8
9 #include "bgmac.h"
10
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/delay.h>
14 #include <linux/etherdevice.h>
15 #include <linux/mii.h>
16 #include <linux/interrupt.h>
17 #include <linux/dma-mapping.h>
18 #include <bcm47xx_nvram.h>
19
20 static const struct bcma_device_id bgmac_bcma_tbl[] = {
21         BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
22         BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
23         BCMA_CORETABLE_END
24 };
25 MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
26
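/* Poll @reg until the bits selected by @mask read back as @value, checking
 * roughly every 10 us for up to @timeout microseconds. Returns true on a
 * match, false (after logging an error) on timeout.
 */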
27 static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
28                              u32 value, int timeout)
29 {
30         u32 val;
31         int i;
32
33         for (i = 0; i < timeout / 10; i++) {
34                 val = bcma_read32(core, reg);
35                 if ((val & mask) == value)
36                         return true;
37                 udelay(10);
38         }
39         pr_err("Timeout waiting for reg 0x%X\n", reg);
40         return false;
41 }
42
43 /**************************************************
44  * DMA
45  **************************************************/
46
47 static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
48 {
49         u32 val;
50         int i;
51
52         if (!ring->mmio_base)
53                 return;
54
55         /* Suspend DMA TX ring first.
56          * bgmac_wait_value doesn't support waiting for any of a few values,
57          * so implement the whole loop here.
58          */
59         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
60                     BGMAC_DMA_TX_SUSPEND);
61         for (i = 0; i < 10000 / 10; i++) {
62                 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
63                 val &= BGMAC_DMA_TX_STAT;
64                 if (val == BGMAC_DMA_TX_STAT_DISABLED ||
65                     val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
66                     val == BGMAC_DMA_TX_STAT_STOPPED) {
67                         i = 0;
68                         break;
69                 }
70                 udelay(10);
71         }
72         if (i)
73                 bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
74                           ring->mmio_base, val);
75
76         /* Remove SUSPEND bit */
77         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
78         if (!bgmac_wait_value(bgmac->core,
79                               ring->mmio_base + BGMAC_DMA_TX_STATUS,
80                               BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
81                               10000)) {
82                 bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled in time, waiting additional 300us\n",
83                            ring->mmio_base);
84                 udelay(300);
85                 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
86                 if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
87                         bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
88                                   ring->mmio_base);
89         }
90 }
91
92 static void bgmac_dma_tx_enable(struct bgmac *bgmac,
93                                 struct bgmac_dma_ring *ring)
94 {
95         u32 ctl;
96
97         ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
98         ctl |= BGMAC_DMA_TX_ENABLE;
99         ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
100         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
101 }
102
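/* Queue one skb on a TX DMA ring. ring->start is the oldest slot not yet
 * reclaimed and ring->end is the next slot to fill; one slot is always kept
 * free. As an example of the free-slot math below: with start == 10 and
 * end == 14, start <= end, so free_slots = 10 - 14 + BGMAC_TX_RING_SLOTS.
 */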
103 static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
104                                     struct bgmac_dma_ring *ring,
105                                     struct sk_buff *skb)
106 {
107         struct device *dma_dev = bgmac->core->dma_dev;
108         struct net_device *net_dev = bgmac->net_dev;
109         struct bgmac_dma_desc *dma_desc;
110         struct bgmac_slot_info *slot;
111         u32 ctl0, ctl1;
112         int free_slots;
113
114         if (skb->len > BGMAC_DESC_CTL1_LEN) {
115                 bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
116                 goto err_stop_drop;
117         }
118
119         if (ring->start <= ring->end)
120                 free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
121         else
122                 free_slots = ring->start - ring->end;
123         if (free_slots == 1) {
124                 bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
125                 netif_stop_queue(net_dev);
126                 return NETDEV_TX_BUSY;
127         }
128
129         slot = &ring->slots[ring->end];
130         slot->skb = skb;
131         slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
132                                         DMA_TO_DEVICE);
133         if (dma_mapping_error(dma_dev, slot->dma_addr)) {
134                 bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
135                           ring->mmio_base);
136                 goto err_stop_drop;
137         }
138
139         ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
140         if (ring->end == ring->num_slots - 1)
141                 ctl0 |= BGMAC_DESC_CTL0_EOT;
142         ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;
143
144         dma_desc = ring->cpu_base;
145         dma_desc += ring->end;
146         dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
147         dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
148         dma_desc->ctl0 = cpu_to_le32(ctl0);
149         dma_desc->ctl1 = cpu_to_le32(ctl1);
150
151         wmb();
152
153         /* Increase ring->end to point to the next empty slot. We tell the
154          * hardware the first slot it should *not* read.
155          */
156         if (++ring->end >= BGMAC_TX_RING_SLOTS)
157                 ring->end = 0;
158         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
159                     ring->end * sizeof(struct bgmac_dma_desc));
160
161         /* Always keep one slot free so buggy calls can be detected. */
162         if (--free_slots == 1)
163                 netif_stop_queue(net_dev);
164
165         return NETDEV_TX_OK;
166
167 err_stop_drop:
168         netif_stop_queue(net_dev);
169         dev_kfree_skb(skb);
170         return NETDEV_TX_OK;
171 }
172
173 /* Free transmitted packets */
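/* The hardware's current descriptor pointer (from BGMAC_DMA_TX_STATUS) marks
 * the first slot it has not consumed yet; everything from ring->start up to
 * that slot is unmapped and freed, and the queue is woken again if it was
 * stopped for lack of slots.
 */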
174 static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
175 {
176         struct device *dma_dev = bgmac->core->dma_dev;
177         int empty_slot;
178         bool freed = false;
179
180         /* The last slot that hardware didn't consume yet */
181         empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
182         empty_slot &= BGMAC_DMA_TX_STATDPTR;
183         empty_slot /= sizeof(struct bgmac_dma_desc);
184
185         while (ring->start != empty_slot) {
186                 struct bgmac_slot_info *slot = &ring->slots[ring->start];
187
188                 if (slot->skb) {
189                         /* Unmap no longer used buffer */
190                         dma_unmap_single(dma_dev, slot->dma_addr,
191                                          slot->skb->len, DMA_TO_DEVICE);
192                         slot->dma_addr = 0;
193
194                         /* Free memory! :) */
195                         dev_kfree_skb(slot->skb);
196                         slot->skb = NULL;
197                 } else {
198                         bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
199                                   ring->start, ring->end);
200                 }
201
202                 if (++ring->start >= BGMAC_TX_RING_SLOTS)
203                         ring->start = 0;
204                 freed = true;
205         }
206
207         if (freed && netif_queue_stopped(bgmac->net_dev))
208                 netif_wake_queue(bgmac->net_dev);
209 }
210
211 static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
212 {
213         if (!ring->mmio_base)
214                 return;
215
216         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
217         if (!bgmac_wait_value(bgmac->core,
218                               ring->mmio_base + BGMAC_DMA_RX_STATUS,
219                               BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
220                               10000))
221                 bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
222                           ring->mmio_base);
223 }
224
225 static void bgmac_dma_rx_enable(struct bgmac *bgmac,
226                                 struct bgmac_dma_ring *ring)
227 {
228         u32 ctl;
229
230         ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
231         ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
232         ctl |= BGMAC_DMA_RX_ENABLE;
233         ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
234         ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
235         ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
236         bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
237 }
238
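/* Allocate and DMA-map one RX buffer for @slot. The bgmac_rx_header at the
 * start of the buffer is pre-filled with the 0xdead/0xbeef poison values so
 * bgmac_dma_rx_read() can tell whether the hardware actually wrote a frame
 * into it.
 */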
239 static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
240                                      struct bgmac_slot_info *slot)
241 {
242         struct device *dma_dev = bgmac->core->dma_dev;
243         struct bgmac_rx_header *rx;
244
245         /* Alloc skb */
246         slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
247         if (!slot->skb) {
248                 bgmac_err(bgmac, "Allocation of skb failed!\n");
249                 return -ENOMEM;
250         }
251
252         /* Poison - if everything goes fine, hardware will overwrite it */
253         rx = (struct bgmac_rx_header *)slot->skb->data;
254         rx->len = cpu_to_le16(0xdead);
255         rx->flags = cpu_to_le16(0xbeef);
256
257         /* Map skb for the DMA */
258         slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
259                                         BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
260         if (dma_mapping_error(dma_dev, slot->dma_addr)) {
261                 bgmac_err(bgmac, "DMA mapping error\n");
262                 return -ENOMEM;
263         }
264         if (slot->dma_addr & 0xC0000000)
265                 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
266
267         return 0;
268 }
269
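/* Process up to @weight received frames on @ring. Each frame is copied out
 * of the (still mapped) RX buffer into a fresh skb and passed up the stack;
 * the original buffer is re-poisoned and handed back to the hardware, so RX
 * buffers are never remapped in the fast path.
 */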
270 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
271                              int weight)
272 {
273         u32 end_slot;
274         int handled = 0;
275
276         end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
277         end_slot &= BGMAC_DMA_RX_STATDPTR;
278         end_slot /= sizeof(struct bgmac_dma_desc);
279
280         ring->end = end_slot;
281
282         while (ring->start != ring->end) {
283                 struct device *dma_dev = bgmac->core->dma_dev;
284                 struct bgmac_slot_info *slot = &ring->slots[ring->start];
285                 struct sk_buff *skb = slot->skb;
286                 struct sk_buff *new_skb;
287                 struct bgmac_rx_header *rx;
288                 u16 len, flags;
289
290                 /* Sync the buffer so it is accessible to the CPU */
291                 dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
292                                         BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
293
294                 /* Get info from the header */
295                 rx = (struct bgmac_rx_header *)skb->data;
296                 len = le16_to_cpu(rx->len);
297                 flags = le16_to_cpu(rx->flags);
298
299                 /* Check for poison and drop or pass the packet */
300                 if (len == 0xdead && flags == 0xbeef) {
301                         bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
302                                   ring->start);
303                 } else {
304                         /* Omit CRC. */
305                         len -= ETH_FCS_LEN;
306
307                         new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
308                         if (new_skb) {
309                                 skb_put(new_skb, len);
310                                 skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
311                                                                  new_skb->data,
312                                                                  len);
313                                 skb_checksum_none_assert(skb);
314                                 new_skb->protocol =
315                                         eth_type_trans(new_skb, bgmac->net_dev);
316                                 netif_receive_skb(new_skb);
317                                 handled++;
318                         } else {
319                                 bgmac->net_dev->stats.rx_dropped++;
320                                 bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
321                         }
322
323                         /* Poison the old skb */
324                         rx->len = cpu_to_le16(0xdead);
325                         rx->flags = cpu_to_le16(0xbeef);
326                 }
327
328                 /* Make it back accessible to the hardware */
329                 dma_sync_single_for_device(dma_dev, slot->dma_addr,
330                                            BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
331
332                 if (++ring->start >= BGMAC_RX_RING_SLOTS)
333                         ring->start = 0;
334
335                 if (handled >= weight) /* Should never be greater */
336                         break;
337         }
338
339         return handled;
340 }
341
342 /* Does ring support unaligned addressing? */
343 static bool bgmac_dma_unaligned(struct bgmac *bgmac,
344                                 struct bgmac_dma_ring *ring,
345                                 enum bgmac_dma_ring_type ring_type)
346 {
347         switch (ring_type) {
348         case BGMAC_DMA_RING_TX:
349                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
350                             0xff0);
351                 if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
352                         return true;
353                 break;
354         case BGMAC_DMA_RING_RX:
355                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
356                             0xff0);
357                 if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
358                         return true;
359                 break;
360         }
361         return false;
362 }
363
364 static void bgmac_dma_ring_free(struct bgmac *bgmac,
365                                 struct bgmac_dma_ring *ring)
366 {
367         struct device *dma_dev = bgmac->core->dma_dev;
368         struct bgmac_slot_info *slot;
369         int size;
370         int i;
371
372         for (i = 0; i < ring->num_slots; i++) {
373                 slot = &ring->slots[i];
374                 if (slot->skb) {
375                         if (slot->dma_addr)
376                                 dma_unmap_single(dma_dev, slot->dma_addr,
377                                                  slot->skb->len, DMA_TO_DEVICE);
378                         dev_kfree_skb(slot->skb);
379                 }
380         }
381
382         if (ring->cpu_base) {
383                 /* Free ring of descriptors */
384                 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
385                 dma_free_coherent(dma_dev, size, ring->cpu_base,
386                                   ring->dma_base);
387         }
388 }
389
390 static void bgmac_dma_free(struct bgmac *bgmac)
391 {
392         int i;
393
394         for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
395                 bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
396         for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
397                 bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
398 }
399
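/* Allocate the descriptor rings (coherent DMA memory) for all TX and RX
 * rings and pre-fill every RX slot with a mapped skb. TX slots are filled
 * lazily in bgmac_dma_tx_add(). Requires a core that reports 64-bit DMA
 * support.
 */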
400 static int bgmac_dma_alloc(struct bgmac *bgmac)
401 {
402         struct device *dma_dev = bgmac->core->dma_dev;
403         struct bgmac_dma_ring *ring;
404         static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
405                                          BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
406         int size; /* ring size: different for Tx and Rx */
407         int err;
408         int i;
409
410         BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
411         BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
412
413         if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
414                 bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
415                 return -ENOTSUPP;
416         }
417
418         for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
419                 ring = &bgmac->tx_ring[i];
420                 ring->num_slots = BGMAC_TX_RING_SLOTS;
421                 ring->mmio_base = ring_base[i];
422                 if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
423                         bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
424                                    ring->mmio_base);
425
426                 /* Alloc ring of descriptors */
427                 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
428                 ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
429                                                      &ring->dma_base,
430                                                      GFP_KERNEL);
431                 if (!ring->cpu_base) {
432                         bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
433                                   ring->mmio_base);
434                         goto err_dma_free;
435                 }
436                 if (ring->dma_base & 0xC0000000)
437                         bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
438
439                 /* No need to alloc TX slots yet */
440         }
441
442         for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
443                 int j;
444
445                 ring = &bgmac->rx_ring[i];
446                 ring->num_slots = BGMAC_RX_RING_SLOTS;
447                 ring->mmio_base = ring_base[i];
448                 if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
449                         bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
450                                    ring->mmio_base);
451
452                 /* Alloc ring of descriptors */
453                 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
454                 ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
455                                                      &ring->dma_base,
456                                                      GFP_KERNEL);
457                 if (!ring->cpu_base) {
458                         bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
459                                   ring->mmio_base);
460                         err = -ENOMEM;
461                         goto err_dma_free;
462                 }
463                 if (ring->dma_base & 0xC0000000)
464                         bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
465
466                 /* Alloc RX slots */
467                 for (j = 0; j < ring->num_slots; j++) {
468                         err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
469                         if (err) {
470                                 bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
471                                 goto err_dma_free;
472                         }
473                 }
474         }
475
476         return 0;
477
478 err_dma_free:
479         bgmac_dma_free(bgmac);
480         return -ENOMEM;
481 }
482
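/* Program the ring base addresses into the hardware, write the initial RX
 * descriptors and make the whole RX ring available by pointing the RX index
 * just past the last descriptor. Rings must already have been allocated by
 * bgmac_dma_alloc().
 */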
483 static void bgmac_dma_init(struct bgmac *bgmac)
484 {
485         struct bgmac_dma_ring *ring;
486         struct bgmac_dma_desc *dma_desc;
487         u32 ctl0, ctl1;
488         int i;
489
490         for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
491                 ring = &bgmac->tx_ring[i];
492
493                 /* We don't implement unaligned addressing, so enable first */
494                 bgmac_dma_tx_enable(bgmac, ring);
495                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
496                             lower_32_bits(ring->dma_base));
497                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
498                             upper_32_bits(ring->dma_base));
499
500                 ring->start = 0;
501                 ring->end = 0;  /* Points the slot that should *not* be read */
502         }
503
504         for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
505                 int j;
506
507                 ring = &bgmac->rx_ring[i];
508
509                 /* We don't implement unaligned addressing, so enable first */
510                 bgmac_dma_rx_enable(bgmac, ring);
511                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
512                             lower_32_bits(ring->dma_base));
513                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
514                             upper_32_bits(ring->dma_base));
515
516                 for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
517                      j++, dma_desc++) {
518                         ctl0 = ctl1 = 0;
519
520                         if (j == ring->num_slots - 1)
521                                 ctl0 |= BGMAC_DESC_CTL0_EOT;
522                         ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
523                         /* Is there any BGMAC device that requires extension? */
524                         /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
525                          * B43_DMA64_DCTL1_ADDREXT_MASK;
526                          */
527
528                         dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
529                         dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
530                         dma_desc->ctl0 = cpu_to_le32(ctl0);
531                         dma_desc->ctl1 = cpu_to_le32(ctl1);
532                 }
533
534                 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
535                             ring->num_slots * sizeof(struct bgmac_dma_desc));
536
537                 ring->start = 0;
538                 ring->end = 0;
539         }
540 }
541
542 /**************************************************
543  * PHY ops
544  **************************************************/
545
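/* Read a PHY register through the indirect PHY access interface. On the
 * BCM4706 the access registers live in the shared GMAC common core; on
 * other chips they are in the MAC core itself. The access is kicked off by
 * writing BGMAC_PA_START and is complete once the hardware clears that bit.
 */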
546 static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
547 {
548         struct bcma_device *core;
549         u16 phy_access_addr;
550         u16 phy_ctl_addr;
551         u32 tmp;
552
553         BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
554         BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
555         BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
556         BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
557         BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
558         BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
559         BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
560         BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
561         BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
562         BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
563         BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
564
565         if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
566                 core = bgmac->core->bus->drv_gmac_cmn.core;
567                 phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
568                 phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
569         } else {
570                 core = bgmac->core;
571                 phy_access_addr = BGMAC_PHY_ACCESS;
572                 phy_ctl_addr = BGMAC_PHY_CNTL;
573         }
574
575         tmp = bcma_read32(core, phy_ctl_addr);
576         tmp &= ~BGMAC_PC_EPA_MASK;
577         tmp |= phyaddr;
578         bcma_write32(core, phy_ctl_addr, tmp);
579
580         tmp = BGMAC_PA_START;
581         tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
582         tmp |= reg << BGMAC_PA_REG_SHIFT;
583         bcma_write32(core, phy_access_addr, tmp);
584
585         if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
586                 bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
587                           phyaddr, reg);
588                 return 0xffff;
589         }
590
591         return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
592 }
593
594 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
595 static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
596 {
597         struct bcma_device *core;
598         u16 phy_access_addr;
599         u16 phy_ctl_addr;
600         u32 tmp;
601
602         if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
603                 core = bgmac->core->bus->drv_gmac_cmn.core;
604                 phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
605                 phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
606         } else {
607                 core = bgmac->core;
608                 phy_access_addr = BGMAC_PHY_ACCESS;
609                 phy_ctl_addr = BGMAC_PHY_CNTL;
610         }
611
612         tmp = bcma_read32(core, phy_ctl_addr);
613         tmp &= ~BGMAC_PC_EPA_MASK;
614         tmp |= phyaddr;
615         bcma_write32(core, phy_ctl_addr, tmp);
616
617         bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
618         if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
619                 bgmac_warn(bgmac, "Error setting MDIO int\n");
620
621         tmp = BGMAC_PA_START;
622         tmp |= BGMAC_PA_WRITE;
623         tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
624         tmp |= reg << BGMAC_PA_REG_SHIFT;
625         tmp |= value;
626         bcma_write32(core, phy_access_addr, tmp);
627
628         if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
629                 bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
630                           phyaddr, reg);
631                 return -ETIMEDOUT;
632         }
633
634         return 0;
635 }
636
637 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
638 static void bgmac_phy_force(struct bgmac *bgmac)
639 {
640         u16 ctl;
641         u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
642                      BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);
643
644         if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
645                 return;
646
647         if (bgmac->autoneg)
648                 return;
649
650         ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
651         ctl &= mask;
652         if (bgmac->full_duplex)
653                 ctl |= BGMAC_PHY_CTL_DUPLEX;
654         if (bgmac->speed == BGMAC_SPEED_100)
655                 ctl |= BGMAC_PHY_CTL_SPEED_100;
656         else if (bgmac->speed == BGMAC_SPEED_1000)
657                 ctl |= BGMAC_PHY_CTL_SPEED_1000;
658         bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
659 }
660
661 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
662 static void bgmac_phy_advertise(struct bgmac *bgmac)
663 {
664         u16 adv;
665
666         if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
667                 return;
668
669         if (!bgmac->autoneg)
670                 return;
671
672         /* Adv selected 10/100 speeds */
673         adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
674         adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
675                  BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
676         if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
677                 adv |= BGMAC_PHY_ADV_10HALF;
678         if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
679                 adv |= BGMAC_PHY_ADV_100HALF;
680         if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
681                 adv |= BGMAC_PHY_ADV_10FULL;
682         if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
683                 adv |= BGMAC_PHY_ADV_100FULL;
684         bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);
685
686         /* Adv selected 1000 speeds */
687         adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
688         adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
689         if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
690                 adv |= BGMAC_PHY_ADV2_1000HALF;
691         if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
692                 adv |= BGMAC_PHY_ADV2_1000FULL;
693         bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);
694
695         /* Restart */
696         bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
697                         bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
698                         BGMAC_PHY_CTL_RESTART);
699 }
700
701 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
702 static void bgmac_phy_init(struct bgmac *bgmac)
703 {
704         struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
705         struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
706         u8 i;
707
708         if (ci->id == BCMA_CHIP_ID_BCM5356) {
709                 for (i = 0; i < 5; i++) {
710                         bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
711                         bgmac_phy_write(bgmac, i, 0x15, 0x0100);
712                         bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
713                         bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
714                         bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
715                 }
716         }
717         if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
718             (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
719             (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
720                 bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
721                 bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
722                 for (i = 0; i < 5; i++) {
723                         bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
724                         bgmac_phy_write(bgmac, i, 0x16, 0x5284);
725                         bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
726                         bgmac_phy_write(bgmac, i, 0x17, 0x0010);
727                         bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
728                         bgmac_phy_write(bgmac, i, 0x16, 0x5296);
729                         bgmac_phy_write(bgmac, i, 0x17, 0x1073);
730                         bgmac_phy_write(bgmac, i, 0x17, 0x9073);
731                         bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
732                         bgmac_phy_write(bgmac, i, 0x17, 0x9273);
733                         bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
734                 }
735         }
736 }
737
738 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
739 static void bgmac_phy_reset(struct bgmac *bgmac)
740 {
741         if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
742                 return;
743
744         bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
745                         BGMAC_PHY_CTL_RESET);
746         udelay(100);
747         if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
748             BGMAC_PHY_CTL_RESET)
749                 bgmac_err(bgmac, "PHY reset failed\n");
750         bgmac_phy_init(bgmac);
751 }
752
753 /**************************************************
754  * Chip ops
755  **************************************************/
756
757 /* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
758  * there is nothing to change? Try it after stabilizing the driver.
759  */
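/* Update BGMAC_CMDCFG to (old & mask) | set while the MAC is held in
 * software reset (BGMAC_CMDCFG_SR). With @force the register is rewritten
 * even if the value did not change.
 */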
760 static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
761                                  bool force)
762 {
763         u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
764         u32 new_val = (cmdcfg & mask) | set;
765
766         bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
767         udelay(2);
768
769         if (new_val != cmdcfg || force)
770                 bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
771
772         bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
773         udelay(2);
774 }
775
776 static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
777 {
778         u32 tmp;
779
780         tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
781         bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
782         tmp = (addr[4] << 8) | addr[5];
783         bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
784 }
785
786 static void bgmac_set_rx_mode(struct net_device *net_dev)
787 {
788         struct bgmac *bgmac = netdev_priv(net_dev);
789
790         if (net_dev->flags & IFF_PROMISC)
791                 bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
792         else
793                 bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
794 }
795
796 #if 0 /* We don't use these regs yet */
797 static void bgmac_chip_stats_update(struct bgmac *bgmac)
798 {
799         int i;
800
801         if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
802                 for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
803                         bgmac->mib_tx_regs[i] =
804                                 bgmac_read(bgmac,
805                                            BGMAC_TX_GOOD_OCTETS + (i * 4));
806                 for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
807                         bgmac->mib_rx_regs[i] =
808                                 bgmac_read(bgmac,
809                                            BGMAC_RX_GOOD_OCTETS + (i * 4));
810         }
811
812         /* TODO: what else? how to handle BCM4706? Specs are needed */
813 }
814 #endif
815
816 static void bgmac_clear_mib(struct bgmac *bgmac)
817 {
818         int i;
819
820         if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
821                 return;
822
823         bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
824         for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
825                 bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
826         for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
827                 bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
828 }
829
830 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
831 static void bgmac_speed(struct bgmac *bgmac, int speed)
832 {
833         u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
834         u32 set = 0;
835
836         if (speed & BGMAC_SPEED_10)
837                 set |= BGMAC_CMDCFG_ES_10;
838         if (speed & BGMAC_SPEED_100)
839                 set |= BGMAC_CMDCFG_ES_100;
840         if (speed & BGMAC_SPEED_1000)
841                 set |= BGMAC_CMDCFG_ES_1000;
842         if (!bgmac->full_duplex)
843                 set |= BGMAC_CMDCFG_HD;
844         bgmac_cmdcfg_maskset(bgmac, mask, set, true);
845 }
846
847 static void bgmac_miiconfig(struct bgmac *bgmac)
848 {
849         u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
850                         BGMAC_DS_MM_SHIFT;
851         if (imode == 0 || imode == 1) {
852                 if (bgmac->autoneg)
853                         bgmac_speed(bgmac, BGMAC_SPEED_100);
854                 else
855                         bgmac_speed(bgmac, bgmac->speed);
856         }
857 }
858
859 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
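/* Full MAC core reset: quiesce the DMA rings if the core is already
 * enabled, re-enable the core (with extra IOCTL flags when an external
 * switch is attached), program the PHY/switch interface type on chips that
 * need it, bring CMDCFG back to a known default state, clear the MIB
 * counters and re-initialize the MII/PHY side.
 */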
860 static void bgmac_chip_reset(struct bgmac *bgmac)
861 {
862         struct bcma_device *core = bgmac->core;
863         struct bcma_bus *bus = core->bus;
864         struct bcma_chipinfo *ci = &bus->chipinfo;
865         u32 flags = 0;
866         u32 iost;
867         int i;
868
869         if (bcma_core_is_enabled(core)) {
870                 if (!bgmac->stats_grabbed) {
871                         /* bgmac_chip_stats_update(bgmac); */
872                         bgmac->stats_grabbed = true;
873                 }
874
875                 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
876                         bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
877
878                 bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
879                 udelay(1);
880
881                 for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
882                         bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);
883
884                 /* TODO: Clear software multicast filter list */
885         }
886
887         iost = bcma_aread32(core, BCMA_IOST);
888         if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
889             (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
890             (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
891                 iost &= ~BGMAC_BCMA_IOST_ATTACHED;
892
893         if (iost & BGMAC_BCMA_IOST_ATTACHED) {
894                 flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
895                 if (!bgmac->has_robosw)
896                         flags |= BGMAC_BCMA_IOCTL_SW_RESET;
897         }
898
899         bcma_core_enable(core, flags);
900
901         if (core->id.rev > 2) {
902                 bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
903                 bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
904                                  1000);
905         }
906
907         if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
908             ci->id == BCMA_CHIP_ID_BCM53572) {
909                 struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
910                 u8 et_swtype = 0;
911                 u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
912                              BGMAC_CHIPCTL_1_IF_TYPE_RMII;
913                 char buf[2];
914
915                 if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
916                         if (kstrtou8(buf, 0, &et_swtype))
917                                 bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
918                                           buf);
919                         et_swtype &= 0x0f;
920                         et_swtype <<= 4;
921                         sw_type = et_swtype;
922                 } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
923                         sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
924                 } else if ((ci->id != BCMA_CHIP_ID_BCM53572 && ci->pkg == 10) ||
925                            (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9)) {
926                         sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
927                                   BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
928                 }
929                 bcma_chipco_chipctl_maskset(cc, 1,
930                                             ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
931                                               BGMAC_CHIPCTL_1_SW_TYPE_MASK),
932                                             sw_type);
933         }
934
935         if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
936                 bcma_awrite32(core, BCMA_IOCTL,
937                               bcma_aread32(core, BCMA_IOCTL) &
938                               ~BGMAC_BCMA_IOCTL_SW_RESET);
939
940         /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
941          * Specs don't mention using BGMAC_CMDCFG_SR, but in this routine
942          * BGMAC_CMDCFG is read _after_ putting the chip in reset, so the bit
943          * has to be kept set until the MAC is taken out of reset.
944          */
945         bgmac_cmdcfg_maskset(bgmac,
946                              ~(BGMAC_CMDCFG_TE |
947                                BGMAC_CMDCFG_RE |
948                                BGMAC_CMDCFG_RPI |
949                                BGMAC_CMDCFG_TAI |
950                                BGMAC_CMDCFG_HD |
951                                BGMAC_CMDCFG_ML |
952                                BGMAC_CMDCFG_CFE |
953                                BGMAC_CMDCFG_RL |
954                                BGMAC_CMDCFG_RED |
955                                BGMAC_CMDCFG_PE |
956                                BGMAC_CMDCFG_TPI |
957                                BGMAC_CMDCFG_PAD_EN |
958                                BGMAC_CMDCFG_PF),
959                              BGMAC_CMDCFG_PROM |
960                              BGMAC_CMDCFG_NLC |
961                              BGMAC_CMDCFG_CFE |
962                              BGMAC_CMDCFG_SR,
963                              false);
964
965         bgmac_clear_mib(bgmac);
966         if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
967                 bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
968                                BCMA_GMAC_CMN_PC_MTE);
969         else
970                 bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
971         bgmac_miiconfig(bgmac);
972         bgmac_phy_init(bgmac);
973
974         bgmac->int_status = 0;
975 }
976
977 static void bgmac_chip_intrs_on(struct bgmac *bgmac)
978 {
979         bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
980 }
981
982 static void bgmac_chip_intrs_off(struct bgmac *bgmac)
983 {
984         bgmac_write(bgmac, BGMAC_INT_MASK, 0);
985         bgmac_read(bgmac, BGMAC_INT_MASK);
986 }
987
988 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
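/* Enable TX and RX in CMDCFG, set chip-specific flow control thresholds and
 * program the MDP field of BGMAC_RXQ_CTL from the backplane clock:
 * mdp = bp_clk_mhz * 128 / 1000 - 3, e.g. a 133 MHz backplane gives
 * 133 * 128 / 1000 - 3 = 14.
 */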
989 static void bgmac_enable(struct bgmac *bgmac)
990 {
991         struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
992         u32 cmdcfg;
993         u32 mode;
994         u32 rxq_ctl;
995         u32 fl_ctl;
996         u16 bp_clk;
997         u8 mdp;
998
999         cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
1000         bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
1001                              BGMAC_CMDCFG_SR, true);
1002         udelay(2);
1003         cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
1004         bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
1005
1006         mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
1007                 BGMAC_DS_MM_SHIFT;
1008         if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
1009                 bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
1010         if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
1011                 bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
1012                                             BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
1013
1014         switch (ci->id) {
1015         case BCMA_CHIP_ID_BCM5357:
1016         case BCMA_CHIP_ID_BCM4749:
1017         case BCMA_CHIP_ID_BCM53572:
1018         case BCMA_CHIP_ID_BCM4716:
1019         case BCMA_CHIP_ID_BCM47162:
1020                 fl_ctl = 0x03cb04cb;
1021                 if (ci->id == BCMA_CHIP_ID_BCM5357 ||
1022                     ci->id == BCMA_CHIP_ID_BCM4749 ||
1023                     ci->id == BCMA_CHIP_ID_BCM53572)
1024                         fl_ctl = 0x2300e1;
1025                 bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
1026                 bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
1027                 break;
1028         }
1029
1030         rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
1031         rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
1032         bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
1033         mdp = (bp_clk * 128 / 1000) - 3;
1034         rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
1035         bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
1036 }
1037
1038 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
1039 static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
1040 {
1041         struct bgmac_dma_ring *ring;
1042         int i;
1043
1044         /* 1 interrupt per received frame */
1045         bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
1046
1047         /* Enable 802.3x tx flow control (honor received PAUSE frames) */
1048         bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
1049
1050         bgmac_set_rx_mode(bgmac->net_dev);
1051
1052         bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
1053
1054         if (bgmac->loopback)
1055                 bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
1056         else
1057                 bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
1058
1059         bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
1060
1061         if (!bgmac->autoneg) {
1062                 bgmac_speed(bgmac, bgmac->speed);
1063                 bgmac_phy_force(bgmac);
1064         } else if (bgmac->speed) { /* if there is anything to adv */
1065                 bgmac_phy_advertise(bgmac);
1066         }
1067
1068         if (full_init) {
1069                 bgmac_dma_init(bgmac);
1070                 if (1) /* FIXME: is there any case we don't want IRQs? */
1071                         bgmac_chip_intrs_on(bgmac);
1072         } else {
1073                 for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
1074                         ring = &bgmac->rx_ring[i];
1075                         bgmac_dma_rx_enable(bgmac, ring);
1076                 }
1077         }
1078
1079         bgmac_enable(bgmac);
1080 }
1081
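/* IRQ handler: acknowledge the raised interrupts, mask further interrupts
 * and defer the actual work to NAPI (bgmac_poll), passing the status via
 * bgmac->int_status.
 */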
1082 static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
1083 {
1084         struct bgmac *bgmac = netdev_priv(dev_id);
1085
1086         u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
1087         int_status &= bgmac->int_mask;
1088
1089         if (!int_status)
1090                 return IRQ_NONE;
1091
1092         /* Ack */
1093         bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);
1094
1095         /* Disable new interrupts until handling existing ones */
1096         bgmac_chip_intrs_off(bgmac);
1097
1098         bgmac->int_status = int_status;
1099
1100         napi_schedule(&bgmac->napi);
1101
1102         return IRQ_HANDLED;
1103 }
1104
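/* NAPI poll handler: reclaim transmitted buffers on TX ring 0, receive up
 * to @weight frames on RX ring 0, complete NAPI if the budget was not
 * exhausted and re-enable the chip interrupts.
 */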
1105 static int bgmac_poll(struct napi_struct *napi, int weight)
1106 {
1107         struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
1108         struct bgmac_dma_ring *ring;
1109         int handled = 0;
1110
1111         if (bgmac->int_status & BGMAC_IS_TX0) {
1112                 ring = &bgmac->tx_ring[0];
1113                 bgmac_dma_tx_free(bgmac, ring);
1114                 bgmac->int_status &= ~BGMAC_IS_TX0;
1115         }
1116
1117         if (bgmac->int_status & BGMAC_IS_RX) {
1118                 ring = &bgmac->rx_ring[0];
1119                 handled += bgmac_dma_rx_read(bgmac, ring, weight);
1120                 bgmac->int_status &= ~BGMAC_IS_RX;
1121         }
1122
1123         if (bgmac->int_status) {
1124                 bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
1125                 bgmac->int_status = 0;
1126         }
1127
1128         if (handled < weight)
1129                 napi_complete(napi);
1130
1131         bgmac_chip_intrs_on(bgmac);
1132
1133         return handled;
1134 }
1135
1136 /**************************************************
1137  * net_device_ops
1138  **************************************************/
1139
1140 static int bgmac_open(struct net_device *net_dev)
1141 {
1142         struct bgmac *bgmac = netdev_priv(net_dev);
1143         int err = 0;
1144
1145         bgmac_chip_reset(bgmac);
1146         /* Specs mention reclaiming rings here, but we do that in DMA init */
1147         bgmac_chip_init(bgmac, true);
1148
1149         err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
1150                           KBUILD_MODNAME, net_dev);
1151         if (err < 0) {
1152                 bgmac_err(bgmac, "IRQ request error: %d!\n", err);
1153                 goto err_out;
1154         }
1155         napi_enable(&bgmac->napi);
1156
1157         netif_carrier_on(net_dev);
1158
1159 err_out:
1160         return err;
1161 }
1162
1163 static int bgmac_stop(struct net_device *net_dev)
1164 {
1165         struct bgmac *bgmac = netdev_priv(net_dev);
1166
1167         netif_carrier_off(net_dev);
1168
1169         napi_disable(&bgmac->napi);
1170         bgmac_chip_intrs_off(bgmac);
1171         free_irq(bgmac->core->irq, net_dev);
1172
1173         bgmac_chip_reset(bgmac);
1174
1175         return 0;
1176 }
1177
1178 static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
1179                                     struct net_device *net_dev)
1180 {
1181         struct bgmac *bgmac = netdev_priv(net_dev);
1182         struct bgmac_dma_ring *ring;
1183
1184         /* No QoS support yet */
1185         ring = &bgmac->tx_ring[0];
1186         return bgmac_dma_tx_add(bgmac, ring, skb);
1187 }
1188
1189 static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
1190 {
1191         struct bgmac *bgmac = netdev_priv(net_dev);
1192         int ret;
1193
1194         ret = eth_prepare_mac_addr_change(net_dev, addr);
1195         if (ret < 0)
1196                 return ret;
1197         bgmac_write_mac_address(bgmac, (u8 *)addr);
1198         eth_commit_mac_addr_change(net_dev, addr);
1199         return 0;
1200 }
1201
1202 static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1203 {
1204         struct bgmac *bgmac = netdev_priv(net_dev);
1205         struct mii_ioctl_data *data = if_mii(ifr);
1206
1207         switch (cmd) {
1208         case SIOCGMIIPHY:
1209                 data->phy_id = bgmac->phyaddr;
1210                 /* fallthru */
1211         case SIOCGMIIREG:
1212                 if (!netif_running(net_dev))
1213                         return -EAGAIN;
1214                 data->val_out = bgmac_phy_read(bgmac, data->phy_id,
1215                                                data->reg_num & 0x1f);
1216                 return 0;
1217         case SIOCSMIIREG:
1218                 if (!netif_running(net_dev))
1219                         return -EAGAIN;
1220                 bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
1221                                 data->val_in);
1222                 return 0;
1223         default:
1224                 return -EOPNOTSUPP;
1225         }
1226 }
1227
1228 static const struct net_device_ops bgmac_netdev_ops = {
1229         .ndo_open               = bgmac_open,
1230         .ndo_stop               = bgmac_stop,
1231         .ndo_start_xmit         = bgmac_start_xmit,
1232         .ndo_set_rx_mode        = bgmac_set_rx_mode,
1233         .ndo_set_mac_address    = bgmac_set_mac_address,
1234         .ndo_validate_addr      = eth_validate_addr,
1235         .ndo_do_ioctl           = bgmac_ioctl,
1236 };
1237
1238 /**************************************************
1239  * ethtool_ops
1240  **************************************************/
1241
1242 static int bgmac_get_settings(struct net_device *net_dev,
1243                               struct ethtool_cmd *cmd)
1244 {
1245         struct bgmac *bgmac = netdev_priv(net_dev);
1246
1247         cmd->supported = SUPPORTED_10baseT_Half |
1248                          SUPPORTED_10baseT_Full |
1249                          SUPPORTED_100baseT_Half |
1250                          SUPPORTED_100baseT_Full |
1251                          SUPPORTED_1000baseT_Half |
1252                          SUPPORTED_1000baseT_Full |
1253                          SUPPORTED_Autoneg;
1254
1255         if (bgmac->autoneg) {
1256                 WARN_ON(cmd->advertising);
1257                 if (bgmac->full_duplex) {
1258                         if (bgmac->speed & BGMAC_SPEED_10)
1259                                 cmd->advertising |= ADVERTISED_10baseT_Full;
1260                         if (bgmac->speed & BGMAC_SPEED_100)
1261                                 cmd->advertising |= ADVERTISED_100baseT_Full;
1262                         if (bgmac->speed & BGMAC_SPEED_1000)
1263                                 cmd->advertising |= ADVERTISED_1000baseT_Full;
1264                 } else {
1265                         if (bgmac->speed & BGMAC_SPEED_10)
1266                                 cmd->advertising |= ADVERTISED_10baseT_Half;
1267                         if (bgmac->speed & BGMAC_SPEED_100)
1268                                 cmd->advertising |= ADVERTISED_100baseT_Half;
1269                         if (bgmac->speed & BGMAC_SPEED_1000)
1270                                 cmd->advertising |= ADVERTISED_1000baseT_Half;
1271                 }
1272         } else {
1273                 switch (bgmac->speed) {
1274                 case BGMAC_SPEED_10:
1275                         ethtool_cmd_speed_set(cmd, SPEED_10);
1276                         break;
1277                 case BGMAC_SPEED_100:
1278                         ethtool_cmd_speed_set(cmd, SPEED_100);
1279                         break;
1280                 case BGMAC_SPEED_1000:
1281                         ethtool_cmd_speed_set(cmd, SPEED_1000);
1282                         break;
1283                 }
1284         }
1285
1286         cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1287
1288         cmd->autoneg = bgmac->autoneg;
1289
1290         return 0;
1291 }
1292
1293 #if 0
1294 static int bgmac_set_settings(struct net_device *net_dev,
1295                               struct ethtool_cmd *cmd)
1296 {
1297         struct bgmac *bgmac = netdev_priv(net_dev);
1298
1299         return -1;
1300 }
1301 #endif
1302
1303 static void bgmac_get_drvinfo(struct net_device *net_dev,
1304                               struct ethtool_drvinfo *info)
1305 {
1306         strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1307         strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
1308 }
1309
1310 static const struct ethtool_ops bgmac_ethtool_ops = {
1311         .get_settings           = bgmac_get_settings,
1312         .get_drvinfo            = bgmac_get_drvinfo,
1313 };
1314
1315 /**************************************************
1316  * BCMA bus ops
1317  **************************************************/
1318
1319 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
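/* BCMA probe: read the MAC address and PHY address from the SPROM, allocate
 * the netdev and the DMA rings, reset the chip and the PHY, and register
 * the net_device. Only core units 0 and 1 are handled.
 */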
1320 static int bgmac_probe(struct bcma_device *core)
1321 {
1322         struct net_device *net_dev;
1323         struct bgmac *bgmac;
1324         struct ssb_sprom *sprom = &core->bus->sprom;
1325         u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
1326         int err;
1327
1328         /* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
1329         if (core->core_unit > 1) {
1330                 pr_err("Unsupported core_unit %d\n", core->core_unit);
1331                 return -ENOTSUPP;
1332         }
1333
1334         if (!is_valid_ether_addr(mac)) {
1335                 dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
1336                 eth_random_addr(mac);
1337                 dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
1338         }
1339
1340         /* Allocation and references */
1341         net_dev = alloc_etherdev(sizeof(*bgmac));
1342         if (!net_dev)
1343                 return -ENOMEM;
1344         net_dev->netdev_ops = &bgmac_netdev_ops;
1345         net_dev->irq = core->irq;
1346         SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
1347         bgmac = netdev_priv(net_dev);
1348         bgmac->net_dev = net_dev;
1349         bgmac->core = core;
1350         bcma_set_drvdata(core, bgmac);
1351
1352         /* Defaults */
1353         bgmac->autoneg = true;
1354         bgmac->full_duplex = true;
1355         bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
1356         memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
1357
1358         /* On BCM4706 we need common core to access PHY */
1359         if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
1360             !core->bus->drv_gmac_cmn.core) {
1361                 bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
1362                 err = -ENODEV;
1363                 goto err_netdev_free;
1364         }
1365         bgmac->cmn = core->bus->drv_gmac_cmn.core;
1366
1367         bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
1368                          sprom->et0phyaddr;
1369         bgmac->phyaddr &= BGMAC_PHY_MASK;
1370         if (bgmac->phyaddr == BGMAC_PHY_MASK) {
1371                 bgmac_err(bgmac, "No PHY found\n");
1372                 err = -ENODEV;
1373                 goto err_netdev_free;
1374         }
1375         bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
1376                    bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
1377
1378         if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
1379                 bgmac_err(bgmac, "PCI setup not implemented\n");
1380                 err = -ENOTSUPP;
1381                 goto err_netdev_free;
1382         }
1383
1384         bgmac_chip_reset(bgmac);
1385
1386         err = bgmac_dma_alloc(bgmac);
1387         if (err) {
1388                 bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
1389                 goto err_netdev_free;
1390         }
1391
1392         bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
1393         if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
1394                 bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
1395
1396         /* TODO: reset the external phy. Specs are needed */
1397         bgmac_phy_reset(bgmac);
1398
1399         bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
1400                                BGMAC_BFL_ENETROBO);
1401         if (bgmac->has_robosw)
1402                 bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");
1403
1404         if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
1405                 bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
1406
1407         err = register_netdev(bgmac->net_dev);
1408         if (err) {
1409                 bgmac_err(bgmac, "Cannot register net device\n");
1410                 err = -ENOTSUPP;
1411                 goto err_dma_free;
1412         }
1413
1414         netif_carrier_off(net_dev);
1415
1416         netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
1417
1418         return 0;
1419
1420 err_dma_free:
1421         bgmac_dma_free(bgmac);
1422
1423 err_netdev_free:
1424         bcma_set_drvdata(core, NULL);
1425         free_netdev(net_dev);
1426
1427         return err;
1428 }
1429
1430 static void bgmac_remove(struct bcma_device *core)
1431 {
1432         struct bgmac *bgmac = bcma_get_drvdata(core);
1433
1434         netif_napi_del(&bgmac->napi);
1435         unregister_netdev(bgmac->net_dev);
1436         bgmac_dma_free(bgmac);
1437         bcma_set_drvdata(core, NULL);
1438         free_netdev(bgmac->net_dev);
1439 }
1440
1441 static struct bcma_driver bgmac_bcma_driver = {
1442         .name           = KBUILD_MODNAME,
1443         .id_table       = bgmac_bcma_tbl,
1444         .probe          = bgmac_probe,
1445         .remove         = bgmac_remove,
1446 };
1447
1448 static int __init bgmac_init(void)
1449 {
1450         int err;
1451
1452         err = bcma_driver_register(&bgmac_bcma_driver);
1453         if (err)
1454                 return err;
1455         pr_info("Broadcom 47xx GBit MAC driver loaded\n");
1456
1457         return 0;
1458 }
1459
1460 static void __exit bgmac_exit(void)
1461 {
1462         bcma_driver_unregister(&bgmac_bcma_driver);
1463 }
1464
1465 module_init(bgmac_init)
1466 module_exit(bgmac_exit)
1467
1468 MODULE_AUTHOR("Rafał Miłecki");
1469 MODULE_LICENSE("GPL");