amd-xgbe: Implement split header receive support
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

#include "xgbe.h"
#include "xgbe-common.h"

static int xgbe_poll(struct napi_struct *, int);
static void xgbe_set_rx_mode(struct net_device *);

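/* Allocate and initialize the per-DMA-channel structures, attaching a Tx
 * ring and/or an Rx ring to each channel that falls within the respective
 * ring count.
 */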
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel_mem, *channel;
        struct xgbe_ring *tx_ring, *rx_ring;
        unsigned int count, i;

        count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

        channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
        if (!channel_mem)
                goto err_channel;

        tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
                          GFP_KERNEL);
        if (!tx_ring)
                goto err_tx_ring;

        rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
                          GFP_KERNEL);
        if (!rx_ring)
                goto err_rx_ring;

        for (i = 0, channel = channel_mem; i < count; i++, channel++) {
                snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
                channel->pdata = pdata;
                channel->queue_index = i;
                channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
                                    (DMA_CH_INC * i);

                if (i < pdata->tx_ring_count) {
                        spin_lock_init(&tx_ring->lock);
                        channel->tx_ring = tx_ring++;
                }

                if (i < pdata->rx_ring_count) {
                        spin_lock_init(&rx_ring->lock);
                        channel->rx_ring = rx_ring++;
                }

                DBGPR("  %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
                      channel->name, channel->queue_index, channel->dma_regs,
                      channel->tx_ring, channel->rx_ring);
        }

        pdata->channel = channel_mem;
        pdata->channel_count = count;

        return 0;

err_rx_ring:
        kfree(tx_ring);

err_tx_ring:
        kfree(channel_mem);

err_channel:
        netdev_err(pdata->netdev, "channel allocation failed\n");

        return -ENOMEM;
}

static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
        if (!pdata->channel)
                return;

        kfree(pdata->channel->rx_ring);
        kfree(pdata->channel->tx_ring);
        kfree(pdata->channel);

        pdata->channel = NULL;
        pdata->channel_count = 0;
}

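/* Return the number of free descriptors in a Tx ring (cur advances as
 * descriptors are submitted, dirty as they are reclaimed).
 */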
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
        return (ring->rdesc_count - (ring->cur - ring->dirty));
}

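/* Derive the Rx buffer size from the MTU: add room for the Ethernet header,
 * FCS and a VLAN tag, clamp to the supported range and round up to the
 * required buffer alignment.
 */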
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
        unsigned int rx_buf_size;

        if (mtu > XGMAC_JUMBO_PACKET_MTU) {
                netdev_alert(netdev, "MTU exceeds maximum supported value\n");
                return -EINVAL;
        }

        rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
        rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

        rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
                      ~(XGBE_RX_BUF_ALIGN - 1);

        return rx_buf_size;
}

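/* Enable the DMA Tx/Rx completion interrupts on each channel, selecting
 * only the interrupt types for the rings the channel actually owns.
 */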
static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        enum xgbe_int int_id;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (channel->tx_ring && channel->rx_ring)
                        int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
                else if (channel->tx_ring)
                        int_id = XGMAC_INT_DMA_CH_SR_TI;
                else if (channel->rx_ring)
                        int_id = XGMAC_INT_DMA_CH_SR_RI;
                else
                        continue;

                hw_if->enable_int(channel, int_id);
        }
}

static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        enum xgbe_int int_id;
        unsigned int i;

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (channel->tx_ring && channel->rx_ring)
                        int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
                else if (channel->tx_ring)
                        int_id = XGMAC_INT_DMA_CH_SR_TI;
                else if (channel->rx_ring)
                        int_id = XGMAC_INT_DMA_CH_SR_RI;
                else
                        continue;

                hw_if->disable_int(channel, int_id);
        }
}

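/* Device interrupt handler: schedule NAPI for Tx/Rx completion work,
 * trigger a device restart on a fatal bus error, and service MAC-level
 * MMC statistics and Tx timestamp interrupts.
 */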
static irqreturn_t xgbe_isr(int irq, void *data)
{
        struct xgbe_prv_data *pdata = data;
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        unsigned int dma_isr, dma_ch_isr;
        unsigned int mac_isr, mac_tssr;
        unsigned int i;

        /* The DMA interrupt status register also reports MAC and MTL
         * interrupts. So for polling mode, we just need to check whether
         * this register is non-zero.
         */
        dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
        if (!dma_isr)
                goto isr_done;

        DBGPR("-->xgbe_isr\n");

        DBGPR("  DMA_ISR = %08x\n", dma_isr);
        DBGPR("  DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
        DBGPR("  DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));

        for (i = 0; i < pdata->channel_count; i++) {
                if (!(dma_isr & (1 << i)))
                        continue;

                channel = pdata->channel + i;

                dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
                DBGPR("  DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);

                if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
                    XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
                        if (napi_schedule_prep(&pdata->napi)) {
                                /* Disable Tx and Rx interrupts */
                                xgbe_disable_rx_tx_ints(pdata);

                                /* Turn on polling */
                                __napi_schedule(&pdata->napi);
                        }
                }

                /* Restart the device on a Fatal Bus Error */
                if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
                        schedule_work(&pdata->restart_work);

                /* Clear all interrupt signals */
                XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
        }

        if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
                mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
                        hw_if->tx_mmc_int(pdata);

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
                        hw_if->rx_mmc_int(pdata);

                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
                        mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

                        if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
                                /* Read Tx Timestamp to clear interrupt */
                                pdata->tx_tstamp =
                                        hw_if->get_tx_tstamp(pdata);
                                schedule_work(&pdata->tx_tstamp_work);
                        }
                }
        }

        DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));

        DBGPR("<--xgbe_isr\n");

isr_done:
        return IRQ_HANDLED;
}

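/* Tx coalescing timer callback: force a NAPI poll so that completed Tx
 * descriptors are processed even if no further interrupt fires.
 */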
static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
{
        struct xgbe_channel *channel = container_of(timer,
                                                    struct xgbe_channel,
                                                    tx_timer);
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_prv_data *pdata = channel->pdata;
        unsigned long flags;

        DBGPR("-->xgbe_tx_timer\n");

        spin_lock_irqsave(&ring->lock, flags);

        if (napi_schedule_prep(&pdata->napi)) {
                /* Disable Tx and Rx interrupts */
                xgbe_disable_rx_tx_ints(pdata);

                /* Turn on polling */
                __napi_schedule(&pdata->napi);
        }

        channel->tx_timer_active = 0;

        spin_unlock_irqrestore(&ring->lock, flags);

        DBGPR("<--xgbe_tx_timer\n");

        return HRTIMER_NORESTART;
}

static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        DBGPR("-->xgbe_init_tx_timers\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                DBGPR("  %s adding tx timer\n", channel->name);
                hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
                             HRTIMER_MODE_REL);
                channel->tx_timer.function = xgbe_tx_timer;
        }

        DBGPR("<--xgbe_init_tx_timers\n");
}

static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i;

        DBGPR("-->xgbe_stop_tx_timers\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                if (!channel->tx_ring)
                        break;

                DBGPR("  %s deleting tx timer\n", channel->name);
                channel->tx_timer_active = 0;
                hrtimer_cancel(&channel->tx_timer);
        }

        DBGPR("<--xgbe_stop_tx_timers\n");
}

void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
        unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
        struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

        DBGPR("-->xgbe_get_all_hw_features\n");

        mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
        mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
        mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

        memset(hw_feat, 0, sizeof(*hw_feat));

        hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

        /* Hardware feature register 0 */
        hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
        hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
        hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
        hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
        hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
        hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
        hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
        hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
        hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
        hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
        hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
        hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
                                              ADDMACADRSEL);
        hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
        hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

        /* Hardware feature register 1 */
        hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                RXFIFOSIZE);
        hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                TXFIFOSIZE);
        hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
        hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
        hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
        hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
        hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
        hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  HASHTBLSZ);
        hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  L3L4FNUM);

        /* Hardware feature register 2 */
        hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
        hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
        hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
        hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
        hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
        hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

        /* Translate the Hash Table size into actual number */
        switch (hw_feat->hash_table_size) {
        case 0:
                break;
        case 1:
                hw_feat->hash_table_size = 64;
                break;
        case 2:
                hw_feat->hash_table_size = 128;
                break;
        case 3:
                hw_feat->hash_table_size = 256;
                break;
        }

        /* The Queue and Channel counts are zero based so increment them
         * to get the actual number
         */
        hw_feat->rx_q_cnt++;
        hw_feat->tx_q_cnt++;
        hw_feat->rx_ch_cnt++;
        hw_feat->tx_ch_cnt++;

        DBGPR("<--xgbe_get_all_hw_features\n");
}

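/* Enable (and, when requested by the caller, register) the single NAPI
 * instance shared by all of the device's channels.
 */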
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
        if (add)
                netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
                               NAPI_POLL_WEIGHT);
        napi_enable(&pdata->napi);
}

static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
        napi_disable(&pdata->napi);

        if (del)
                netif_napi_del(&pdata->napi);
}

void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;

        DBGPR("-->xgbe_init_tx_coalesce\n");

        pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
        pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

        hw_if->config_tx_coalesce(pdata);

        DBGPR("<--xgbe_init_tx_coalesce\n");
}

void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;

        DBGPR("-->xgbe_init_rx_coalesce\n");

        pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
        pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

        hw_if->config_rx_coalesce(pdata);

        DBGPR("<--xgbe_init_rx_coalesce\n");
}

static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
        unsigned int i, j;

        DBGPR("-->xgbe_free_tx_data\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                ring = channel->tx_ring;
                if (!ring)
                        break;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);
                        desc_if->unmap_rdata(pdata, rdata);
                }
        }

        DBGPR("<--xgbe_free_tx_data\n");
}

static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_channel *channel;
        struct xgbe_ring *ring;
        struct xgbe_ring_data *rdata;
        unsigned int i, j;

        DBGPR("-->xgbe_free_rx_data\n");

        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
                ring = channel->rx_ring;
                if (!ring)
                        break;

                for (j = 0; j < ring->rdesc_count; j++) {
                        rdata = XGBE_GET_DESC_DATA(ring, j);
                        desc_if->unmap_rdata(pdata, rdata);
                }
        }

        DBGPR("<--xgbe_free_rx_data\n");
}

static void xgbe_adjust_link(struct net_device *netdev)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct phy_device *phydev = pdata->phydev;
        int new_state = 0;

        if (!phydev)
                return;

        if (phydev->link) {
                /* Flow control support */
                if (pdata->pause_autoneg) {
                        if (phydev->pause || phydev->asym_pause) {
                                pdata->tx_pause = 1;
                                pdata->rx_pause = 1;
                        } else {
                                pdata->tx_pause = 0;
                                pdata->rx_pause = 0;
                        }
                }

                if (pdata->tx_pause != pdata->phy_tx_pause) {
                        hw_if->config_tx_flow_control(pdata);
                        pdata->phy_tx_pause = pdata->tx_pause;
                }

                if (pdata->rx_pause != pdata->phy_rx_pause) {
                        hw_if->config_rx_flow_control(pdata);
                        pdata->phy_rx_pause = pdata->rx_pause;
                }

                /* Speed support */
                if (phydev->speed != pdata->phy_speed) {
                        new_state = 1;

                        switch (phydev->speed) {
                        case SPEED_10000:
                                hw_if->set_xgmii_speed(pdata);
                                break;

                        case SPEED_2500:
                                hw_if->set_gmii_2500_speed(pdata);
                                break;

                        case SPEED_1000:
                                hw_if->set_gmii_speed(pdata);
                                break;
                        }
                        pdata->phy_speed = phydev->speed;
                }

                if (phydev->link != pdata->phy_link) {
                        new_state = 1;
                        pdata->phy_link = 1;
                }
        } else if (pdata->phy_link) {
                new_state = 1;
                pdata->phy_link = 0;
                pdata->phy_speed = SPEED_UNKNOWN;
        }

        if (new_state)
                phy_print_status(phydev);
}

static int xgbe_phy_init(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        struct phy_device *phydev = pdata->phydev;
        int ret;

        pdata->phy_link = -1;
        pdata->phy_speed = SPEED_UNKNOWN;
        pdata->phy_tx_pause = pdata->tx_pause;
        pdata->phy_rx_pause = pdata->rx_pause;

        ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
                                 pdata->phy_mode);
        if (ret) {
                netdev_err(netdev, "phy_connect_direct failed\n");
                return ret;
        }

        if (!phydev->drv || (phydev->drv->phy_id == 0)) {
                netdev_err(netdev, "phy_id not valid\n");
                ret = -ENODEV;
                goto err_phy_connect;
        }
        DBGPR("  phy_connect_direct succeeded for PHY %s, link=%d\n",
              dev_name(&phydev->dev), phydev->link);

        return 0;

err_phy_connect:
        phy_disconnect(phydev);

        return ret;
}

static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
{
        if (!pdata->phydev)
                return;

        phy_disconnect(pdata->phydev);
}

int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        unsigned long flags;

        DBGPR("-->xgbe_powerdown\n");

        if (!netif_running(netdev) ||
            (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
                netdev_alert(netdev, "Device is already powered down\n");
                DBGPR("<--xgbe_powerdown\n");
                return -EINVAL;
        }

        phy_stop(pdata->phydev);

        spin_lock_irqsave(&pdata->lock, flags);

        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_detach(netdev);

        netif_tx_stop_all_queues(netdev);
        xgbe_napi_disable(pdata, 0);

        /* Powerdown Tx/Rx */
        hw_if->powerdown_tx(pdata);
        hw_if->powerdown_rx(pdata);

        pdata->power_down = 1;

        spin_unlock_irqrestore(&pdata->lock, flags);

        DBGPR("<--xgbe_powerdown\n");

        return 0;
}

int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        unsigned long flags;

        DBGPR("-->xgbe_powerup\n");

        if (!netif_running(netdev) ||
            (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
                netdev_alert(netdev, "Device is already powered up\n");
                DBGPR("<--xgbe_powerup\n");
                return -EINVAL;
        }

        spin_lock_irqsave(&pdata->lock, flags);

        pdata->power_down = 0;

        phy_start(pdata->phydev);

        /* Enable Tx/Rx */
        hw_if->powerup_tx(pdata);
        hw_if->powerup_rx(pdata);

        if (caller == XGMAC_DRIVER_CONTEXT)
                netif_device_attach(netdev);

        xgbe_napi_enable(pdata, 0);
        netif_tx_start_all_queues(netdev);

        spin_unlock_irqrestore(&pdata->lock, flags);

        DBGPR("<--xgbe_powerup\n");

        return 0;
}

static int xgbe_start(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct net_device *netdev = pdata->netdev;

        DBGPR("-->xgbe_start\n");

        xgbe_set_rx_mode(netdev);

        hw_if->init(pdata);

        phy_start(pdata->phydev);

        hw_if->enable_tx(pdata);
        hw_if->enable_rx(pdata);

        xgbe_init_tx_timers(pdata);

        xgbe_napi_enable(pdata, 1);
        netif_tx_start_all_queues(netdev);

        DBGPR("<--xgbe_start\n");

        return 0;
}

static void xgbe_stop(struct xgbe_prv_data *pdata)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct net_device *netdev = pdata->netdev;

        DBGPR("-->xgbe_stop\n");

        phy_stop(pdata->phydev);

        netif_tx_stop_all_queues(netdev);
        xgbe_napi_disable(pdata, 1);

        xgbe_stop_tx_timers(pdata);

        hw_if->disable_tx(pdata);
        hw_if->disable_rx(pdata);

        DBGPR("<--xgbe_stop\n");
}

static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
{
        struct xgbe_hw_if *hw_if = &pdata->hw_if;

        DBGPR("-->xgbe_restart_dev\n");

        /* If not running, "restart" will happen on open */
        if (!netif_running(pdata->netdev))
                return;

        xgbe_stop(pdata);
        synchronize_irq(pdata->irq_number);

        xgbe_free_tx_data(pdata);
        xgbe_free_rx_data(pdata);

        /* Issue software reset to device if requested */
        if (reset)
                hw_if->exit(pdata);

        xgbe_start(pdata);

        DBGPR("<--xgbe_restart_dev\n");
}

static void xgbe_restart(struct work_struct *work)
{
        struct xgbe_prv_data *pdata = container_of(work,
                                                   struct xgbe_prv_data,
                                                   restart_work);

        rtnl_lock();

        xgbe_restart_dev(pdata, 1);

        rtnl_unlock();
}

static void xgbe_tx_tstamp(struct work_struct *work)
{
        struct xgbe_prv_data *pdata = container_of(work,
                                                   struct xgbe_prv_data,
                                                   tx_tstamp_work);
        struct skb_shared_hwtstamps hwtstamps;
        u64 nsec;
        unsigned long flags;

        if (pdata->tx_tstamp) {
                nsec = timecounter_cyc2time(&pdata->tstamp_tc,
                                            pdata->tx_tstamp);

                memset(&hwtstamps, 0, sizeof(hwtstamps));
                hwtstamps.hwtstamp = ns_to_ktime(nsec);
                skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
        }

        dev_kfree_skb_any(pdata->tx_tstamp_skb);

        spin_lock_irqsave(&pdata->tstamp_lock, flags);
        pdata->tx_tstamp_skb = NULL;
        spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}

static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
                                      struct ifreq *ifreq)
{
        if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
                         sizeof(pdata->tstamp_config)))
                return -EFAULT;

        return 0;
}

static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
                                      struct ifreq *ifreq)
{
        struct hwtstamp_config config;
        unsigned int mac_tscr;

        if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
                return -EFAULT;

        if (config.flags)
                return -EINVAL;

        mac_tscr = 0;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
                break;

        case HWTSTAMP_TX_ON:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                break;

        case HWTSTAMP_FILTER_ALL:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2, UDP, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
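                /* Fall through */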
        /* PTP v1, UDP, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2, UDP, Sync packet */
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
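                /* Fall through */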
        /* PTP v1, UDP, Sync packet */
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2, UDP, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
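                /* Fall through */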
        /* PTP v1, UDP, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* 802.1AS, Ethernet, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* 802.1AS, Ethernet, Sync packet */
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* 802.1AS, Ethernet, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2/802.1AS, any layer, any kind of event packet */
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2/802.1AS, any layer, Sync packet */
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        /* PTP v2/802.1AS, any layer, Delay_req packet */
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
                XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
                break;

        default:
                return -ERANGE;
        }

        pdata->hw_if.config_tstamp(pdata, mac_tscr);

        memcpy(&pdata->tstamp_config, &config, sizeof(config));

        return 0;
}

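/* Claim the single outstanding hardware Tx timestamp slot for this packet,
 * or fall back to a software timestamp if a timestamp is already pending.
 */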
static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
                                struct sk_buff *skb,
                                struct xgbe_packet_data *packet)
{
        unsigned long flags;

        if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
                spin_lock_irqsave(&pdata->tstamp_lock, flags);
                if (pdata->tx_tstamp_skb) {
                        /* Another timestamp in progress, ignore this one */
                        XGMAC_SET_BITS(packet->attributes,
                                       TX_PACKET_ATTRIBUTES, PTP, 0);
                } else {
                        pdata->tx_tstamp_skb = skb_get(skb);
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                }
                spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
        }

        if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
                skb_tx_timestamp(skb);
}

static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
        if (vlan_tx_tag_present(skb))
                packet->vlan_ctag = vlan_tx_tag_get(skb);
}

static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
        int ret;

        if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                            TSO_ENABLE))
                return 0;

        ret = skb_cow_head(skb, 0);
        if (ret)
                return ret;

        packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        packet->tcp_header_len = tcp_hdrlen(skb);
        packet->tcp_payload_len = skb->len - packet->header_len;
        packet->mss = skb_shinfo(skb)->gso_size;
        DBGPR("  packet->header_len=%u\n", packet->header_len);
        DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
              packet->tcp_header_len, packet->tcp_payload_len);
        DBGPR("  packet->mss=%u\n", packet->mss);

        return 0;
}

static int xgbe_is_tso(struct sk_buff *skb)
{
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (!skb_is_gso(skb))
                return 0;

        DBGPR("  TSO packet to be processed\n");

        return 1;
}

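/* Determine the packet attributes and count the descriptors the packet
 * will consume: an optional context descriptor, an extra descriptor for
 * the TSO header when doing TSO, and one descriptor per
 * XGBE_TX_MAX_BUF_SIZE chunk of the linear data and each fragment.
 */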
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
                             struct xgbe_ring *ring, struct sk_buff *skb,
                             struct xgbe_packet_data *packet)
{
        struct skb_frag_struct *frag;
        unsigned int context_desc;
        unsigned int len;
        unsigned int i;

        context_desc = 0;
        packet->rdesc_count = 0;

        if (xgbe_is_tso(skb)) {
                /* TSO requires an extra descriptor if mss is different */
                if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
                        context_desc = 1;
                        packet->rdesc_count++;
                }

                /* TSO requires an extra descriptor for the TSO header */
                packet->rdesc_count++;

                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               TSO_ENABLE, 1);
                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               CSUM_ENABLE, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               CSUM_ENABLE, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                /* VLAN requires an extra descriptor if tag is different */
                if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
                        /* We can share with the TSO context descriptor */
                        if (!context_desc) {
                                context_desc = 1;
                                packet->rdesc_count++;
                        }

                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               VLAN_CTAG, 1);
        }

        if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
            (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               PTP, 1);

        for (len = skb_headlen(skb); len;) {
                packet->rdesc_count++;
                len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                for (len = skb_frag_size(frag); len; ) {
                        packet->rdesc_count++;
                        len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
                }
        }
}

static int xgbe_open(struct net_device *netdev)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_desc_if *desc_if = &pdata->desc_if;
        int ret;

        DBGPR("-->xgbe_open\n");

        /* Initialize the phy */
        ret = xgbe_phy_init(pdata);
        if (ret)
                return ret;

        /* Enable the clocks */
        ret = clk_prepare_enable(pdata->sysclk);
        if (ret) {
                netdev_alert(netdev, "dma clk_prepare_enable failed\n");
                goto err_phy_init;
        }

        ret = clk_prepare_enable(pdata->ptpclk);
        if (ret) {
                netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
                goto err_sysclk;
        }

        /* Calculate the Rx buffer size before allocating rings */
        ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
        if (ret < 0)
                goto err_ptpclk;
        pdata->rx_buf_size = ret;

        /* Allocate the channel and ring structures */
        ret = xgbe_alloc_channels(pdata);
        if (ret)
                goto err_ptpclk;

        /* Allocate the ring descriptors and buffers */
        ret = desc_if->alloc_ring_resources(pdata);
        if (ret)
                goto err_channels;

        /* Initialize the device restart and Tx timestamp work struct */
        INIT_WORK(&pdata->restart_work, xgbe_restart);
        INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);

        /* Request interrupts */
        ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
                               netdev->name, pdata);
        if (ret) {
                netdev_alert(netdev, "error requesting irq %d\n",
                             pdata->irq_number);
                goto err_rings;
        }
        pdata->irq_number = netdev->irq;

        ret = xgbe_start(pdata);
        if (ret)
                goto err_start;

        DBGPR("<--xgbe_open\n");

        return 0;

err_start:
        hw_if->exit(pdata);

        devm_free_irq(pdata->dev, pdata->irq_number, pdata);
        pdata->irq_number = 0;

err_rings:
        desc_if->free_ring_resources(pdata);

err_channels:
        xgbe_free_channels(pdata);

err_ptpclk:
        clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
        clk_disable_unprepare(pdata->sysclk);

err_phy_init:
        xgbe_phy_exit(pdata);

        return ret;
}

static int xgbe_close(struct net_device *netdev)
{
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_desc_if *desc_if = &pdata->desc_if;

        DBGPR("-->xgbe_close\n");

        /* Stop the device */
        xgbe_stop(pdata);

        /* Issue software reset to device */
        hw_if->exit(pdata);

        /* Free the ring descriptors and buffers */
        desc_if->free_ring_resources(pdata);

        /* Free the channel and ring structures */
        xgbe_free_channels(pdata);

        /* Release the interrupt */
        if (pdata->irq_number != 0) {
                devm_free_irq(pdata->dev, pdata->irq_number, pdata);
                pdata->irq_number = 0;
        }

        /* Disable the clocks */
        clk_disable_unprepare(pdata->ptpclk);
        clk_disable_unprepare(pdata->sysclk);

        /* Release the phy */
        xgbe_phy_exit(pdata);

        DBGPR("<--xgbe_close\n");

        return 0;
}

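/* Main transmit path: check descriptor availability, prepare TSO, VLAN and
 * timestamping state, map the skb for DMA and hand the descriptors to the
 * hardware.
 */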
1290 static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
1291 {
1292         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1293         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1294         struct xgbe_desc_if *desc_if = &pdata->desc_if;
1295         struct xgbe_channel *channel;
1296         struct xgbe_ring *ring;
1297         struct xgbe_packet_data *packet;
1298         unsigned long flags;
1299         int ret;
1300
1301         DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
1302
1303         channel = pdata->channel + skb->queue_mapping;
1304         ring = channel->tx_ring;
1305         packet = &ring->packet_data;
1306
1307         ret = NETDEV_TX_OK;
1308
1309         spin_lock_irqsave(&ring->lock, flags);
1310
1311         if (skb->len == 0) {
1312                 netdev_err(netdev, "empty skb received from stack\n");
1313                 dev_kfree_skb_any(skb);
1314                 goto tx_netdev_return;
1315         }
1316
1317         /* Calculate preliminary packet info */
1318         memset(packet, 0, sizeof(*packet));
1319         xgbe_packet_info(pdata, ring, skb, packet);
1320
1321         /* Check that there are enough descriptors available */
1322         if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
1323                 DBGPR("  Tx queue stopped, not enough descriptors available\n");
1324                 netif_stop_subqueue(netdev, channel->queue_index);
1325                 ring->tx.queue_stopped = 1;
1326                 ret = NETDEV_TX_BUSY;
1327                 goto tx_netdev_return;
1328         }
1329
1330         ret = xgbe_prep_tso(skb, packet);
1331         if (ret) {
1332                 netdev_err(netdev, "error processing TSO packet\n");
1333                 dev_kfree_skb_any(skb);
1334                 goto tx_netdev_return;
1335         }
1336         xgbe_prep_vlan(skb, packet);
1337
1338         if (!desc_if->map_tx_skb(channel, skb)) {
1339                 dev_kfree_skb_any(skb);
1340                 goto tx_netdev_return;
1341         }
1342
1343         xgbe_prep_tx_tstamp(pdata, skb, packet);
1344
1345         /* Configure required descriptor fields for transmission */
1346         hw_if->dev_xmit(channel);
1347
1348 #ifdef XGMAC_ENABLE_TX_PKT_DUMP
1349         xgbe_print_pkt(netdev, skb, true);
1350 #endif
1351
1352 tx_netdev_return:
1353         spin_unlock_irqrestore(&ring->lock, flags);
1354
1355         DBGPR("<--xgbe_xmit\n");
1356
1357         return ret;
1358 }
1359
1360 static void xgbe_set_rx_mode(struct net_device *netdev)
1361 {
1362         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1363         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1364         unsigned int pr_mode, am_mode;
1365
1366         DBGPR("-->xgbe_set_rx_mode\n");
1367
1368         pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
1369         am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
1370
1371         hw_if->set_promiscuous_mode(pdata, pr_mode);
1372         hw_if->set_all_multicast_mode(pdata, am_mode);
1373
1374         hw_if->add_mac_addresses(pdata);
1375
1376         DBGPR("<--xgbe_set_rx_mode\n");
1377 }
1378
1379 static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
1380 {
1381         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1382         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1383         struct sockaddr *saddr = addr;
1384
1385         DBGPR("-->xgbe_set_mac_address\n");
1386
1387         if (!is_valid_ether_addr(saddr->sa_data))
1388                 return -EADDRNOTAVAIL;
1389
1390         memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
1391
1392         hw_if->set_mac_address(pdata, netdev->dev_addr);
1393
1394         DBGPR("<--xgbe_set_mac_address\n");
1395
1396         return 0;
1397 }
1398
1399 static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
1400 {
1401         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1402         int ret;
1403
1404         switch (cmd) {
1405         case SIOCGHWTSTAMP:
1406                 ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
1407                 break;
1408
1409         case SIOCSHWTSTAMP:
1410                 ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
1411                 break;
1412
1413         default:
1414                 ret = -EOPNOTSUPP;
1415         }
1416
1417         return ret;
1418 }
1419
1420 static int xgbe_change_mtu(struct net_device *netdev, int mtu)
1421 {
1422         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1423         int ret;
1424
1425         DBGPR("-->xgbe_change_mtu\n");
1426
1427         ret = xgbe_calc_rx_buf_size(netdev, mtu);
1428         if (ret < 0)
1429                 return ret;
1430
1431         pdata->rx_buf_size = ret;
1432         netdev->mtu = mtu;
1433
1434         xgbe_restart_dev(pdata, 0);
1435
1436         DBGPR("<--xgbe_change_mtu\n");
1437
1438         return 0;
1439 }
1440
1441 static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
1442                                                   struct rtnl_link_stats64 *s)
1443 {
1444         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1445         struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
1446
1447         DBGPR("-->%s\n", __func__);
1448
1449         pdata->hw_if.read_mmc_stats(pdata);
1450
1451         s->rx_packets = pstats->rxframecount_gb;
1452         s->rx_bytes = pstats->rxoctetcount_gb;
1453         s->rx_errors = pstats->rxframecount_gb -
1454                        pstats->rxbroadcastframes_g -
1455                        pstats->rxmulticastframes_g -
1456                        pstats->rxunicastframes_g;
1457         s->multicast = pstats->rxmulticastframes_g;
1458         s->rx_length_errors = pstats->rxlengtherror;
1459         s->rx_crc_errors = pstats->rxcrcerror;
1460         s->rx_fifo_errors = pstats->rxfifooverflow;
1461
1462         s->tx_packets = pstats->txframecount_gb;
1463         s->tx_bytes = pstats->txoctetcount_gb;
1464         s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
1465         s->tx_dropped = netdev->stats.tx_dropped;
1466
1467         DBGPR("<--%s\n", __func__);
1468
1469         return s;
1470 }
1471
1472 static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1473                                 u16 vid)
1474 {
1475         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1476         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1477
1478         DBGPR("-->%s\n", __func__);
1479
1480         set_bit(vid, pdata->active_vlans);
1481         hw_if->update_vlan_hash_table(pdata);
1482
1483         DBGPR("<--%s\n", __func__);
1484
1485         return 0;
1486 }
1487
1488 static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1489                                  u16 vid)
1490 {
1491         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1492         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1493
1494         DBGPR("-->%s\n", __func__);
1495
1496         clear_bit(vid, pdata->active_vlans);
1497         hw_if->update_vlan_hash_table(pdata);
1498
1499         DBGPR("<--%s\n", __func__);
1500
1501         return 0;
1502 }
1503
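     /* netpoll hook: with the device interrupt masked, invoke the ISR
      * synchronously so netconsole can make progress while normal
      * interrupt handling is unavailable.
      */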
1504 #ifdef CONFIG_NET_POLL_CONTROLLER
1505 static void xgbe_poll_controller(struct net_device *netdev)
1506 {
1507         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1508
1509         DBGPR("-->xgbe_poll_controller\n");
1510
1511         disable_irq(pdata->irq_number);
1512
1513         xgbe_isr(pdata->irq_number, pdata);
1514
1515         enable_irq(pdata->irq_number);
1516
1517         DBGPR("<--xgbe_poll_controller\n");
1518 }
1519 #endif /* End CONFIG_NET_POLL_CONTROLLER */
1520
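     /* Map the Tx queues onto traffic classes.  q2tc_map (built when
      * the queues were configured) is assumed to list queues grouped
      * by class, so each class covers a contiguous range
      * [offset, queue).  For example, eight Tx queues split evenly
      * over two classes would give TC0 -> TXq0-3 and TC1 -> TXq4-7.
      * A tc of 0 clears the configuration.
      */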
1521 static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
1522 {
1523         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1524         unsigned int offset, queue;
1525         u8 i;
1526
1527         if (tc && (tc != pdata->hw_feat.tc_cnt))
1528                 return -EINVAL;
1529
1530         if (tc) {
1531                 netdev_set_num_tc(netdev, tc);
1532                 for (i = 0, queue = 0, offset = 0; i < tc; i++) {
1533                         while ((queue < pdata->tx_q_count) &&
1534                                (pdata->q2tc_map[queue] == i))
1535                                 queue++;
1536
1537                         DBGPR("  TC%u using TXq%u-%u\n", i, offset, queue - 1);
1538                         netdev_set_tc_queue(netdev, i, queue - offset, offset);
1539                         offset = queue;
1540                 }
1541         } else {
1542                 netdev_reset_tc(netdev);
1543         }
1544
1545         return 0;
1546 }
1547
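     /* Toggle hardware offloads to match the requested feature flags,
      * comparing against the flags last programmed into the hardware
      * (netdev_features).  Only Rx checksum, VLAN stripping and VLAN
      * filtering have hardware state to update here.
      */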
1548 static int xgbe_set_features(struct net_device *netdev,
1549                              netdev_features_t features)
1550 {
1551         struct xgbe_prv_data *pdata = netdev_priv(netdev);
1552         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1553         netdev_features_t rxcsum, rxvlan, rxvlan_filter;
1554
1555         rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
1556         rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
1557         rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
1558
1559         if ((features & NETIF_F_RXCSUM) && !rxcsum)
1560                 hw_if->enable_rx_csum(pdata);
1561         else if (!(features & NETIF_F_RXCSUM) && rxcsum)
1562                 hw_if->disable_rx_csum(pdata);
1563
1564         if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
1565                 hw_if->enable_rx_vlan_stripping(pdata);
1566         else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
1567                 hw_if->disable_rx_vlan_stripping(pdata);
1568
1569         if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
1570                 hw_if->enable_rx_vlan_filtering(pdata);
1571         else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
1572                 hw_if->disable_rx_vlan_filtering(pdata);
1573
1574         pdata->netdev_features = features;
1575
1576         DBGPR("<--xgbe_set_features\n");
1577
1578         return 0;
1579 }
1580
1581 static const struct net_device_ops xgbe_netdev_ops = {
1582         .ndo_open               = xgbe_open,
1583         .ndo_stop               = xgbe_close,
1584         .ndo_start_xmit         = xgbe_xmit,
1585         .ndo_set_rx_mode        = xgbe_set_rx_mode,
1586         .ndo_set_mac_address    = xgbe_set_mac_address,
1587         .ndo_validate_addr      = eth_validate_addr,
1588         .ndo_do_ioctl           = xgbe_ioctl,
1589         .ndo_change_mtu         = xgbe_change_mtu,
1590         .ndo_get_stats64        = xgbe_get_stats64,
1591         .ndo_vlan_rx_add_vid    = xgbe_vlan_rx_add_vid,
1592         .ndo_vlan_rx_kill_vid   = xgbe_vlan_rx_kill_vid,
1593 #ifdef CONFIG_NET_POLL_CONTROLLER
1594         .ndo_poll_controller    = xgbe_poll_controller,
1595 #endif
1596         .ndo_setup_tc           = xgbe_setup_tc,
1597         .ndo_set_features       = xgbe_set_features,
1598 };
1599
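     /* xgbe_netdev_ops above is const; this accessor is declared (in
      * xgbe.h) with a non-const return type, hence the cast below.
      * Callers must treat the returned structure as read-only.
      */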
1600 struct net_device_ops *xgbe_get_netdev_ops(void)
1601 {
1602         return (struct net_device_ops *)&xgbe_netdev_ops;
1603 }
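
     /* Illustrative use (a hedged sketch; in the real driver the
      * hookup happens in xgbe-main.c's probe path):
      *
      *     netdev->netdev_ops = xgbe_get_netdev_ops();
      */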
1604
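     /* Re-populate the Rx buffers consumed by the hardware and advance
      * the ring by writing the last cleaned entry's descriptor address
      * to the channel's Rx tail pointer register.
      */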
1605 static void xgbe_rx_refresh(struct xgbe_channel *channel)
1606 {
1607         struct xgbe_prv_data *pdata = channel->pdata;
1608         struct xgbe_desc_if *desc_if = &pdata->desc_if;
1609         struct xgbe_ring *ring = channel->rx_ring;
1610         struct xgbe_ring_data *rdata;
1611
1612         desc_if->realloc_rx_buffer(channel);
1613
1614         /* Update the Rx Tail Pointer Register with address of
1615          * the last cleaned entry */
1616         rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
1617         XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
1618                           lower_32_bits(rdata->rdesc_dma));
1619 }
1620
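     /* Build an skb for a received packet from the split header data:
      * the header (or the whole frame, if it fit entirely within the
      * header buffer) is copied into the skb linear area, and *len is
      * reduced by the amount consumed so the caller can attach any
      * remainder as a page fragment.
      */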
1621 static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
1622                                        struct xgbe_ring_data *rdata,
1623                                        unsigned int *len)
1624 {
1625         struct net_device *netdev = pdata->netdev;
1626         struct sk_buff *skb;
1627         u8 *packet;
1628         unsigned int copy_len;
1629
1630         skb = netdev_alloc_skb_ip_align(netdev, rdata->rx_hdr.dma_len);
1631         if (!skb)
1632                 return NULL;
1633
1634         packet = page_address(rdata->rx_hdr.pa.pages) +
1635                  rdata->rx_hdr.pa.pages_offset;
1636         copy_len = (rdata->hdr_len) ? rdata->hdr_len : *len;
1637         copy_len = min(rdata->rx_hdr.dma_len, copy_len);
1638         skb_copy_to_linear_data(skb, packet, copy_len);
1639         skb_put(skb, copy_len);
1640
1641         *len -= copy_len;
1642
1643         return skb;
1644 }
1645
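     /* Reclaim completed Tx descriptors (at most XGBE_TX_DESC_MAX_PROC
      * per call), unmapping buffers and resetting descriptors for
      * re-use, and wake the subqueue once enough descriptors are free.
      */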
1646 static int xgbe_tx_poll(struct xgbe_channel *channel)
1647 {
1648         struct xgbe_prv_data *pdata = channel->pdata;
1649         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1650         struct xgbe_desc_if *desc_if = &pdata->desc_if;
1651         struct xgbe_ring *ring = channel->tx_ring;
1652         struct xgbe_ring_data *rdata;
1653         struct xgbe_ring_desc *rdesc;
1654         struct net_device *netdev = pdata->netdev;
1655         unsigned long flags;
1656         int processed = 0;
1657
1658         DBGPR("-->xgbe_tx_poll\n");
1659
1660         /* Nothing to do if there isn't a Tx ring for this channel */
1661         if (!ring)
1662                 return 0;
1663
1664         spin_lock_irqsave(&ring->lock, flags);
1665
1666         while ((processed < XGBE_TX_DESC_MAX_PROC) &&
1667                (ring->dirty < ring->cur)) {
1668                 rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
1669                 rdesc = rdata->rdesc;
1670
1671                 if (!hw_if->tx_complete(rdesc))
1672                         break;
1673
1674 #ifdef XGMAC_ENABLE_TX_DESC_DUMP
1675                 xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
1676 #endif
1677
1678                 /* Free the SKB and reset the descriptor for re-use */
1679                 desc_if->unmap_rdata(pdata, rdata);
1680                 hw_if->tx_desc_reset(rdata);
1681
1682                 processed++;
1683                 ring->dirty++;
1684         }
1685
1686         if ((ring->tx.queue_stopped == 1) &&
1687             (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
1688                 ring->tx.queue_stopped = 0;
1689                 netif_wake_subqueue(netdev, channel->queue_index);
1690         }
1691
1692         DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
1693
1694         spin_unlock_irqrestore(&ring->lock, flags);
1695
1696         return processed;
1697 }
1698
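     /* NAPI Rx processing for one channel.  A packet may span several
      * descriptors (plus a trailing context descriptor when a
      * timestamp is present), so if the budget expires mid-packet the
      * partial skb and flags are saved in the ring data and restored
      * on re-entry.
      */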
1699 static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
1700 {
1701         struct xgbe_prv_data *pdata = channel->pdata;
1702         struct xgbe_hw_if *hw_if = &pdata->hw_if;
1703         struct xgbe_ring *ring = channel->rx_ring;
1704         struct xgbe_ring_data *rdata;
1705         struct xgbe_packet_data *packet;
1706         struct net_device *netdev = pdata->netdev;
1707         struct sk_buff *skb;
1708         struct skb_shared_hwtstamps *hwtstamps;
1709         unsigned int incomplete, error, context_next, context;
1710         unsigned int len, put_len, max_len;
1711         unsigned int received = 0;
1712         int packet_count = 0;
1713
1714         DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
1715
1716         /* Nothing to do if there isn't a Rx ring for this channel */
1717         if (!ring)
1718                 return 0;
1719
1720         rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1721         packet = &ring->packet_data;
1722         while (packet_count < budget) {
1723                 DBGPR("  cur = %d\n", ring->cur);
1724
1725                 /* First time in the loop, see if we need to restore state */
1726                 if (!received && rdata->state_saved) {
1727                         incomplete = rdata->state.incomplete;
1728                         context_next = rdata->state.context_next;
1729                         skb = rdata->state.skb;
1730                         error = rdata->state.error;
1731                         len = rdata->state.len;
1732                 } else {
1733                         memset(packet, 0, sizeof(*packet));
1734                         incomplete = 0;
1735                         context_next = 0;
1736                         skb = NULL;
1737                         error = 0;
1738                         len = 0;
1739                 }
1740
1741 read_again:
1742                 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1743
1744                 if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
1745                         xgbe_rx_refresh(channel);
1746
1747                 if (hw_if->dev_read(channel))
1748                         break;
1749
1750                 received++;
1751                 ring->cur++;
1752                 ring->dirty++;
1753
1754                 incomplete = XGMAC_GET_BITS(packet->attributes,
1755                                             RX_PACKET_ATTRIBUTES,
1756                                             INCOMPLETE);
1757                 context_next = XGMAC_GET_BITS(packet->attributes,
1758                                               RX_PACKET_ATTRIBUTES,
1759                                               CONTEXT_NEXT);
1760                 context = XGMAC_GET_BITS(packet->attributes,
1761                                          RX_PACKET_ATTRIBUTES,
1762                                          CONTEXT);
1763
1764                 /* Earlier error, just drain the remaining data */
1765                 if ((incomplete || context_next) && error)
1766                         goto read_again;
1767
1768                 if (error || packet->errors) {
1769                         if (packet->errors)
1770                                 DBGPR("Error in received packet\n");
1771                         dev_kfree_skb(skb);
1772                         goto next_packet;
1773                 }
1774
1775                 if (!context) {
1776                         put_len = rdata->len - len;
1777                         len += put_len;
1778
1779                         if (!skb) {
1780                                 dma_sync_single_for_cpu(pdata->dev,
1781                                                         rdata->rx_hdr.dma,
1782                                                         rdata->rx_hdr.dma_len,
1783                                                         DMA_FROM_DEVICE);
1784
1785                                 skb = xgbe_create_skb(pdata, rdata, &put_len);
1786                                 if (!skb) {
1787                                         error = 1;
1788                                         goto read_again;
1789                                 }
1790                         }
1791
1792                         if (put_len) {
1793                                 dma_sync_single_for_cpu(pdata->dev,
1794                                                         rdata->rx_buf.dma,
1795                                                         rdata->rx_buf.dma_len,
1796                                                         DMA_FROM_DEVICE);
1797
1798                                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1799                                                 rdata->rx_buf.pa.pages,
1800                                                 rdata->rx_buf.pa.pages_offset,
1801                                                 put_len, rdata->rx_buf.dma_len);
1802                                 rdata->rx_buf.pa.pages = NULL;
1803                         }
1804                 }
1805
1806                 if (incomplete || context_next)
1807                         goto read_again;
1808
1809                 /* Stray Context Descriptor? */
1810                 if (!skb)
1811                         goto next_packet;
1812
1813                 /* Be sure we don't exceed the configured MTU */
1814                 max_len = netdev->mtu + ETH_HLEN;
1815                 if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1816                     (skb->protocol == htons(ETH_P_8021Q)))
1817                         max_len += VLAN_HLEN;
1818
1819                 if (skb->len > max_len) {
1820                         DBGPR("packet length exceeds configured MTU\n");
1821                         dev_kfree_skb(skb);
1822                         goto next_packet;
1823                 }
1824
1825 #ifdef XGMAC_ENABLE_RX_PKT_DUMP
1826                 xgbe_print_pkt(netdev, skb, false);
1827 #endif
1828
1829                 skb_checksum_none_assert(skb);
1830                 if (XGMAC_GET_BITS(packet->attributes,
1831                                    RX_PACKET_ATTRIBUTES, CSUM_DONE))
1832                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1833
1834                 if (XGMAC_GET_BITS(packet->attributes,
1835                                    RX_PACKET_ATTRIBUTES, VLAN_CTAG))
1836                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1837                                                packet->vlan_ctag);
1838
1839                 if (XGMAC_GET_BITS(packet->attributes,
1840                                    RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
1841                         u64 nsec;
1842
1843                         nsec = timecounter_cyc2time(&pdata->tstamp_tc,
1844                                                     packet->rx_tstamp);
1845                         hwtstamps = skb_hwtstamps(skb);
1846                         hwtstamps->hwtstamp = ns_to_ktime(nsec);
1847                 }
1848
1849                 skb->dev = netdev;
1850                 skb->protocol = eth_type_trans(skb, netdev);
1851                 skb_record_rx_queue(skb, channel->queue_index);
1852                 skb_mark_napi_id(skb, &pdata->napi);
1853
1854                 netdev->last_rx = jiffies;
1855                 napi_gro_receive(&pdata->napi, skb);
1856
1857 next_packet:
1858                 packet_count++;
1859         }
1860
1861         /* Check if we need to save state before leaving */
1862         if (received && (incomplete || context_next)) {
1863                 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1864                 rdata->state_saved = 1;
1865                 rdata->state.incomplete = incomplete;
1866                 rdata->state.context_next = context_next;
1867                 rdata->state.skb = skb;
1868                 rdata->state.len = len;
1869                 rdata->state.error = error;
1870         }
1871
1872         DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
1873
1874         return packet_count;
1875 }
1876
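     /* Main NAPI poll: service every channel, cleaning the Tx ring
      * first and splitting the Rx budget evenly across the Rx rings,
      * looping until the budget is exhausted or a pass completes no
      * work.  Interrupts are re-enabled only when less than the full
      * budget was consumed.
      */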
1877 static int xgbe_poll(struct napi_struct *napi, int budget)
1878 {
1879         struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
1880                                                    napi);
1881         struct xgbe_channel *channel;
1882         int ring_budget;
1883         int processed, last_processed;
1884         unsigned int i;
1885
1886         DBGPR("-->xgbe_poll: budget=%d\n", budget);
1887
1888         processed = 0;
1889         ring_budget = budget / pdata->rx_ring_count;
1890         do {
1891                 last_processed = processed;
1892
1893                 channel = pdata->channel;
1894                 for (i = 0; i < pdata->channel_count; i++, channel++) {
1895                         /* Cleanup Tx ring first */
1896                         xgbe_tx_poll(channel);
1897
1898                         /* Process Rx ring next */
1899                         if (ring_budget > (budget - processed))
1900                                 ring_budget = budget - processed;
1901                         processed += xgbe_rx_poll(channel, ring_budget);
1902                 }
1903         } while ((processed < budget) && (processed != last_processed));
1904
1905         /* If we processed everything, we are done */
1906         if (processed < budget) {
1907                 /* Turn off polling */
1908                 napi_complete(napi);
1909
1910                 /* Enable Tx and Rx interrupts */
1911                 xgbe_enable_rx_tx_ints(pdata);
1912         }
1913
1914         DBGPR("<--xgbe_poll: processed = %d\n", processed);
1915
1916         return processed;
1917 }
1918
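     /* Debug helpers below dump raw descriptors and packet contents;
      * callers guard them with XGMAC_ENABLE_TX_DESC_DUMP and
      * XGMAC_ENABLE_RX_PKT_DUMP (see the #ifdef sites above).
      */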
1919 void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
1920                        unsigned int count, unsigned int flag)
1921 {
1922         struct xgbe_ring_data *rdata;
1923         struct xgbe_ring_desc *rdesc;
1924
1925         while (count--) {
1926                 rdata = XGBE_GET_DESC_DATA(ring, idx);
1927                 rdesc = rdata->rdesc;
1928                 pr_alert("TX_NORMAL_DESC[%u %s] = %08x:%08x:%08x:%08x\n", idx,
1929                          (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
1930                          le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
1931                          le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
1932                 idx++;
1933         }
1934 }
1935
1936 void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
1937                        unsigned int idx)
1938 {
1939         pr_alert("RX_NORMAL_DESC[%u RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
1940                  le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
1941                  le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
1942 }
1943
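     /* Dump an skb as hex, 32 bytes per output line, with extra
      * spacing every 4 and 16 bytes for readability.
      */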
1944 void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
1945 {
1946         struct ethhdr *eth = (struct ethhdr *)skb->data;
1947         unsigned char *buf = skb->data;
1948         char buffer[128];       /* plain char: snprintf() expects a char * */
1949         unsigned int i, j;
1950
1951         netdev_alert(netdev, "\n************** SKB dump ****************\n");
1952
1953         netdev_alert(netdev, "%s packet of %d bytes\n",
1954                      (tx_rx ? "TX" : "RX"), skb->len);
1955
1956         netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
1957         netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
1958         netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
1959
1960         for (i = 0, j = 0; i < skb->len;) {
1961                 j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
1962                               buf[i++]);
1963
1964                 if ((i % 32) == 0) {
1965                         netdev_alert(netdev, "  0x%04x: %s\n", i - 32, buffer);
1966                         j = 0;
1967                 } else if ((i % 16) == 0) {
1968                         buffer[j++] = ' ';
1969                         buffer[j++] = ' ';
1970                 } else if ((i % 4) == 0) {
1971                         buffer[j++] = ' ';
1972                 }
1973         }
1974         if (i % 32) {
                     /* Spacer characters appended after the last snprintf()
                      * call may have overwritten its terminating NUL, so
                      * re-terminate before printing the final partial line.
                      */
                     buffer[j] = '\0';
1975                 netdev_alert(netdev, "  0x%04x: %s\n", i - (i % 32), buffer);
             }
1976
1977         netdev_alert(netdev, "\n************** SKB dump ****************\n");
1978 }