2 * drivers/net/ethernet/freescale/gianfar_ethtool.c
4 * Gianfar Ethernet Driver
5 * Ethtool support for Gianfar Enet
6 * Based on e1000 ethtool support
9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
12 * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
14 * This software may be used and distributed according to
15 * the terms of the GNU Public License, Version 2, incorporated herein
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 #include <linux/kernel.h>
22 #include <linux/string.h>
23 #include <linux/errno.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/net_tstamp.h>
29 #include <linux/skbuff.h>
30 #include <linux/spinlock.h>
35 #include <asm/uaccess.h>
36 #include <linux/module.h>
37 #include <linux/crc32.h>
38 #include <asm/types.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/phy.h>
42 #include <linux/sort.h>
43 #include <linux/if_vlan.h>
47 #define GFAR_MAX_COAL_USECS 0xffff
48 #define GFAR_MAX_COAL_FRAMES 0xff
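/* Upper bounds that gfar_scoalesce() enforces on the ethtool
 * {rx,tx}-usecs and {rx,tx}-frames coalescing parameters.
 */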
49 static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
51 static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
52 static int gfar_gcoalesce(struct net_device *dev,
53 struct ethtool_coalesce *cvals);
54 static int gfar_scoalesce(struct net_device *dev,
55 struct ethtool_coalesce *cvals);
56 static void gfar_gringparam(struct net_device *dev,
57 struct ethtool_ringparam *rvals);
58 static int gfar_sringparam(struct net_device *dev,
59 struct ethtool_ringparam *rvals);
60 static void gfar_gdrvinfo(struct net_device *dev,
61 struct ethtool_drvinfo *drvinfo);
63 static const char stat_gstrings[][ETH_GSTRING_LEN] = {
64 "rx-large-frame-errors",
65 "rx-short-frame-errors",
66 "rx-non-octet-errors",
71 "rx-truncated-frames",
75 "rx-skb-missing-errors",
78 "tx-rx-65-127-frames",
79 "tx-rx-128-255-frames",
80 "tx-rx-256-511-frames",
81 "tx-rx-512-1023-frames",
82 "tx-rx-1024-1518-frames",
83 "tx-rx-1519-1522-good-vlan",
87 "receive-multicast-packet",
88 "receive-broadcast-packet",
89 "rx-control-frame-packets",
90 "rx-pause-frame-packets",
93 "rx-frame-length-error",
95 "rx-carrier-sense-error",
96 "rx-undersize-packets",
97 "rx-oversize-packets",
98 "rx-fragmented-frames",
103 "tx-multicast-packets",
104 "tx-broadcast-packets",
105 "tx-pause-control-frames",
106 "tx-deferral-packets",
107 "tx-excessive-deferral-packets",
108 "tx-single-collision-packets",
109 "tx-multiple-collision-packets",
110 "tx-late-collision-packets",
111 "tx-excessive-collision-packets",
112 "tx-total-collision",
118 "tx-oversize-frames",
119 "tx-undersize-frames",
120 "tx-fragmented-frames",
123 /* Fill in a buffer with the strings which correspond to the statistics */
125 static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
127 struct gfar_private *priv = netdev_priv(dev);
129 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
130 memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
132 memcpy(buf, stat_gstrings,
133 GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
136 /* Fill in an array of 64-bit statistics from various sources.
137 * This array will be appended to the end of the ethtool_stats
138 * structure, and returned to user space
140 static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
144 struct gfar_private *priv = netdev_priv(dev);
145 struct gfar __iomem *regs = priv->gfargrp[0].regs;
146 atomic64_t *extra = (atomic64_t *)&priv->extra_stats;
148 for (i = 0; i < GFAR_EXTRA_STATS_LEN; i++)
149 buf[i] = atomic64_read(&extra[i]);
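/* When an RMON block is present, its hardware counters are read live
 * from MMIO below and appended after the software-kept extra_stats.
 */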
151 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
152 u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
154 for (; i < GFAR_STATS_LEN; i++, rmon++)
155 buf[i] = (u64) gfar_read(rmon);
159 static int gfar_sset_count(struct net_device *dev, int sset)
161 struct gfar_private *priv = netdev_priv(dev);
165 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON)
166 return GFAR_STATS_LEN;
168 return GFAR_EXTRA_STATS_LEN;
174 /* Fills in the drvinfo structure with some basic info */
175 static void gfar_gdrvinfo(struct net_device *dev,
176 struct ethtool_drvinfo *drvinfo)
178 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
179 strlcpy(drvinfo->version, gfar_driver_version,
180 sizeof(drvinfo->version));
181 strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
182 strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
183 drvinfo->regdump_len = 0;
184 drvinfo->eedump_len = 0;
188 static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
190 struct gfar_private *priv = netdev_priv(dev);
191 struct phy_device *phydev = priv->phydev;
196 return phy_ethtool_sset(phydev, cmd);
200 /* Return the current settings in the ethtool_cmd structure */
201 static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
203 struct gfar_private *priv = netdev_priv(dev);
204 struct phy_device *phydev = priv->phydev;
205 struct gfar_priv_rx_q *rx_queue = NULL;
206 struct gfar_priv_tx_q *tx_queue = NULL;
210 tx_queue = priv->tx_queue[0];
211 rx_queue = priv->rx_queue[0];
213 /* etsec-1.7 and older versions have only one pair of txic
214 * and rxic regs although they support multiple queues */
215 cmd->maxtxpkt = get_icft_value(tx_queue->txic);
216 cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
218 return phy_ethtool_gset(phydev, cmd);
221 /* Return the length of the register structure */
222 static int gfar_reglen(struct net_device *dev)
224 return sizeof (struct gfar);
227 /* Return a dump of the GFAR register space */
228 static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
232 struct gfar_private *priv = netdev_priv(dev);
233 u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
234 u32 *buf = (u32 *) regbuf;
236 for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
237 buf[i] = gfar_read(&theregs[i]);
240 /* Convert microseconds to ethernet clock ticks; the conversion factor
241 * depends on what speed the controller is running at */
242 static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
247 /* The timer is different, depending on the interface speed */
248 switch (priv->phydev->speed) {
250 count = GFAR_GBIT_TIME;
253 count = GFAR_100_TIME;
257 count = GFAR_10_TIME;
261 /* Make sure we return a number greater than 0 if usecs is > 0 */
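/* The expression below is round-up integer division, i.e.
 * DIV_ROUND_UP(usecs * 1000, count), where count is the number of
 * nanoseconds per interrupt-coalescing timer tick at the current link
 * speed.  E.g. usecs = 30 with count = 512 (a plausible gigabit value)
 * gives (30000 + 511) / 512 = 59 ticks.
 */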
263 return (usecs * 1000 + count - 1) / count;
266 /* Convert ethernet clock ticks to microseconds */
267 static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
272 /* The timer is different, depending on the interface speed */
273 switch (priv->phydev->speed) {
275 count = GFAR_GBIT_TIME;
278 count = GFAR_100_TIME;
282 count = GFAR_10_TIME;
286 /* Make sure we return a number greater than 0 */
287 /* if ticks is > 0 */
288 return (ticks * count) / 1000;
291 /* Get the coalescing parameters, and put them in the cvals structure */
293 static int gfar_gcoalesce(struct net_device *dev,
294 struct ethtool_coalesce *cvals)
296 struct gfar_private *priv = netdev_priv(dev);
297 struct gfar_priv_rx_q *rx_queue = NULL;
298 struct gfar_priv_tx_q *tx_queue = NULL;
299 unsigned long rxtime;
300 unsigned long rxcount;
301 unsigned long txtime;
302 unsigned long txcount;
304 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
307 if (NULL == priv->phydev)
310 rx_queue = priv->rx_queue[0];
311 tx_queue = priv->tx_queue[0];
313 rxtime = get_ictt_value(rx_queue->rxic);
314 rxcount = get_icft_value(rx_queue->rxic);
315 txtime = get_ictt_value(tx_queue->txic);
316 txcount = get_icft_value(tx_queue->txic);
317 cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
318 cvals->rx_max_coalesced_frames = rxcount;
320 cvals->tx_coalesce_usecs = gfar_ticks2usecs(priv, txtime);
321 cvals->tx_max_coalesced_frames = txcount;
323 cvals->use_adaptive_rx_coalesce = 0;
324 cvals->use_adaptive_tx_coalesce = 0;
326 cvals->pkt_rate_low = 0;
327 cvals->rx_coalesce_usecs_low = 0;
328 cvals->rx_max_coalesced_frames_low = 0;
329 cvals->tx_coalesce_usecs_low = 0;
330 cvals->tx_max_coalesced_frames_low = 0;
332 /* When the packet rate is below pkt_rate_high but above
333 * pkt_rate_low (both measured in packets per second) the
334 * normal {rx,tx}_* coalescing parameters are used.
337 /* When the packet rate (measured in packets per second)
338 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
341 cvals->pkt_rate_high = 0;
342 cvals->rx_coalesce_usecs_high = 0;
343 cvals->rx_max_coalesced_frames_high = 0;
344 cvals->tx_coalesce_usecs_high = 0;
345 cvals->tx_max_coalesced_frames_high = 0;
347 /* How often to do adaptive coalescing packet rate sampling,
348 * measured in seconds. Must not be zero.
350 cvals->rate_sample_interval = 0;
355 /* Change the coalescing values.
356 * Both cvals->*_usecs and cvals->*_frames have to be > 0
357 * in order for coalescing to be active
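 * For example (assuming a standard user-space ethtool binary),
 *   ethtool -C eth0 rx-usecs 30 rx-frames 16 tx-usecs 60 tx-frames 32
 * arrives here in cvals->{rx,tx}_coalesce_usecs and
 * cvals->{rx,tx}_max_coalesced_frames.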
359 static int gfar_scoalesce(struct net_device *dev,
360 struct ethtool_coalesce *cvals)
362 struct gfar_private *priv = netdev_priv(dev);
365 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
368 /* Set up rx coalescing */
369 /* For now, we enable/disable coalescing for all
370 * queues together in case of eTSEC2; this will be modified
371 * along with the ethtool interface
373 if ((cvals->rx_coalesce_usecs == 0) ||
374 (cvals->rx_max_coalesced_frames == 0)) {
375 for (i = 0; i < priv->num_rx_queues; i++)
376 priv->rx_queue[i]->rxcoalescing = 0;
378 for (i = 0; i < priv->num_rx_queues; i++)
379 priv->rx_queue[i]->rxcoalescing = 1;
382 if (NULL == priv->phydev)
385 /* Check the bounds of the values */
386 if (cvals->rx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
387 netdev_info(dev, "Coalescing is limited to %d microseconds\n",
388 GFAR_MAX_COAL_USECS);
392 if (cvals->rx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
393 netdev_info(dev, "Coalescing is limited to %d frames\n",
394 GFAR_MAX_COAL_FRAMES);
398 for (i = 0; i < priv->num_rx_queues; i++) {
399 priv->rx_queue[i]->rxic = mk_ic_value(
400 cvals->rx_max_coalesced_frames,
401 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
404 /* Set up tx coalescing */
405 if ((cvals->tx_coalesce_usecs == 0) ||
406 (cvals->tx_max_coalesced_frames == 0)) {
407 for (i = 0; i < priv->num_tx_queues; i++)
408 priv->tx_queue[i]->txcoalescing = 0;
410 for (i = 0; i < priv->num_tx_queues; i++)
411 priv->tx_queue[i]->txcoalescing = 1;
414 /* Check the bounds of the values */
415 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
416 netdev_info(dev, "Coalescing is limited to %d microseconds\n",
417 GFAR_MAX_COAL_USECS);
421 if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
422 netdev_info(dev, "Coalescing is limited to %d frames\n",
423 GFAR_MAX_COAL_FRAMES);
427 for (i = 0; i < priv->num_tx_queues; i++) {
428 priv->tx_queue[i]->txic = mk_ic_value(
429 cvals->tx_max_coalesced_frames,
430 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
433 gfar_configure_coalescing_all(priv);
438 /* Fills in rvals with the current ring parameters. Currently,
439 * rx, rx_mini, and rx_jumbo rings are the same size, as mini and
440 * jumbo are ignored by the driver */
441 static void gfar_gringparam(struct net_device *dev,
442 struct ethtool_ringparam *rvals)
444 struct gfar_private *priv = netdev_priv(dev);
445 struct gfar_priv_tx_q *tx_queue = NULL;
446 struct gfar_priv_rx_q *rx_queue = NULL;
448 tx_queue = priv->tx_queue[0];
449 rx_queue = priv->rx_queue[0];
451 rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
452 rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
453 rvals->rx_jumbo_max_pending = GFAR_RX_MAX_RING_SIZE;
454 rvals->tx_max_pending = GFAR_TX_MAX_RING_SIZE;
456 /* Values changeable by the user. The valid values are
457 * in the range 1 to the "*_max_pending" counterpart above.
459 rvals->rx_pending = rx_queue->rx_ring_size;
460 rvals->rx_mini_pending = rx_queue->rx_ring_size;
461 rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
462 rvals->tx_pending = tx_queue->tx_ring_size;
465 /* Change the current ring parameters, stopping the controller if
466 * necessary so that we don't mess things up while we're in motion.
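 * For example (assuming a standard ethtool binary), "ethtool -G eth0 rx 512 tx 512"
 * lands here; both sizes must be powers of two no larger than the
 * GFAR_{RX,TX}_MAX_RING_SIZE limits checked below.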
468 static int gfar_sringparam(struct net_device *dev,
469 struct ethtool_ringparam *rvals)
471 struct gfar_private *priv = netdev_priv(dev);
474 if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
477 if (!is_power_of_2(rvals->rx_pending)) {
478 netdev_err(dev, "Ring sizes must be a power of 2\n");
482 if (rvals->tx_pending > GFAR_TX_MAX_RING_SIZE)
485 if (!is_power_of_2(rvals->tx_pending)) {
486 netdev_err(dev, "Ring sizes must be a power of 2\n");
490 if (dev->flags & IFF_UP)
493 /* Change the sizes */
494 for (i = 0; i < priv->num_rx_queues; i++)
495 priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
497 for (i = 0; i < priv->num_tx_queues; i++)
498 priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
500 /* Rebuild the rings with the new size */
501 if (dev->flags & IFF_UP) {
502 err = startup_gfar(dev);
503 netif_tx_wake_all_queues(dev);
508 static void gfar_gpauseparam(struct net_device *dev,
509 struct ethtool_pauseparam *epause)
511 struct gfar_private *priv = netdev_priv(dev);
513 epause->autoneg = !!priv->pause_aneg_en;
514 epause->rx_pause = !!priv->rx_pause_en;
515 epause->tx_pause = !!priv->tx_pause_en;
518 static int gfar_spauseparam(struct net_device *dev,
519 struct ethtool_pauseparam *epause)
521 struct gfar_private *priv = netdev_priv(dev);
522 struct phy_device *phydev = priv->phydev;
523 struct gfar __iomem *regs = priv->gfargrp[0].regs;
526 if (!(phydev->supported & SUPPORTED_Pause) ||
527 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
528 (epause->rx_pause != epause->tx_pause)))
531 priv->rx_pause_en = priv->tx_pause_en = 0;
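/* Map the requested rx/tx pause settings onto the 802.3 flow control
 * advertisement bits: Pause for the symmetric case, Asym_Pause (alone
 * or combined with Pause) for the one-direction cases.
 */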
532 if (epause->rx_pause) {
533 priv->rx_pause_en = 1;
535 if (epause->tx_pause) {
536 priv->tx_pause_en = 1;
537 /* FLOW_CTRL_RX & TX */
538 newadv = ADVERTISED_Pause;
539 } else /* FLOW_CTRL_RX */
540 newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
541 } else if (epause->tx_pause) {
542 priv->tx_pause_en = 1;
544 newadv = ADVERTISED_Asym_Pause;
549 priv->pause_aneg_en = 1;
551 priv->pause_aneg_en = 0;
553 oldadv = phydev->advertising &
554 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
555 if (oldadv != newadv) {
556 phydev->advertising &=
557 ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
558 phydev->advertising |= newadv;
560 /* inform link partner of our
561 * new flow ctrl settings
563 return phy_start_aneg(phydev);
565 if (!epause->autoneg) {
567 tempval = gfar_read(&regs->maccfg1);
568 tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
569 if (priv->tx_pause_en)
570 tempval |= MACCFG1_TX_FLOW;
571 if (priv->rx_pause_en)
572 tempval |= MACCFG1_RX_FLOW;
573 gfar_write(&regs->maccfg1, tempval);
580 int gfar_set_features(struct net_device *dev, netdev_features_t features)
582 netdev_features_t changed = dev->features ^ features;
585 if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
586 gfar_vlan_mode(dev, features);
588 if (!(changed & NETIF_F_RXCSUM))
591 if (dev->flags & IFF_UP) {
592 /* Now we take down the rings to rebuild them */
595 dev->features = features;
597 err = startup_gfar(dev);
598 netif_tx_wake_all_queues(dev);
603 static uint32_t gfar_get_msglevel(struct net_device *dev)
605 struct gfar_private *priv = netdev_priv(dev);
607 return priv->msg_enable;
610 static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
612 struct gfar_private *priv = netdev_priv(dev);
614 priv->msg_enable = data;
618 static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
620 struct gfar_private *priv = netdev_priv(dev);
622 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) {
623 wol->supported = WAKE_MAGIC;
624 wol->wolopts = priv->wol_en ? WAKE_MAGIC : 0;
626 wol->supported = wol->wolopts = 0;
630 static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
632 struct gfar_private *priv = netdev_priv(dev);
635 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
639 if (wol->wolopts & ~WAKE_MAGIC)
642 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
644 spin_lock_irqsave(&priv->bflock, flags);
645 priv->wol_en = !!device_may_wakeup(&dev->dev);
646 spin_unlock_irqrestore(&priv->bflock, flags);
652 static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
654 u32 fcr = 0x0, fpr = FPR_FILER_MASK;
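/* Each hash field requested in ethflow below gets an RQFCR/RQFPR hash
 * rule (two for the MAC destination address, high and low halves)
 * written at cur_filer_idx, moving one filer entry downwards per rule.
 */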
656 if (ethflow & RXH_L2DA) {
657 fcr = RQFCR_PID_DAH | RQFCR_CMP_NOMATCH |
658 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
659 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
660 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
661 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
662 priv->cur_filer_idx = priv->cur_filer_idx - 1;
664 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
665 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
666 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
667 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
668 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
669 priv->cur_filer_idx = priv->cur_filer_idx - 1;
672 if (ethflow & RXH_VLAN) {
673 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
674 RQFCR_AND | RQFCR_HASHTBL_0;
675 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
676 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
677 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
678 priv->cur_filer_idx = priv->cur_filer_idx - 1;
681 if (ethflow & RXH_IP_SRC) {
682 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
683 RQFCR_AND | RQFCR_HASHTBL_0;
684 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
685 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
686 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
687 priv->cur_filer_idx = priv->cur_filer_idx - 1;
690 if (ethflow & (RXH_IP_DST)) {
691 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
692 RQFCR_AND | RQFCR_HASHTBL_0;
693 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
694 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
695 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
696 priv->cur_filer_idx = priv->cur_filer_idx - 1;
699 if (ethflow & RXH_L3_PROTO) {
700 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
701 RQFCR_AND | RQFCR_HASHTBL_0;
702 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
703 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
704 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
705 priv->cur_filer_idx = priv->cur_filer_idx - 1;
708 if (ethflow & RXH_L4_B_0_1) {
709 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
710 RQFCR_AND | RQFCR_HASHTBL_0;
711 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
712 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
713 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
714 priv->cur_filer_idx = priv->cur_filer_idx - 1;
717 if (ethflow & RXH_L4_B_2_3) {
718 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
719 RQFCR_AND | RQFCR_HASHTBL_0;
720 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
721 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
722 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
723 priv->cur_filer_idx = priv->cur_filer_idx - 1;
727 static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
730 unsigned int last_rule_idx = priv->cur_filer_idx;
731 unsigned int cmp_rqfpr;
732 unsigned int *local_rqfpr;
733 unsigned int *local_rqfcr;
734 int i = 0x0, k = 0x0;
735 int j = MAX_FILER_IDX, l = 0x0;
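/* Rough plan: find the parse rule that opens the cluster for this flow
 * class, pop the rules following it into local_rqfpr/local_rqfcr, program
 * the new hash rules via ethflow_to_filer_rules(), then write the popped
 * rules back behind them.
 */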
738 local_rqfpr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
740 local_rqfcr = kmalloc_array(MAX_FILER_IDX + 1, sizeof(unsigned int),
742 if (!local_rqfpr || !local_rqfcr) {
749 cmp_rqfpr = RQFPR_IPV4 | RQFPR_TCP;
752 cmp_rqfpr = RQFPR_IPV4 | RQFPR_UDP;
755 cmp_rqfpr = RQFPR_IPV6 | RQFPR_TCP;
758 cmp_rqfpr = RQFPR_IPV6 | RQFPR_UDP;
761 netdev_err(priv->ndev,
762 "Right now this class is not supported\n");
767 for (i = 0; i < MAX_FILER_IDX + 1; i++) {
768 local_rqfpr[j] = priv->ftp_rqfpr[i];
769 local_rqfcr[j] = priv->ftp_rqfcr[i];
771 if ((priv->ftp_rqfcr[i] ==
772 (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
773 (priv->ftp_rqfpr[i] == cmp_rqfpr))
777 if (i == MAX_FILER_IDX + 1) {
778 netdev_err(priv->ndev,
779 "No parse rule found, can't create hash rules\n");
784 /* If a match was found, it marks the start of a cluster rule;
785 * if it was already programmed, we need to overwrite these rules
787 for (l = i+1; l < MAX_FILER_IDX; l++) {
788 if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
789 !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
790 priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
791 RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
792 priv->ftp_rqfpr[l] = FPR_FILER_MASK;
793 gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
798 if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
799 (priv->ftp_rqfcr[l] & RQFCR_AND))
802 local_rqfpr[j] = priv->ftp_rqfpr[l];
803 local_rqfcr[j] = priv->ftp_rqfcr[l];
808 priv->cur_filer_idx = l - 1;
812 ethflow_to_filer_rules(priv, ethflow);
814 /* Write back the popped out rules again */
815 for (k = j+1; k < MAX_FILER_IDX; k++) {
816 priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
817 priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
818 gfar_write_filer(priv, priv->cur_filer_idx,
819 local_rqfcr[k], local_rqfpr[k]);
820 if (!priv->cur_filer_idx)
822 priv->cur_filer_idx = priv->cur_filer_idx - 1;
831 static int gfar_set_hash_opts(struct gfar_private *priv,
832 struct ethtool_rxnfc *cmd)
834 /* write the filer rules here */
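/* e.g. (assuming a standard user-space ethtool binary)
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn
 * requests hashing on src/dst IP address and src/dst port for TCP/IPv4
 */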
835 if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
841 static int gfar_check_filer_hardware(struct gfar_private *priv)
843 struct gfar __iomem *regs = priv->gfargrp[0].regs;
846 /* Check if we are in FIFO mode */
847 i = gfar_read(&regs->ecntrl);
849 if (i == ECNTRL_FIFM) {
850 netdev_notice(priv->ndev, "Interface in FIFO mode\n");
851 i = gfar_read(&regs->rctrl);
852 i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
853 if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
854 netdev_info(priv->ndev,
855 "Receive Queue Filtering enabled\n");
857 netdev_warn(priv->ndev,
858 "Receive Queue Filtering disabled\n");
862 /* Or in standard mode */
864 i = gfar_read(&regs->rctrl);
865 i &= RCTRL_PRSDEP_MASK;
866 if (i == RCTRL_PRSDEP_MASK) {
867 netdev_info(priv->ndev,
868 "Receive Queue Filtering enabled\n");
870 netdev_warn(priv->ndev,
871 "Receive Queue Filtering disabled\n");
876 /* Set the properties for the arbitrary filer rule
877 * to the first 4 Layer-4 bytes
879 gfar_write(&regs->rbifx, 0xC0C1C2C3);
883 static int gfar_comp_asc(const void *a, const void *b)
885 return memcmp(a, b, 4);
888 static int gfar_comp_desc(const void *a, const void *b)
890 return -memcmp(a, b, 4);
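/* gfar_comp_asc()/gfar_comp_desc() are sort() comparators over the
 * 4-byte mask value of a gfar_mask_entry; gfar_swap() below is the
 * matching swap callback.
 */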
893 static void gfar_swap(void *a, void *b, int size)
904 /* Write a mask to filer cache */
905 static void gfar_set_mask(u32 mask, struct filer_table *tab)
907 tab->fe[tab->index].ctrl = RQFCR_AND | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
908 tab->fe[tab->index].prop = mask;
912 /* Sets parse bits (e.g. IP or TCP) */
913 static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
915 gfar_set_mask(mask, tab);
916 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
918 tab->fe[tab->index].prop = value;
922 static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
923 struct filer_table *tab)
925 gfar_set_mask(mask, tab);
926 tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
927 tab->fe[tab->index].prop = value;
931 /* For setting a tuple of value and mask of type flag
933 * IP-Src = 10.0.0.0/255.0.0.0
934 * value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
936 * Ethtool gives us value=0 and mask=~0 for a don't-care tuple;
937 * for a don't-care mask it gives us a 0
939 * The don't-care check and the mask adjustment for mask=0 are done for VLAN
940 * and MAC stuff at an upper level (due to missing information on this level).
941 * For these we can discard them if they are value=0 and mask=0.
943 * Further, all masks are one-padded for better hardware efficiency.
945 static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
946 struct filer_table *tab)
953 mask |= RQFCR_PID_PRI_MASK;
958 if (!~(mask | RQFCR_PID_L4P_MASK))
963 mask |= RQFCR_PID_L4P_MASK;
969 mask |= RQFCR_PID_VID_MASK;
975 if (!~(mask | RQFCR_PID_PORT_MASK))
980 mask |= RQFCR_PID_PORT_MASK;
989 mask |= RQFCR_PID_MAC_MASK;
991 /* for all real 32bit masks */
999 gfar_set_general_attribute(value, mask, flag, tab);
1002 /* Translates value and mask for UDP, TCP or SCTP */
1003 static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
1004 struct ethtool_tcpip4_spec *mask,
1005 struct filer_table *tab)
1007 gfar_set_attribute(be32_to_cpu(value->ip4src),
1008 be32_to_cpu(mask->ip4src),
1009 RQFCR_PID_SIA, tab);
1010 gfar_set_attribute(be32_to_cpu(value->ip4dst),
1011 be32_to_cpu(mask->ip4dst),
1012 RQFCR_PID_DIA, tab);
1013 gfar_set_attribute(be16_to_cpu(value->pdst),
1014 be16_to_cpu(mask->pdst),
1015 RQFCR_PID_DPT, tab);
1016 gfar_set_attribute(be16_to_cpu(value->psrc),
1017 be16_to_cpu(mask->psrc),
1018 RQFCR_PID_SPT, tab);
1019 gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
1022 /* Translates value and mask for RAW-IP4 */
1023 static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
1024 struct ethtool_usrip4_spec *mask,
1025 struct filer_table *tab)
1027 gfar_set_attribute(be32_to_cpu(value->ip4src),
1028 be32_to_cpu(mask->ip4src),
1029 RQFCR_PID_SIA, tab);
1030 gfar_set_attribute(be32_to_cpu(value->ip4dst),
1031 be32_to_cpu(mask->ip4dst),
1032 RQFCR_PID_DIA, tab);
1033 gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
1034 gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
1035 gfar_set_attribute(be32_to_cpu(value->l4_4_bytes),
1036 be32_to_cpu(mask->l4_4_bytes),
1037 RQFCR_PID_ARB, tab);
1041 /* Translates value and mask for ETHER spec */
1042 static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
1043 struct filer_table *tab)
1045 u32 upper_temp_mask = 0;
1046 u32 lower_temp_mask = 0;
1048 /* Source address */
1049 if (!is_broadcast_ether_addr(mask->h_source)) {
1050 if (is_zero_ether_addr(mask->h_source)) {
1051 upper_temp_mask = 0xFFFFFFFF;
1052 lower_temp_mask = 0xFFFFFFFF;
1054 upper_temp_mask = mask->h_source[0] << 16 |
1055 mask->h_source[1] << 8 |
1057 lower_temp_mask = mask->h_source[3] << 16 |
1058 mask->h_source[4] << 8 |
1062 gfar_set_attribute(value->h_source[0] << 16 |
1063 value->h_source[1] << 8 |
1065 upper_temp_mask, RQFCR_PID_SAH, tab);
1066 /* And the same for the lower part */
1067 gfar_set_attribute(value->h_source[3] << 16 |
1068 value->h_source[4] << 8 |
1070 lower_temp_mask, RQFCR_PID_SAL, tab);
1072 /* Destination address */
1073 if (!is_broadcast_ether_addr(mask->h_dest)) {
1074 /* Special case: destination is the limited broadcast address */
1075 if ((is_broadcast_ether_addr(value->h_dest) &&
1076 is_zero_ether_addr(mask->h_dest))) {
1077 gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
1079 if (is_zero_ether_addr(mask->h_dest)) {
1080 upper_temp_mask = 0xFFFFFFFF;
1081 lower_temp_mask = 0xFFFFFFFF;
1083 upper_temp_mask = mask->h_dest[0] << 16 |
1084 mask->h_dest[1] << 8 |
1086 lower_temp_mask = mask->h_dest[3] << 16 |
1087 mask->h_dest[4] << 8 |
1092 gfar_set_attribute(value->h_dest[0] << 16 |
1093 value->h_dest[1] << 8 |
1095 upper_temp_mask, RQFCR_PID_DAH, tab);
1096 /* And the same for the lower part */
1097 gfar_set_attribute(value->h_dest[3] << 16 |
1098 value->h_dest[4] << 8 |
1100 lower_temp_mask, RQFCR_PID_DAL, tab);
1104 gfar_set_attribute(be16_to_cpu(value->h_proto),
1105 be16_to_cpu(mask->h_proto),
1106 RQFCR_PID_ETY, tab);
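/* The helpers below extract the VLAN ID, CFI and priority fields (and
 * the corresponding mask bits) from a rule's VLAN TCI.
 */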
1109 static inline u32 vlan_tci_vid(struct ethtool_rx_flow_spec *rule)
1111 return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_VID_MASK;
1114 static inline u32 vlan_tci_vidm(struct ethtool_rx_flow_spec *rule)
1116 return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_VID_MASK;
1119 static inline u32 vlan_tci_cfi(struct ethtool_rx_flow_spec *rule)
1121 return be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_CFI_MASK;
1124 static inline u32 vlan_tci_cfim(struct ethtool_rx_flow_spec *rule)
1126 return be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_CFI_MASK;
1129 static inline u32 vlan_tci_prio(struct ethtool_rx_flow_spec *rule)
1131 return (be16_to_cpu(rule->h_ext.vlan_tci) & VLAN_PRIO_MASK) >>
1135 static inline u32 vlan_tci_priom(struct ethtool_rx_flow_spec *rule)
1137 return (be16_to_cpu(rule->m_ext.vlan_tci) & VLAN_PRIO_MASK) >>
1141 /* Convert a rule to the gianfar binary filer format */
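/* e.g. (assuming a standard user-space ethtool binary) a rule such as
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 1
 * reaches this function as an ethtool_rx_flow_spec and is translated
 * into RQFCR/RQFPR filer entries below.
 */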
1142 static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1143 struct filer_table *tab)
1145 u32 vlan = 0, vlan_mask = 0;
1146 u32 id = 0, id_mask = 0;
1147 u32 cfi = 0, cfi_mask = 0;
1148 u32 prio = 0, prio_mask = 0;
1149 u32 old_index = tab->index;
1151 /* Check if vlan is wanted */
1152 if ((rule->flow_type & FLOW_EXT) &&
1153 (rule->m_ext.vlan_tci != cpu_to_be16(0xFFFF))) {
1154 if (!rule->m_ext.vlan_tci)
1155 rule->m_ext.vlan_tci = cpu_to_be16(0xFFFF);
1158 vlan_mask = RQFPR_VLN;
1160 /* Separate the fields */
1161 id = vlan_tci_vid(rule);
1162 id_mask = vlan_tci_vidm(rule);
1163 cfi = vlan_tci_cfi(rule);
1164 cfi_mask = vlan_tci_cfim(rule);
1165 prio = vlan_tci_prio(rule);
1166 prio_mask = vlan_tci_priom(rule);
1168 if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
1170 vlan_mask |= RQFPR_CFI;
1171 } else if (cfi != VLAN_TAG_PRESENT &&
1172 cfi_mask == VLAN_TAG_PRESENT) {
1173 vlan_mask |= RQFPR_CFI;
1177 switch (rule->flow_type & ~FLOW_EXT) {
1179 gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
1180 RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
1181 gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
1182 &rule->m_u.tcp_ip4_spec, tab);
1185 gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
1186 RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
1187 gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
1188 &rule->m_u.udp_ip4_spec, tab);
1191 gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
1193 gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
1194 gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
1195 (struct ethtool_tcpip4_spec *)&rule->m_u,
1199 gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
1201 gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
1202 (struct ethtool_usrip4_spec *) &rule->m_u,
1207 gfar_set_parse_bits(vlan, vlan_mask, tab);
1208 gfar_set_ether((struct ethhdr *) &rule->h_u,
1209 (struct ethhdr *) &rule->m_u, tab);
1215 /* Set the vlan attributes in the end */
1217 gfar_set_attribute(id, id_mask, RQFCR_PID_VID, tab);
1218 gfar_set_attribute(prio, prio_mask, RQFCR_PID_PRI, tab);
1221 /* If there has been nothing written till now, it must be a default */
1222 if (tab->index == old_index) {
1223 gfar_set_mask(0xFFFFFFFF, tab);
1224 tab->fe[tab->index].ctrl = 0x20;
1225 tab->fe[tab->index].prop = 0x0;
1229 /* Remove last AND */
1230 tab->fe[tab->index - 1].ctrl &= (~RQFCR_AND);
1232 /* Specify which queue to use or to drop */
1233 if (rule->ring_cookie == RX_CLS_FLOW_DISC)
1234 tab->fe[tab->index - 1].ctrl |= RQFCR_RJE;
1236 tab->fe[tab->index - 1].ctrl |= (rule->ring_cookie << 10);
1238 /* Only big enough entries can be clustered */
1239 if (tab->index > (old_index + 2)) {
1240 tab->fe[old_index + 1].ctrl |= RQFCR_CLE;
1241 tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
1244 /* In rare cases the cache can be full while there is free space in hw */
1247 if (tab->index > MAX_FILER_CACHE_IDX - 1)
1253 /* Copy 'size' filer entries */
1254 static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
1255 struct gfar_filer_entry src[0], s32 size)
1259 dst[size].ctrl = src[size].ctrl;
1260 dst[size].prop = src[size].prop;
1264 /* Delete the contents of the filer-table between start and end
1267 static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
1271 if (end > MAX_FILER_CACHE_IDX || end < begin)
1275 length = end - begin;
1278 while (end < tab->index) {
1279 tab->fe[begin].ctrl = tab->fe[end].ctrl;
1280 tab->fe[begin++].prop = tab->fe[end++].prop;
1283 /* Fill up with don't cares */
1284 while (begin < tab->index) {
1285 tab->fe[begin].ctrl = 0x60;
1286 tab->fe[begin].prop = 0xFFFFFFFF;
1290 tab->index -= length;
1294 /* Make space at the wanted location */
1295 static int gfar_expand_filer_entries(u32 begin, u32 length,
1296 struct filer_table *tab)
1298 if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
1299 begin > MAX_FILER_CACHE_IDX)
1302 gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
1303 tab->index - length + 1);
1305 tab->index += length;
1309 static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
1311 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1313 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1314 (RQFCR_AND | RQFCR_CLE))
1320 static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
1322 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1324 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1331 /* Uses the hardware's clustering option to reduce
1332 * the number of filer table entries
1334 static void gfar_cluster_filer(struct filer_table *tab)
1336 s32 i = -1, j, iend, jend;
1338 while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
1340 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
1341 /* The cluster entry itself and the previous one
1342 * (a mask) must be identical!
1344 if (tab->fe[i].ctrl != tab->fe[j].ctrl)
1346 if (tab->fe[i].prop != tab->fe[j].prop)
1348 if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
1350 if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
1352 iend = gfar_get_next_cluster_end(i, tab);
1353 jend = gfar_get_next_cluster_end(j, tab);
1354 if (jend == -1 || iend == -1)
1357 /* First we make some free space, where our cluster
1358 * element should be. Then we copy it there and finally
1359 * delete it from its old location.
1361 if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
1365 gfar_copy_filer_entries(&(tab->fe[iend + 1]),
1366 &(tab->fe[jend + 1]), jend - j);
1368 if (gfar_trim_filer_entries(jend - 1,
1373 /* Mask out cluster bit */
1374 tab->fe[iend].ctrl &= ~(RQFCR_CLE);
1379 /* Swaps the masked bits of a1<>a2 and b1<>b2 */
1380 static void gfar_swap_bits(struct gfar_filer_entry *a1,
1381 struct gfar_filer_entry *a2,
1382 struct gfar_filer_entry *b1,
1383 struct gfar_filer_entry *b2, u32 mask)
1386 temp[0] = a1->ctrl & mask;
1387 temp[1] = a2->ctrl & mask;
1388 temp[2] = b1->ctrl & mask;
1389 temp[3] = b2->ctrl & mask;
1396 a1->ctrl |= temp[1];
1397 a2->ctrl |= temp[0];
1398 b1->ctrl |= temp[3];
1399 b2->ctrl |= temp[2];
1402 /* Generate a list consisting of mask values with their start and
1403 * end of validity and block as indicator for parts belonging
1404 * together (glued by ANDs) in mask_table
1406 static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
1407 struct filer_table *tab)
1409 u32 i, and_index = 0, block_index = 1;
1411 for (i = 0; i < tab->index; i++) {
1413 /* LSByte of control = 0 sets a mask */
1414 if (!(tab->fe[i].ctrl & 0xF)) {
1415 mask_table[and_index].mask = tab->fe[i].prop;
1416 mask_table[and_index].start = i;
1417 mask_table[and_index].block = block_index;
1419 mask_table[and_index - 1].end = i - 1;
1422 /* cluster starts and ends will be separated because they should
1423 * hold their position
1425 if (tab->fe[i].ctrl & RQFCR_CLE)
1427 /* An unset AND indicates the end of a dependent block */
1428 if (!(tab->fe[i].ctrl & RQFCR_AND))
1432 mask_table[and_index - 1].end = i - 1;
1437 /* Sorts the entries of mask_table by the values of the masks.
1438 * Important: The 0xFF80 flags of the first and last entry of a
1439 * block must hold their position (which queue, CLusterEnable, ReJEct,
1442 static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1443 struct filer_table *temp_table, u32 and_index)
1445 /* Pointer to compare function (_asc or _desc) */
1446 int (*gfar_comp)(const void *, const void *);
1448 u32 i, size = 0, start = 0, prev = 1;
1449 u32 old_first, old_last, new_first, new_last;
1451 gfar_comp = &gfar_comp_desc;
1453 for (i = 0; i < and_index; i++) {
1454 if (prev != mask_table[i].block) {
1455 old_first = mask_table[start].start + 1;
1456 old_last = mask_table[i - 1].end;
1457 sort(mask_table + start, size,
1458 sizeof(struct gfar_mask_entry),
1459 gfar_comp, &gfar_swap);
1461 /* Toggle order for every block. This makes the
1462 * thing more efficient!
1464 if (gfar_comp == gfar_comp_desc)
1465 gfar_comp = &gfar_comp_asc;
1467 gfar_comp = &gfar_comp_desc;
1469 new_first = mask_table[start].start + 1;
1470 new_last = mask_table[i - 1].end;
1472 gfar_swap_bits(&temp_table->fe[new_first],
1473 &temp_table->fe[old_first],
1474 &temp_table->fe[new_last],
1475 &temp_table->fe[old_last],
1476 RQFCR_QUEUE | RQFCR_CLE |
1477 RQFCR_RJE | RQFCR_AND);
1483 prev = mask_table[i].block;
1487 /* Reduces the number of masks needed in the filer table to save entries
1488 * This is done by sorting the masks of a dependent block. A dependent block is
1489 * identified by gluing ANDs or CLE. The sorting order toggles after every
1490 * block. Of course entries in scope of a mask must change their location with
1493 static int gfar_optimize_filer_masks(struct filer_table *tab)
1495 struct filer_table *temp_table;
1496 struct gfar_mask_entry *mask_table;
1498 u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
1501 /* We need a copy of the filer table because
1502 * we want to change its order
1504 temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
1505 if (temp_table == NULL)
1508 mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
1509 sizeof(struct gfar_mask_entry), GFP_KERNEL);
1511 if (mask_table == NULL) {
1516 and_index = gfar_generate_mask_table(mask_table, tab);
1518 gfar_sort_mask_table(mask_table, temp_table, and_index);
1520 /* Now we can copy the data from our duplicated filer table to
1521 * the real one in the order the mask table says
1523 for (i = 0; i < and_index; i++) {
1524 size = mask_table[i].end - mask_table[i].start + 1;
1525 gfar_copy_filer_entries(&(tab->fe[j]),
1526 &(temp_table->fe[mask_table[i].start]), size);
1530 /* And finally we just have to check for duplicated masks and drop the
1533 for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1534 if (tab->fe[i].ctrl == 0x80) {
1535 previous_mask = i++;
1539 for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1540 if (tab->fe[i].ctrl == 0x80) {
1541 if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
1542 /* Two identical ones found!
1543 * So drop the second one!
1545 gfar_trim_filer_entries(i, i, tab);
1547 /* Not identical! */
1553 end: kfree(temp_table);
1557 /* Write the bit-pattern from software's buffer to hardware registers */
1558 static int gfar_write_filer_table(struct gfar_private *priv,
1559 struct filer_table *tab)
1562 if (tab->index > MAX_FILER_IDX - 1)
1565 /* Avoid an inconsistent filer table being processed */
1568 /* Fill regular entries */
1569 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop);
1571 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1572 /* Fill the rest with fall-throughs */
1573 for (; i < MAX_FILER_IDX - 1; i++)
1574 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
1575 /* Last entry must be default accept
1576 * because that's what people expect
1578 gfar_write_filer(priv, i, 0x20, 0x0);
1585 static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
1586 struct gfar_private *priv)
1589 if (flow->flow_type & FLOW_EXT) {
1590 if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
1591 netdev_warn(priv->ndev,
1592 "User-specific data not supported!\n");
1593 if (~flow->m_ext.vlan_etype)
1594 netdev_warn(priv->ndev,
1595 "VLAN-etype not supported!\n");
1597 if (flow->flow_type == IP_USER_FLOW)
1598 if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
1599 netdev_warn(priv->ndev,
1600 "IP-Version differing from IPv4 not supported!\n");
1605 static int gfar_process_filer_changes(struct gfar_private *priv)
1607 struct ethtool_flow_spec_container *j;
1608 struct filer_table *tab;
1612 /* So index is set to zero, too! */
1613 tab = kzalloc(sizeof(*tab), GFP_KERNEL);
1617 /* Now convert the existing filer data from flow_spec into
1618 * the filer table's binary format
1620 list_for_each_entry(j, &priv->rx_list.list, list) {
1621 ret = gfar_convert_to_filer(&j->fs, tab);
1622 if (ret == -EBUSY) {
1623 netdev_err(priv->ndev,
1624 "Rule not added: No free space!\n");
1628 netdev_err(priv->ndev,
1629 "Rule not added: Unsupported Flow-type!\n");
1636 /* Optimizations to save entries */
1637 gfar_cluster_filer(tab);
1638 gfar_optimize_filer_masks(tab);
1640 pr_debug("\tSummary:\n"
1641 "\tData on hardware: %d\n"
1642 "\tCompression rate: %d%%\n",
1643 tab->index, 100 - (100 * tab->index) / i);
1645 /* Write everything to hardware */
1646 ret = gfar_write_filer_table(priv, tab);
1647 if (ret == -EBUSY) {
1648 netdev_err(priv->ndev, "Rule not added: No free space!\n");
1657 static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
1661 for (i = 0; i < sizeof(flow->m_u); i++)
1662 flow->m_u.hdata[i] ^= 0xFF;
1664 flow->m_ext.vlan_etype ^= cpu_to_be16(0xFFFF);
1665 flow->m_ext.vlan_tci ^= cpu_to_be16(0xFFFF);
1666 flow->m_ext.data[0] ^= cpu_to_be32(~0);
1667 flow->m_ext.data[1] ^= cpu_to_be32(~0);
1670 static int gfar_add_cls(struct gfar_private *priv,
1671 struct ethtool_rx_flow_spec *flow)
1673 struct ethtool_flow_spec_container *temp, *comp;
1676 temp = kmalloc(sizeof(*temp), GFP_KERNEL);
1679 memcpy(&temp->fs, flow, sizeof(temp->fs));
1681 gfar_invert_masks(&temp->fs);
1682 ret = gfar_check_capability(&temp->fs, priv);
1685 /* Link in the new element at the right @location */
1686 if (list_empty(&priv->rx_list.list)) {
1687 ret = gfar_check_filer_hardware(priv);
1690 list_add(&temp->list, &priv->rx_list.list);
1693 list_for_each_entry(comp, &priv->rx_list.list, list) {
1694 if (comp->fs.location > flow->location) {
1695 list_add_tail(&temp->list, &comp->list);
1698 if (comp->fs.location == flow->location) {
1699 netdev_err(priv->ndev,
1700 "Rule not added: ID %d not free!\n",
1706 list_add_tail(&temp->list, &priv->rx_list.list);
1710 ret = gfar_process_filer_changes(priv);
1713 priv->rx_list.count++;
1717 list_del(&temp->list);
1723 static int gfar_del_cls(struct gfar_private *priv, u32 loc)
1725 struct ethtool_flow_spec_container *comp;
1728 if (list_empty(&priv->rx_list.list))
1731 list_for_each_entry(comp, &priv->rx_list.list, list) {
1732 if (comp->fs.location == loc) {
1733 list_del(&comp->list);
1735 priv->rx_list.count--;
1736 gfar_process_filer_changes(priv);
1745 static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
1747 struct ethtool_flow_spec_container *comp;
1750 list_for_each_entry(comp, &priv->rx_list.list, list) {
1751 if (comp->fs.location == cmd->fs.location) {
1752 memcpy(&cmd->fs, &comp->fs, sizeof(cmd->fs));
1753 gfar_invert_masks(&cmd->fs);
1762 static int gfar_get_cls_all(struct gfar_private *priv,
1763 struct ethtool_rxnfc *cmd, u32 *rule_locs)
1765 struct ethtool_flow_spec_container *comp;
1768 list_for_each_entry(comp, &priv->rx_list.list, list) {
1769 if (i == cmd->rule_cnt)
1771 rule_locs[i] = comp->fs.location;
1775 cmd->data = MAX_FILER_IDX;
1781 static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1783 struct gfar_private *priv = netdev_priv(dev);
1786 mutex_lock(&priv->rx_queue_access);
1790 ret = gfar_set_hash_opts(priv, cmd);
1792 case ETHTOOL_SRXCLSRLINS:
1793 if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
1794 cmd->fs.ring_cookie >= priv->num_rx_queues) ||
1795 cmd->fs.location >= MAX_FILER_IDX) {
1799 ret = gfar_add_cls(priv, &cmd->fs);
1801 case ETHTOOL_SRXCLSRLDEL:
1802 ret = gfar_del_cls(priv, cmd->fs.location);
1808 mutex_unlock(&priv->rx_queue_access);
1813 static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1816 struct gfar_private *priv = netdev_priv(dev);
1820 case ETHTOOL_GRXRINGS:
1821 cmd->data = priv->num_rx_queues;
1823 case ETHTOOL_GRXCLSRLCNT:
1824 cmd->rule_cnt = priv->rx_list.count;
1826 case ETHTOOL_GRXCLSRULE:
1827 ret = gfar_get_cls(priv, cmd);
1829 case ETHTOOL_GRXCLSRLALL:
1830 ret = gfar_get_cls_all(priv, cmd, rule_locs);
1840 int gfar_phc_index = -1;
1841 EXPORT_SYMBOL(gfar_phc_index);
1843 static int gfar_get_ts_info(struct net_device *dev,
1844 struct ethtool_ts_info *info)
1846 struct gfar_private *priv = netdev_priv(dev);
1848 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
1849 info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
1850 SOF_TIMESTAMPING_SOFTWARE;
1851 info->phc_index = -1;
1854 info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1855 SOF_TIMESTAMPING_RX_HARDWARE |
1856 SOF_TIMESTAMPING_RAW_HARDWARE;
1857 info->phc_index = gfar_phc_index;
1858 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
1859 (1 << HWTSTAMP_TX_ON);
1860 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1861 (1 << HWTSTAMP_FILTER_ALL);
1865 const struct ethtool_ops gfar_ethtool_ops = {
1866 .get_settings = gfar_gsettings,
1867 .set_settings = gfar_ssettings,
1868 .get_drvinfo = gfar_gdrvinfo,
1869 .get_regs_len = gfar_reglen,
1870 .get_regs = gfar_get_regs,
1871 .get_link = ethtool_op_get_link,
1872 .get_coalesce = gfar_gcoalesce,
1873 .set_coalesce = gfar_scoalesce,
1874 .get_ringparam = gfar_gringparam,
1875 .set_ringparam = gfar_sringparam,
1876 .get_pauseparam = gfar_gpauseparam,
1877 .set_pauseparam = gfar_spauseparam,
1878 .get_strings = gfar_gstrings,
1879 .get_sset_count = gfar_sset_count,
1880 .get_ethtool_stats = gfar_fill_stats,
1881 .get_msglevel = gfar_get_msglevel,
1882 .set_msglevel = gfar_set_msglevel,
1884 .get_wol = gfar_get_wol,
1885 .set_wol = gfar_set_wol,
1887 .set_rxnfc = gfar_set_nfc,
1888 .get_rxnfc = gfar_get_nfc,
1889 .get_ts_info = gfar_get_ts_info,