Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net...
authorDavid S. Miller <davem@davemloft.net>
Tue, 8 Jul 2014 04:22:58 +0000 (21:22 -0700)
committerDavid S. Miller <davem@davemloft.net>
Tue, 8 Jul 2014 04:22:58 +0000 (21:22 -0700)
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-07-02

This series contains updates to i40e and i40evf.

Anjali fixes a possible race where we were trying to free the dummy packet
buffer in the function that created it, so clean up the dummy packet buffer
in i40e_clean_tx_ring() instead.  Also fixes an issue where the filter
program routine was not checking if there were descriptors available for
programming a filter.

Mitch fixes unnecessary delays when sending the admin queue commands by
moving a declaration up one level so we do not dereference it out of scope.
Fixes an issue with the VF where if the admin queue interrupts get lost for
some reason, the VF communication will stall as the VFs have no way of
reaching the PF.  To alleviate this condition, go ahead and check the ARQ
every time we run the service task.  Updates i40evf to allow the watchdog
to fire vector 0 via software, which makes the driver tolerant of dropped
interrupts on that vector.

Paul fixes a shifted '1' to be unsigned to avoid shifting a signed integer.

Jesse disables TPH by default since it is not enabled in current
hardware.  Also finishes the i40e implementation of get_settings
for ethtool.

Catherine adds a new variable (hw.phy.link_info.an_enabled) to track whether
auto-negotiation is enabled, along with the functionality to update the
variable.  Adds the functionality to set the requested flow control mode.
Adds i40e implementation of setpauseparam and set_settings to ethtool.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
65 files changed:
Documentation/devicetree/bindings/net/broadcom-systemport.txt
Documentation/networking/ip-sysctl.txt
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/ti/cpts.c
drivers/net/ethernet/ti/tlan.c
drivers/net/ethernet/ti/tlan.h
drivers/net/phy/dp83640.c
drivers/net/vxlan.c
include/linux/ipv6.h
include/linux/netdevice.h
include/linux/ptp_classify.h
include/linux/skbuff.h
include/net/flow_keys.h
include/net/ip.h
include/net/ipv6.h
include/net/netns/ipv6.h
include/net/sctp/sctp.h
include/net/sock.h
include/net/tcp.h
include/net/udp.h
include/net/vxlan.h
include/uapi/linux/in6.h
net/8021q/vlan_dev.c
net/batman-adv/sysfs.c
net/core/flow_dissector.c
net/core/ptp_classifier.c
net/core/timestamping.c
net/ieee802154/6lowpan_iphc.c
net/ieee802154/6lowpan_rtnl.c
net/ieee802154/af_ieee802154.c
net/ieee802154/dgram.c
net/ieee802154/ieee802154.h
net/ieee802154/netlink.c
net/ieee802154/nl-mac.c
net/ieee802154/nl-phy.c
net/ieee802154/raw.c
net/ieee802154/reassembly.c
net/ieee802154/wpan-class.c
net/ipv4/datagram.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ipv6_sockglue.c
net/ipv6/sysctl_net_ipv6.c
net/ipv6/tcp_ipv6.c
net/mac802154/ieee802154_dev.c
net/mac802154/llsec.c
net/mac802154/mib.c
net/mac802154/tx.c
net/netlink/af_netlink.c
net/openvswitch/vport-vxlan.c
net/sctp/sysctl.c
net/sctp/transport.c
net/tipc/link.c

index c183ea90d9bc5b08e7980d4e9bdb9dd386957077..aa7ad622259d991fbfdbfafa303285c09527ce6e 100644 (file)
@@ -4,7 +4,8 @@ Required properties:
 - compatible: should be one of "brcm,systemport-v1.00" or "brcm,systemport"
 - reg: address and length of the register set for the device.
 - interrupts: interrupts for the device, first cell must be for the the rx
-  interrupts, and the second cell should be for the transmit queues
+  interrupts, and the second cell should be for the transmit queues. An
+  optional third interrupt cell for Wake-on-LAN can be specified
 - local-mac-address: Ethernet MAC address (48 bits) of this adapter
 - phy-mode: Should be a string describing the PHY interface to the
   Ethernet switch/PHY, see Documentation/devicetree/bindings/net/ethernet.txt
index 10e216c6e05e4bc1b385f957a8c75140c24687ae..f35bfe43bf7abac26690328c5332d154023fd818 100644 (file)
@@ -1132,6 +1132,15 @@ flowlabel_consistency - BOOLEAN
        FALSE: disabled
        Default: TRUE
 
+auto_flowlabels - BOOLEAN
+       Automatically generate flow labels based on a flow hash
+       of the packet. This allows intermediate devices, such as routers,
+       to identify packet flows for mechanisms like Equal Cost Multipath
+       Routing (see RFC 6438).
+       TRUE: enabled
+       FALSE: disabled
+       Default: FALSE
+
 anycast_src_echo_reply - BOOLEAN
        Controls the use of anycast addresses as source addresses for ICMPv6
        echo reply
index 141160ef249ae83e9d1fe8672dca3926ea9b7172..7a1bd2b3bc26d40cc0450833b4831e76617ed529 100644 (file)
@@ -124,9 +124,9 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;
 
-       priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+       priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
        reg = rxchk_readl(priv, RXCHK_CONTROL);
-       if (priv->rx_csum_en)
+       if (priv->rx_chk_en)
                reg |= RXCHK_EN;
        else
                reg &= ~RXCHK_EN;
@@ -134,7 +134,7 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
        /* If UniMAC forwards CRC, we need to skip over it to get
         * a valid CHK bit to be set in the per-packet status word
         */
-       if (priv->rx_csum_en && priv->crc_fwd)
+       if (priv->rx_chk_en && priv->crc_fwd)
                reg |= RXCHK_SKIP_FCS;
        else
                reg &= ~RXCHK_SKIP_FCS;
@@ -384,6 +384,64 @@ static void bcm_sysport_get_stats(struct net_device *dev,
        }
 }
 
+static void bcm_sysport_get_wol(struct net_device *dev,
+                               struct ethtool_wolinfo *wol)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+       wol->wolopts = priv->wolopts;
+
+       if (!(priv->wolopts & WAKE_MAGICSECURE))
+               return;
+
+       /* Return the programmed SecureOn password */
+       reg = umac_readl(priv, UMAC_PSW_MS);
+       put_unaligned_be16(reg, &wol->sopass[0]);
+       reg = umac_readl(priv, UMAC_PSW_LS);
+       put_unaligned_be32(reg, &wol->sopass[2]);
+}
+
+static int bcm_sysport_set_wol(struct net_device *dev,
+                               struct ethtool_wolinfo *wol)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       struct device *kdev = &priv->pdev->dev;
+       u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+
+       if (!device_can_wakeup(kdev))
+               return -ENOTSUPP;
+
+       if (wol->wolopts & ~supported)
+               return -EINVAL;
+
+       /* Program the SecureOn password */
+       if (wol->wolopts & WAKE_MAGICSECURE) {
+               umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
+                               UMAC_PSW_MS);
+               umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
+                               UMAC_PSW_LS);
+       }
+
+       /* Flag the device and relevant IRQ as wakeup capable */
+       if (wol->wolopts) {
+               device_set_wakeup_enable(kdev, 1);
+               enable_irq_wake(priv->wol_irq);
+               priv->wol_irq_disabled = 0;
+       } else {
+               device_set_wakeup_enable(kdev, 0);
+               /* Avoid unbalanced disable_irq_wake calls */
+               if (!priv->wol_irq_disabled)
+                       disable_irq_wake(priv->wol_irq);
+               priv->wol_irq_disabled = 1;
+       }
+
+       priv->wolopts = wol->wolopts;
+
+       return 0;
+}
+
 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
 {
        dev_kfree_skb_any(cb->skb);
@@ -692,6 +750,20 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
+static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
+{
+       u32 reg;
+
+       /* Stop monitoring MPD interrupt */
+       intrl2_0_mask_set(priv, INTRL2_0_MPD);
+
+       /* Clear the MagicPacket detection logic */
+       reg = umac_readl(priv, UMAC_MPD_CTRL);
+       reg &= ~MPD_EN;
+       umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+       netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
+}
 
 /* RX and misc interrupt routine */
 static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
@@ -722,6 +794,11 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
        if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
                bcm_sysport_tx_reclaim_all(priv);
 
+       if (priv->irq0_stat & INTRL2_0_MPD) {
+               netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
+               bcm_sysport_resume_from_wol(priv);
+       }
+
        return IRQ_HANDLED;
 }
 
@@ -757,6 +834,15 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
+{
+       struct bcm_sysport_priv *priv = dev_id;
+
+       pm_wakeup_event(&priv->pdev->dev, 0);
+
+       return IRQ_HANDLED;
+}
+
 static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
 {
        struct sk_buff *nskb;
@@ -1236,15 +1322,15 @@ static void bcm_sysport_set_rx_mode(struct net_device *dev)
 }
 
 static inline void umac_enable_set(struct bcm_sysport_priv *priv,
-                                       unsigned int enable)
+                                       u32 mask, unsigned int enable)
 {
        u32 reg;
 
        reg = umac_readl(priv, UMAC_CMD);
        if (enable)
-               reg |= CMD_RX_EN | CMD_TX_EN;
+               reg |= mask;
        else
-               reg &= ~(CMD_RX_EN | CMD_TX_EN);
+               reg &= ~mask;
        umac_writel(priv, reg, UMAC_CMD);
 
        /* UniMAC stops on a packet boundary, wait for a full-sized packet
@@ -1295,11 +1381,35 @@ static void topctrl_flush(struct bcm_sysport_priv *priv)
        topctrl_writel(priv, 0, TX_FLUSH_CNTL);
 }
 
+static void bcm_sysport_netif_start(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       /* Enable NAPI */
+       napi_enable(&priv->napi);
+
+       phy_start(priv->phydev);
+
+       /* Enable TX interrupts for the 32 TXQs */
+       intrl2_1_mask_clear(priv, 0xffffffff);
+
+       /* Last call before we start the real business */
+       netif_tx_start_all_queues(dev);
+}
+
+static void rbuf_init(struct bcm_sysport_priv *priv)
+{
+       u32 reg;
+
+       reg = rbuf_readl(priv, RBUF_CONTROL);
+       reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+       rbuf_writel(priv, reg, RBUF_CONTROL);
+}
+
 static int bcm_sysport_open(struct net_device *dev)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        unsigned int i;
-       u32 reg;
        int ret;
 
        /* Reset UniMAC */
@@ -1313,12 +1423,10 @@ static int bcm_sysport_open(struct net_device *dev)
        topctrl_flush(priv);
 
        /* Disable the UniMAC RX/TX */
-       umac_enable_set(priv, 0);
+       umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
 
        /* Enable RBUF 2bytes alignment and Receive Status Block */
-       reg = rbuf_readl(priv, RBUF_CONTROL);
-       reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
-       rbuf_writel(priv, reg, RBUF_CONTROL);
+       rbuf_init(priv);
 
        /* Set maximum frame length */
        umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
@@ -1394,19 +1502,10 @@ static int bcm_sysport_open(struct net_device *dev)
        if (ret)
                goto out_clear_rx_int;
 
-       /* Enable NAPI */
-       napi_enable(&priv->napi);
-
        /* Turn on UniMAC TX/RX */
-       umac_enable_set(priv, 1);
-
-       phy_start(priv->phydev);
+       umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
 
-       /* Enable TX interrupts for the 32 TXQs */
-       intrl2_1_mask_clear(priv, 0xffffffff);
-
-       /* Last call before we start the real business */
-       netif_tx_start_all_queues(dev);
+       bcm_sysport_netif_start(dev);
 
        return 0;
 
@@ -1425,12 +1524,9 @@ out_phy_disconnect:
        return ret;
 }
 
-static int bcm_sysport_stop(struct net_device *dev)
+static void bcm_sysport_netif_stop(struct net_device *dev)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
-       unsigned int i;
-       u32 reg;
-       int ret;
 
        /* stop all software from updating hardware */
        netif_tx_stop_all_queues(dev);
@@ -1442,11 +1538,18 @@ static int bcm_sysport_stop(struct net_device *dev)
        intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
        intrl2_1_mask_set(priv, 0xffffffff);
        intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+}
+
+static int bcm_sysport_stop(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       int ret;
+
+       bcm_sysport_netif_stop(dev);
 
        /* Disable UniMAC RX */
-       reg = umac_readl(priv, UMAC_CMD);
-       reg &= ~CMD_RX_EN;
-       umac_writel(priv, reg, UMAC_CMD);
+       umac_enable_set(priv, CMD_RX_EN, 0);
 
        ret = tdma_enable_set(priv, 0);
        if (ret) {
@@ -1464,9 +1567,7 @@ static int bcm_sysport_stop(struct net_device *dev)
        }
 
        /* Disable UniMAC TX */
-       reg = umac_readl(priv, UMAC_CMD);
-       reg &= ~CMD_TX_EN;
-       umac_writel(priv, reg, UMAC_CMD);
+       umac_enable_set(priv, CMD_TX_EN, 0);
 
        /* Free RX/TX rings SW structures */
        for (i = 0; i < dev->num_tx_queues; i++)
@@ -1492,6 +1593,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = {
        .get_strings            = bcm_sysport_get_strings,
        .get_ethtool_stats      = bcm_sysport_get_stats,
        .get_sset_count         = bcm_sysport_get_sset_count,
+       .get_wol                = bcm_sysport_get_wol,
+       .set_wol                = bcm_sysport_set_wol,
 };
 
 static const struct net_device_ops bcm_sysport_netdev_ops = {
@@ -1533,6 +1636,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
        priv->irq0 = platform_get_irq(pdev, 0);
        priv->irq1 = platform_get_irq(pdev, 1);
+       priv->wol_irq = platform_get_irq(pdev, 2);
        if (priv->irq0 <= 0 || priv->irq1 <= 0) {
                dev_err(&pdev->dev, "invalid interrupts\n");
                ret = -EINVAL;
@@ -1585,6 +1689,13 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
                                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 
+       /* Request the WOL interrupt and advertise suspend if available */
+       priv->wol_irq_disabled = 1;
+       ret = devm_request_irq(&pdev->dev, priv->wol_irq,
+                               bcm_sysport_wol_isr, 0, dev->name, priv);
+       if (!ret)
+               device_set_wakeup_capable(&pdev->dev, 1);
+
        /* Set the needed headroom once and for all */
        BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
        dev->needed_headroom += sizeof(struct bcm_tsb);
@@ -1631,6 +1742,208 @@ static int bcm_sysport_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
+{
+       struct net_device *ndev = priv->netdev;
+       unsigned int timeout = 1000;
+       u32 reg;
+
+       /* Password has already been programmed */
+       reg = umac_readl(priv, UMAC_MPD_CTRL);
+       reg |= MPD_EN;
+       reg &= ~PSW_EN;
+       if (priv->wolopts & WAKE_MAGICSECURE)
+               reg |= PSW_EN;
+       umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+       /* Make sure RBUF entered WoL mode as result */
+       do {
+               reg = rbuf_readl(priv, RBUF_STATUS);
+               if (reg & RBUF_WOL_MODE)
+                       break;
+
+               udelay(10);
+       } while (timeout-- > 0);
+
+       /* Do not leave the UniMAC RBUF matching only MPD packets */
+       if (!timeout) {
+               reg = umac_readl(priv, UMAC_MPD_CTRL);
+               reg &= ~MPD_EN;
+               umac_writel(priv, reg, UMAC_MPD_CTRL);
+               netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
+               return -ETIMEDOUT;
+       }
+
+       /* UniMAC receive needs to be turned on */
+       umac_enable_set(priv, CMD_RX_EN, 1);
+
+       /* Enable the interrupt wake-up source */
+       intrl2_0_mask_clear(priv, INTRL2_0_MPD);
+
+       netif_dbg(priv, wol, ndev, "entered WOL mode\n");
+
+       return 0;
+}
+
+static int bcm_sysport_suspend(struct device *d)
+{
+       struct net_device *dev = dev_get_drvdata(d);
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       int ret = 0;
+       u32 reg;
+
+       if (!netif_running(dev))
+               return 0;
+
+       bcm_sysport_netif_stop(dev);
+
+       phy_suspend(priv->phydev);
+
+       netif_device_detach(dev);
+
+       /* Disable UniMAC RX */
+       umac_enable_set(priv, CMD_RX_EN, 0);
+
+       ret = rdma_enable_set(priv, 0);
+       if (ret) {
+               netdev_err(dev, "RDMA timeout!\n");
+               return ret;
+       }
+
+       /* Disable RXCHK if enabled */
+       if (priv->rx_chk_en) {
+               reg = rxchk_readl(priv, RXCHK_CONTROL);
+               reg &= ~RXCHK_EN;
+               rxchk_writel(priv, reg, RXCHK_CONTROL);
+       }
+
+       /* Flush RX pipe */
+       if (!priv->wolopts)
+               topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
+
+       ret = tdma_enable_set(priv, 0);
+       if (ret) {
+               netdev_err(dev, "TDMA timeout!\n");
+               return ret;
+       }
+
+       /* Wait for a packet boundary */
+       usleep_range(2000, 3000);
+
+       umac_enable_set(priv, CMD_TX_EN, 0);
+
+       topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
+
+       /* Free RX/TX rings SW structures */
+       for (i = 0; i < dev->num_tx_queues; i++)
+               bcm_sysport_fini_tx_ring(priv, i);
+       bcm_sysport_fini_rx_ring(priv);
+
+       /* Get prepared for Wake-on-LAN */
+       if (device_may_wakeup(d) && priv->wolopts)
+               ret = bcm_sysport_suspend_to_wol(priv);
+
+       return ret;
+}
+
+static int bcm_sysport_resume(struct device *d)
+{
+       struct net_device *dev = dev_get_drvdata(d);
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       u32 reg;
+       int ret;
+
+       if (!netif_running(dev))
+               return 0;
+
+       /* We may have been suspended and never received a WOL event that
+        * would turn off MPD detection, take care of that now
+        */
+       bcm_sysport_resume_from_wol(priv);
+
+       /* Initialize both hardware and software ring */
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               ret = bcm_sysport_init_tx_ring(priv, i);
+               if (ret) {
+                       netdev_err(dev, "failed to initialize TX ring %d\n",
+                                       i);
+                       goto out_free_tx_rings;
+               }
+       }
+
+       /* Initialize linked-list */
+       tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
+
+       /* Initialize RX ring */
+       ret = bcm_sysport_init_rx_ring(priv);
+       if (ret) {
+               netdev_err(dev, "failed to initialize RX ring\n");
+               goto out_free_rx_ring;
+       }
+
+       netif_device_attach(dev);
+
+       /* Enable RX interrupt and TX ring full interrupt */
+       intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
+       /* RX pipe enable */
+       topctrl_writel(priv, 0, RX_FLUSH_CNTL);
+
+       ret = rdma_enable_set(priv, 1);
+       if (ret) {
+               netdev_err(dev, "failed to enable RDMA\n");
+               goto out_free_rx_ring;
+       }
+
+       /* Enable rxchk */
+       if (priv->rx_chk_en) {
+               reg = rxchk_readl(priv, RXCHK_CONTROL);
+               reg |= RXCHK_EN;
+               rxchk_writel(priv, reg, RXCHK_CONTROL);
+       }
+
+       rbuf_init(priv);
+
+       /* Set maximum frame length */
+       umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+       /* Set MAC address */
+       umac_set_hw_addr(priv, dev->dev_addr);
+
+       umac_enable_set(priv, CMD_RX_EN, 1);
+
+       /* TX pipe enable */
+       topctrl_writel(priv, 0, TX_FLUSH_CNTL);
+
+       umac_enable_set(priv, CMD_TX_EN, 1);
+
+       ret = tdma_enable_set(priv, 1);
+       if (ret) {
+               netdev_err(dev, "TDMA timeout!\n");
+               goto out_free_rx_ring;
+       }
+
+       phy_resume(priv->phydev);
+
+       bcm_sysport_netif_start(dev);
+
+       return 0;
+
+out_free_rx_ring:
+       bcm_sysport_fini_rx_ring(priv);
+out_free_tx_rings:
+       for (i = 0; i < dev->num_tx_queues; i++)
+               bcm_sysport_fini_tx_ring(priv, i);
+       return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
+               bcm_sysport_suspend, bcm_sysport_resume);
+
 static const struct of_device_id bcm_sysport_of_match[] = {
        { .compatible = "brcm,systemport-v1.00" },
        { .compatible = "brcm,systemport" },
@@ -1644,6 +1957,7 @@ static struct platform_driver bcm_sysport_driver = {
                .name = "brcm-systemport",
                .owner = THIS_MODULE,
                .of_match_table = bcm_sysport_of_match,
+               .pm = &bcm_sysport_pm_ops,
        },
 };
 module_platform_driver(bcm_sysport_driver);
index 281c082460375611bc61c02baa476a9c0d03b33a..b08dab828101e7800d125d45f6e370169231f8f3 100644 (file)
@@ -246,6 +246,15 @@ struct bcm_rsb {
 #define  MIB_RX_CNT_RST                        (1 << 0)
 #define  MIB_RUNT_CNT_RST              (1 << 1)
 #define  MIB_TX_CNT_RST                        (1 << 2)
+
+#define UMAC_MPD_CTRL                  0x620
+#define  MPD_EN                                (1 << 0)
+#define  MSEQ_LEN_SHIFT                        16
+#define  MSEQ_LEN_MASK                 0xff
+#define  PSW_EN                                (1 << 27)
+
+#define UMAC_PSW_MS                    0x624
+#define UMAC_PSW_LS                    0x628
 #define UMAC_MDF_CTRL                  0x650
 #define UMAC_MDF_ADDR                  0x654
 
@@ -642,6 +651,7 @@ struct bcm_sysport_priv {
        struct platform_device  *pdev;
        int                     irq0;
        int                     irq1;
+       int                     wol_irq;
 
        /* Transmit rings */
        struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
@@ -664,10 +674,12 @@ struct bcm_sysport_priv {
        int                     old_duplex;
 
        /* Misc fields */
-       unsigned int            rx_csum_en:1;
+       unsigned int            rx_chk_en:1;
        unsigned int            tsb_en:1;
        unsigned int            crc_fwd:1;
        u16                     rev;
+       u32                     wolopts;
+       unsigned int            wol_irq_disabled:1;
 
        /* MIB related fields */
        struct bcm_sysport_mib  mib;
index c2f5d2d3b9324269edd1dc7d18e3d5355f209faf..d3d871b28cad83d045995cee6ed545037764c01f 100644 (file)
@@ -411,6 +411,7 @@ struct be_resources {
        u16 max_vlans;          /* Number of vlans supported */
        u16 max_evt_qs;
        u32 if_cap_flags;
+       u32 vf_if_cap_flags;    /* VF if capability flags */
 };
 
 struct rss_info {
@@ -500,6 +501,7 @@ struct be_adapter {
        u32 flash_status;
        struct completion et_cmd_compl;
 
+       struct be_resources pool_res;   /* resources available for the port */
        struct be_resources res;        /* resources available for the func */
        u16 num_vfs;                    /* Number of VFs provisioned by PF */
        u8 virtfn;
@@ -523,9 +525,8 @@ struct be_adapter {
 
 #define be_physfn(adapter)             (!adapter->virtfn)
 #define be_virtfn(adapter)             (adapter->virtfn)
-#define        sriov_enabled(adapter)          (adapter->num_vfs > 0)
-#define sriov_want(adapter)             (be_physfn(adapter) && \
-                                        (num_vfs || pci_num_vf(adapter->pdev)))
+#define sriov_enabled(adapter)         (adapter->num_vfs > 0)
+
 #define for_all_vfs(adapter, vf_cfg, i)                                        \
        for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
                i++, vf_cfg++)
@@ -536,7 +537,7 @@ struct be_adapter {
 #define be_max_vlans(adapter)          (adapter->res.max_vlans)
 #define be_max_uc(adapter)             (adapter->res.max_uc_mac)
 #define be_max_mc(adapter)             (adapter->res.max_mcast_mac)
-#define be_max_vfs(adapter)            (adapter->res.max_vfs)
+#define be_max_vfs(adapter)            (adapter->pool_res.max_vfs)
 #define be_max_rss(adapter)            (adapter->res.max_rss_qs)
 #define be_max_txqs(adapter)           (adapter->res.max_tx_qs)
 #define be_max_prio_txqs(adapter)      (adapter->res.max_prio_tx_qs)
index f4ea3490f44657f3e90974065fea9a0eee649cd0..9904bbfd4e935a1ea0e3e4a21948128c98b52b6c 100644 (file)
@@ -3313,15 +3313,28 @@ err:
        return status;
 }
 
-static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
+/* Descriptor type */
+enum {
+       FUNC_DESC = 1,
+       VFT_DESC = 2
+};
+
+static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
+                                              int desc_type)
 {
        struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+       struct be_nic_res_desc *nic;
        int i;
 
        for (i = 0; i < desc_count; i++) {
                if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
-                   hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
-                       return (struct be_nic_res_desc *)hdr;
+                   hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
+                       nic = (struct be_nic_res_desc *)hdr;
+                       if (desc_type == FUNC_DESC ||
+                           (desc_type == VFT_DESC &&
+                            nic->flags & (1 << VFT_SHIFT)))
+                               return nic;
+               }
 
                hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
                hdr = (void *)hdr + hdr->desc_len;
@@ -3329,6 +3342,16 @@ static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
        return NULL;
 }
 
+static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
+{
+       return be_get_nic_desc(buf, desc_count, VFT_DESC);
+}
+
+static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
+{
+       return be_get_nic_desc(buf, desc_count, FUNC_DESC);
+}
+
 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
                                                 u32 desc_count)
 {
@@ -3424,7 +3447,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
                u32 desc_count = le32_to_cpu(resp->desc_count);
                struct be_nic_res_desc *desc;
 
-               desc = be_get_nic_desc(resp->func_param, desc_count);
+               desc = be_get_func_nic_desc(resp->func_param, desc_count);
                if (!desc) {
                        status = -EINVAL;
                        goto err;
@@ -3440,76 +3463,17 @@ err:
        return status;
 }
 
-/* Uses mbox */
-static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
-                                         u8 domain, struct be_dma_mem *cmd)
-{
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_req_get_profile_config *req;
-       int status;
-
-       if (mutex_lock_interruptible(&adapter->mbox_lock))
-               return -1;
-       wrb = wrb_from_mbox(adapter);
-
-       req = cmd->va;
-       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                              OPCODE_COMMON_GET_PROFILE_CONFIG,
-                              cmd->size, wrb, cmd);
-
-       req->type = ACTIVE_PROFILE_TYPE;
-       req->hdr.domain = domain;
-       if (!lancer_chip(adapter))
-               req->hdr.version = 1;
-
-       status = be_mbox_notify_wait(adapter);
-
-       mutex_unlock(&adapter->mbox_lock);
-       return status;
-}
-
-/* Uses sync mcc */
-static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
-                                         u8 domain, struct be_dma_mem *cmd)
-{
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_req_get_profile_config *req;
-       int status;
-
-       spin_lock_bh(&adapter->mcc_lock);
-
-       wrb = wrb_from_mccq(adapter);
-       if (!wrb) {
-               status = -EBUSY;
-               goto err;
-       }
-
-       req = cmd->va;
-       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                              OPCODE_COMMON_GET_PROFILE_CONFIG,
-                              cmd->size, wrb, cmd);
-
-       req->type = ACTIVE_PROFILE_TYPE;
-       req->hdr.domain = domain;
-       if (!lancer_chip(adapter))
-               req->hdr.version = 1;
-
-       status = be_mcc_notify_wait(adapter);
-
-err:
-       spin_unlock_bh(&adapter->mcc_lock);
-       return status;
-}
-
-/* Uses sync mcc, if MCCQ is already created otherwise mbox */
+/* Will use MBOX only if MCCQ has not been created */
 int be_cmd_get_profile_config(struct be_adapter *adapter,
                              struct be_resources *res, u8 domain)
 {
        struct be_cmd_resp_get_profile_config *resp;
+       struct be_cmd_req_get_profile_config *req;
+       struct be_nic_res_desc *vf_res;
        struct be_pcie_res_desc *pcie;
        struct be_port_res_desc *port;
        struct be_nic_res_desc *nic;
-       struct be_queue_info *mccq = &adapter->mcc_obj.q;
+       struct be_mcc_wrb wrb = {0};
        struct be_dma_mem cmd;
        u32 desc_count;
        int status;
@@ -3520,10 +3484,17 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
        if (!cmd.va)
                return -ENOMEM;
 
-       if (!mccq->created)
-               status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
-       else
-               status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
+       req = cmd.va;
+       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                              OPCODE_COMMON_GET_PROFILE_CONFIG,
+                              cmd.size, &wrb, &cmd);
+
+       req->hdr.domain = domain;
+       if (!lancer_chip(adapter))
+               req->hdr.version = 1;
+       req->type = ACTIVE_PROFILE_TYPE;
+
+       status = be_cmd_notify_wait(adapter, &wrb);
        if (status)
                goto err;
 
@@ -3539,48 +3510,52 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
        if (port)
                adapter->mc_type = port->mc_type;
 
-       nic = be_get_nic_desc(resp->func_param, desc_count);
+       nic = be_get_func_nic_desc(resp->func_param, desc_count);
        if (nic)
                be_copy_nic_desc(res, nic);
 
+       vf_res = be_get_vft_desc(resp->func_param, desc_count);
+       if (vf_res)
+               res->vf_if_cap_flags = vf_res->cap_flags;
 err:
        if (cmd.va)
                pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
 
-int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
-                             int size, u8 version, u8 domain)
+/* Will use MBOX only if MCCQ has not been created */
+static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
+                                    int size, int count, u8 version, u8 domain)
 {
        struct be_cmd_req_set_profile_config *req;
-       struct be_mcc_wrb *wrb;
+       struct be_mcc_wrb wrb = {0};
+       struct be_dma_mem cmd;
        int status;
 
-       spin_lock_bh(&adapter->mcc_lock);
-
-       wrb = wrb_from_mccq(adapter);
-       if (!wrb) {
-               status = -EBUSY;
-               goto err;
-       }
+       memset(&cmd, 0, sizeof(struct be_dma_mem));
+       cmd.size = sizeof(struct be_cmd_req_set_profile_config);
+       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       if (!cmd.va)
+               return -ENOMEM;
 
-       req = embedded_payload(wrb);
+       req = cmd.va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                              OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
-                              wrb, NULL);
+                              OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
+                              &wrb, &cmd);
        req->hdr.version = version;
        req->hdr.domain = domain;
-       req->desc_count = cpu_to_le32(1);
+       req->desc_count = cpu_to_le32(count);
        memcpy(req->desc, desc, size);
 
-       status = be_mcc_notify_wait(adapter);
-err:
-       spin_unlock_bh(&adapter->mcc_lock);
+       status = be_cmd_notify_wait(adapter, &wrb);
+
+       if (cmd.va)
+               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
 
 /* Mark all fields invalid */
-void be_reset_nic_desc(struct be_nic_res_desc *nic)
+static void be_reset_nic_desc(struct be_nic_res_desc *nic)
 {
        memset(nic, 0, sizeof(*nic));
        nic->unicast_mac_count = 0xFFFF;
@@ -3601,9 +3576,20 @@ void be_reset_nic_desc(struct be_nic_res_desc *nic)
        nic->wol_param = 0x0F;
        nic->tunnel_iface_count = 0xFFFF;
        nic->direct_tenant_iface_count = 0xFFFF;
+       nic->bw_min = 0xFFFFFFFF;
        nic->bw_max = 0xFFFFFFFF;
 }
 
+/* Mark all fields invalid */
+static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
+{
+       memset(pcie, 0, sizeof(*pcie));
+       pcie->sriov_state = 0xFF;
+       pcie->pf_state = 0xFF;
+       pcie->pf_type = 0xFF;
+       pcie->num_vfs = 0xFFFF;
+}
+
 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
                      u8 domain)
 {
@@ -3634,7 +3620,63 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
 
        return be_cmd_set_profile_config(adapter, &nic_desc,
                                         nic_desc.hdr.desc_len,
-                                        version, domain);
+                                        1, version, domain);
+}
+
+int be_cmd_set_sriov_config(struct be_adapter *adapter,
+                           struct be_resources res, u16 num_vfs)
+{
+       struct {
+               struct be_pcie_res_desc pcie;
+               struct be_nic_res_desc nic_vft;
+       } __packed desc;
+       u16 vf_q_count;
+
+       if (BEx_chip(adapter) || lancer_chip(adapter))
+               return 0;
+
+       /* PF PCIE descriptor */
+       be_reset_pcie_desc(&desc.pcie);
+       desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
+       desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+       desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+       desc.pcie.pf_num = adapter->pdev->devfn;
+       desc.pcie.sriov_state = num_vfs ? 1 : 0;
+       desc.pcie.num_vfs = cpu_to_le16(num_vfs);
+
+       /* VF NIC Template descriptor */
+       be_reset_nic_desc(&desc.nic_vft);
+       desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
+       desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+       desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) |
+                               (1 << NOSV_SHIFT);
+       desc.nic_vft.pf_num = adapter->pdev->devfn;
+       desc.nic_vft.vf_num = 0;
+
+       if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
+               /* If number of VFs requested is 8 less than max supported,
+                * assign 8 queue pairs to the PF and divide the remaining
+                * resources evenly among the VFs
+                */
+               if (num_vfs < (be_max_vfs(adapter) - 8))
+                       vf_q_count = (res.max_rss_qs - 8) / num_vfs;
+               else
+                       vf_q_count = res.max_rss_qs / num_vfs;
+
+               desc.nic_vft.rq_count = cpu_to_le16(vf_q_count);
+               desc.nic_vft.txq_count = cpu_to_le16(vf_q_count);
+               desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1);
+               desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count);
+       } else {
+               desc.nic_vft.txq_count = cpu_to_le16(1);
+               desc.nic_vft.rq_count = cpu_to_le16(1);
+               desc.nic_vft.rssq_count = cpu_to_le16(0);
+               /* One CQ for each TX, RX and MCCQ */
+               desc.nic_vft.cq_count = cpu_to_le16(3);
+       }
+
+       return be_cmd_set_profile_config(adapter, &desc,
+                                        2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
 }
 
 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
@@ -3686,7 +3728,7 @@ int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
        }
 
        return be_cmd_set_profile_config(adapter, &port_desc,
-                                        RESOURCE_DESC_SIZE_V1, 1, 0);
+                                        RESOURCE_DESC_SIZE_V1, 1, 1, 0);
 }
 
 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
index 59b3c056f3297bae3a101192943a944772f3d5c7..c0f7167049b74a0327157e17583fbe1f3e6a3abf 100644 (file)
@@ -1835,6 +1835,7 @@ struct be_cmd_req_set_ext_fat_caps {
 #define PORT_RESOURCE_DESC_TYPE_V1             0x55
 #define MAX_RESOURCE_DESC                      264
 
+#define VFT_SHIFT                              3       /* VF template */
 #define IMM_SHIFT                              6       /* Immediate */
 #define NOSV_SHIFT                             7       /* No save */
 
@@ -1962,8 +1963,8 @@ struct be_cmd_req_set_profile_config {
        struct be_cmd_req_hdr hdr;
        u32 rsvd;
        u32 desc_count;
-       u8 desc[RESOURCE_DESC_SIZE_V1];
-};
+       u8 desc[2 * RESOURCE_DESC_SIZE_V1];
+} __packed;
 
 struct be_cmd_resp_set_profile_config {
        struct be_cmd_resp_hdr hdr;
@@ -2157,8 +2158,6 @@ int be_cmd_get_func_config(struct be_adapter *adapter,
                           struct be_resources *res);
 int be_cmd_get_profile_config(struct be_adapter *adapter,
                              struct be_resources *res, u8 domain);
-int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
-                             int size, u8 version, u8 domain);
 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
                     int vf_num);
@@ -2168,3 +2167,5 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
                                          int link_state, u8 domain);
 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
+int be_cmd_set_sriov_config(struct be_adapter *adapter,
+                           struct be_resources res, u16 num_vfs);
index 34a26e42f19d39b66b7b644ea296a58413f5e691..6297e72b77e2e1d294973057adf0f2139cf8140d 100644 (file)
@@ -1172,20 +1172,15 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       int status = 0;
 
        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
-               goto ret;
+               return 0;
 
        clear_bit(vid, adapter->vids);
-       status = be_vid_config(adapter);
-       if (!status)
-               adapter->vlans_added--;
-       else
-               set_bit(vid, adapter->vids);
-ret:
-       return status;
+       adapter->vlans_added--;
+
+       return be_vid_config(adapter);
 }
 
 static void be_clear_promisc(struct be_adapter *adapter)
@@ -3098,6 +3093,13 @@ static int be_clear(struct be_adapter *adapter)
        if (sriov_enabled(adapter))
                be_vf_clear(adapter);
 
+       /* Re-configure FW to distribute resources evenly across max-supported
+        * number of VFs, only when VFs are not already enabled.
+        */
+       if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
+               be_cmd_set_sriov_config(adapter, adapter->pool_res,
+                                       pci_sriov_get_totalvfs(adapter->pdev));
+
 #ifdef CONFIG_BE2NET_VXLAN
        be_disable_vxlan_offloads(adapter);
 #endif
@@ -3170,19 +3172,6 @@ static int be_vf_setup(struct be_adapter *adapter)
        u32 privileges;
 
        old_vfs = pci_num_vf(adapter->pdev);
-       if (old_vfs) {
-               dev_info(dev, "%d VFs are already enabled\n", old_vfs);
-               if (old_vfs != num_vfs)
-                       dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
-               adapter->num_vfs = old_vfs;
-       } else {
-               if (num_vfs > be_max_vfs(adapter))
-                       dev_info(dev, "Device supports %d VFs and not %d\n",
-                                be_max_vfs(adapter), num_vfs);
-               adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
-               if (!adapter->num_vfs)
-                       return 0;
-       }
 
        status = be_vf_setup_init(adapter);
        if (status)
@@ -3194,17 +3183,15 @@ static int be_vf_setup(struct be_adapter *adapter)
                        if (status)
                                goto err;
                }
-       } else {
-               status = be_vfs_if_create(adapter);
-               if (status)
-                       goto err;
-       }
 
-       if (old_vfs) {
                status = be_vfs_mac_query(adapter);
                if (status)
                        goto err;
        } else {
+               status = be_vfs_if_create(adapter);
+               if (status)
+                       goto err;
+
                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto err;
@@ -3270,19 +3257,7 @@ static u8 be_convert_mc_type(u32 function_mode)
 static void BEx_get_resources(struct be_adapter *adapter,
                              struct be_resources *res)
 {
-       struct pci_dev *pdev = adapter->pdev;
-       bool use_sriov = false;
-       int max_vfs = 0;
-
-       if (be_physfn(adapter) && BE3_chip(adapter)) {
-               be_cmd_get_profile_config(adapter, res, 0);
-               /* Some old versions of BE3 FW don't report max_vfs value */
-               if (res->max_vfs == 0) {
-                       max_vfs = pci_sriov_get_totalvfs(pdev);
-                       res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
-               }
-               use_sriov = res->max_vfs && sriov_want(adapter);
-       }
+       bool use_sriov = adapter->num_vfs ? 1 : 0;
 
        if (be_physfn(adapter))
                res->max_uc_mac = BE_UC_PMAC_COUNT;
@@ -3349,6 +3324,54 @@ static void be_setup_init(struct be_adapter *adapter)
                adapter->cmd_privileges = MIN_PRIVILEGES;
 }
 
+static int be_get_sriov_config(struct be_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+       struct be_resources res = {0};
+       int status, max_vfs, old_vfs;
+
+       status = be_cmd_get_profile_config(adapter, &res, 0);
+       if (status)
+               return status;
+
+       adapter->pool_res = res;
+
+       /* Some old versions of BE3 FW don't report max_vfs value */
+       if (BE3_chip(adapter) && !res.max_vfs) {
+               max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
+               res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
+       }
+
+       adapter->pool_res.max_vfs = res.max_vfs;
+       pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
+
+       if (!be_max_vfs(adapter)) {
+               if (num_vfs)
+                       dev_warn(dev, "device doesn't support SRIOV\n");
+               adapter->num_vfs = 0;
+               return 0;
+       }
+
+       /* validate num_vfs module param */
+       old_vfs = pci_num_vf(adapter->pdev);
+       if (old_vfs) {
+               dev_info(dev, "%d VFs are already enabled\n", old_vfs);
+               if (old_vfs != num_vfs)
+                       dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
+               adapter->num_vfs = old_vfs;
+       } else {
+               if (num_vfs > be_max_vfs(adapter)) {
+                       dev_info(dev, "Resources unavailable to init %d VFs\n",
+                                num_vfs);
+                       dev_info(dev, "Limiting to %d VFs\n",
+                                be_max_vfs(adapter));
+               }
+               adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
+       }
+
+       return 0;
+}
+
 static int be_get_resources(struct be_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
@@ -3374,13 +3397,6 @@ static int be_get_resources(struct be_adapter *adapter)
                        res.max_evt_qs /= 2;
                adapter->res = res;
 
-               if (be_physfn(adapter)) {
-                       status = be_cmd_get_profile_config(adapter, &res, 0);
-                       if (status)
-                               return status;
-                       adapter->res.max_vfs = res.max_vfs;
-               }
-
                dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
                         be_max_txqs(adapter), be_max_rxqs(adapter),
                         be_max_rss(adapter), be_max_eqs(adapter),
@@ -3393,7 +3409,6 @@ static int be_get_resources(struct be_adapter *adapter)
        return 0;
 }
 
-/* Routine to query per function resource limits */
 static int be_get_config(struct be_adapter *adapter)
 {
        u16 profile_id;
@@ -3411,6 +3426,26 @@ static int be_get_config(struct be_adapter *adapter)
                if (!status)
                        dev_info(&adapter->pdev->dev,
                                 "Using profile 0x%x\n", profile_id);
+
+               status = be_get_sriov_config(adapter);
+               if (status)
+                       return status;
+
+               /* When the HW is in SRIOV capable configuration, the PF-pool
+                * resources are equally distributed across the max-number of
+                * VFs. The user may request only a subset of the max-vfs to be
+                * enabled. Based on num_vfs, redistribute the resources across
+                * num_vfs so that each VF will have access to more number of
+                * resources. This facility is not available in BE3 FW.
+                * Also, this is done by FW in Lancer chip.
+                */
+               if (!pci_num_vf(adapter->pdev)) {
+                       status = be_cmd_set_sriov_config(adapter,
+                                                        adapter->pool_res,
+                                                        adapter->num_vfs);
+                       if (status)
+                               return status;
+               }
        }
 
        status = be_get_resources(adapter);
@@ -3596,12 +3631,8 @@ static int be_setup(struct be_adapter *adapter)
                be_cmd_set_logical_link_config(adapter,
                                               IFLA_VF_LINK_STATE_AUTO, 0);
 
-       if (sriov_want(adapter)) {
-               if (be_max_vfs(adapter))
-                       be_vf_setup(adapter);
-               else
-                       dev_warn(dev, "device doesn't support SRIOV\n");
-       }
+       if (adapter->num_vfs)
+               be_vf_setup(adapter);
 
        status = be_cmd_get_phy_info(adapter);
        if (!status && be_pause_supported(adapter))
index 671d080105a7e08c5e20456a5ad38b29f6704e19..96d2a18f1b999cf419b997c343ed163abb510242 100644 (file)
@@ -308,7 +308,6 @@ struct fec_enet_private {
 
        struct  platform_device *pdev;
 
-       int     opened;
        int     dev_id;
 
        /* Phylib and MDIO interface */
index 77037fd377b85dcda23bddc3c043b8d11e9d8cb3..f43c388e2eb93414fea632944e5e5190b9549aaa 100644 (file)
@@ -373,6 +373,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
        skb_frag_t *this_frag;
        unsigned int index;
        void *bufaddr;
+       dma_addr_t addr;
        int i;
 
        for (frag = 0; frag < nr_frags; frag++) {
@@ -415,15 +416,16 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
                                swap_buffer(bufaddr, frag_len);
                }
 
-               bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
-                                               frag_len, DMA_TO_DEVICE);
-               if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+               addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
+                                     DMA_TO_DEVICE);
+               if (dma_mapping_error(&fep->pdev->dev, addr)) {
                        dev_kfree_skb_any(skb);
                        if (net_ratelimit())
                                netdev_err(ndev, "Tx DMA memory map failed\n");
                        goto dma_mapping_error;
                }
 
+               bdp->cbd_bufaddr = addr;
                bdp->cbd_datlen = frag_len;
                bdp->cbd_sc = status;
        }
@@ -450,6 +452,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
        int nr_frags = skb_shinfo(skb)->nr_frags;
        struct bufdesc *bdp, *last_bdp;
        void *bufaddr;
+       dma_addr_t addr;
        unsigned short status;
        unsigned short buflen;
        unsigned int estatus = 0;
@@ -490,12 +493,9 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
                        swap_buffer(bufaddr, buflen);
        }
 
-       /* Push the data cache so the CPM does not get stale memory
-        * data.
-        */
-       bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
-                                       buflen, DMA_TO_DEVICE);
-       if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+       /* Push the data cache so the CPM does not get stale memory data. */
+       addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
+       if (dma_mapping_error(&fep->pdev->dev, addr)) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "Tx DMA memory map failed\n");
@@ -537,6 +537,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
        fep->tx_skbuff[index] = skb;
 
        bdp->cbd_datlen = buflen;
+       bdp->cbd_bufaddr = addr;
 
        /* Send it on its way.  Tell FEC it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
@@ -570,12 +571,12 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
        unsigned short status;
        unsigned int estatus = 0;
+       dma_addr_t addr;
 
        status = bdp->cbd_sc;
        status &= ~BD_ENET_TX_STATS;
 
        status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
-       bdp->cbd_datlen = size;
 
        if (((unsigned long) data) & FEC_ALIGNMENT ||
                id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
@@ -586,15 +587,17 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
                        swap_buffer(data, size);
        }
 
-       bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
-                                       size, DMA_TO_DEVICE);
-       if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+       addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
+       if (dma_mapping_error(&fep->pdev->dev, addr)) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "Tx DMA memory map failed\n");
                return NETDEV_TX_BUSY;
        }
 
+       bdp->cbd_datlen = size;
+       bdp->cbd_bufaddr = addr;
+
        if (fep->bufdesc_ex) {
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -801,7 +804,7 @@ static void fec_enet_bd_init(struct net_device *dev)
 
                /* Initialize the BD for every fragment in the page. */
                bdp->cbd_sc = 0;
-               if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
+               if (fep->tx_skbuff[i]) {
                        dev_kfree_skb_any(fep->tx_skbuff[i]);
                        fep->tx_skbuff[i] = NULL;
                }
@@ -834,7 +837,7 @@ fec_restart(struct net_device *ndev, int duplex)
        if (netif_running(ndev)) {
                netif_device_detach(ndev);
                napi_disable(&fep->napi);
-               netif_stop_queue(ndev);
+               netif_tx_disable(ndev);
                netif_tx_lock_bh(ndev);
        }
 
@@ -1100,6 +1103,7 @@ fec_enet_tx(struct net_device *ndev)
                index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
 
                skb = fep->tx_skbuff[index];
+               fep->tx_skbuff[index] = NULL;
                if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
                        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                        bdp->cbd_datlen, DMA_TO_DEVICE);
@@ -1154,7 +1158,6 @@ fec_enet_tx(struct net_device *ndev)
 
                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
-               fep->tx_skbuff[index] = NULL;
 
                fep->dirty_tx = bdp;
 
@@ -1215,9 +1218,6 @@ fec_enet_rx(struct net_device *ndev, int budget)
                if ((status & BD_ENET_RX_LAST) == 0)
                        netdev_err(ndev, "rcv is not +last\n");
 
-               if (!fep->opened)
-                       goto rx_processing_done;
-
                /* Check for errors. */
                if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
                           BD_ENET_RX_CR | BD_ENET_RX_OV)) {
@@ -1369,29 +1369,25 @@ fec_enet_interrupt(int irq, void *dev_id)
 {
        struct net_device *ndev = dev_id;
        struct fec_enet_private *fep = netdev_priv(ndev);
+       const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
        uint int_events;
        irqreturn_t ret = IRQ_NONE;
 
-       do {
-               int_events = readl(fep->hwp + FEC_IEVENT);
-               writel(int_events, fep->hwp + FEC_IEVENT);
+       int_events = readl(fep->hwp + FEC_IEVENT);
+       writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
 
-               if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
-                       ret = IRQ_HANDLED;
+       if (int_events & napi_mask) {
+               ret = IRQ_HANDLED;
 
-                       /* Disable the RX interrupt */
-                       if (napi_schedule_prep(&fep->napi)) {
-                               writel(FEC_RX_DISABLED_IMASK,
-                                       fep->hwp + FEC_IMASK);
-                               __napi_schedule(&fep->napi);
-                       }
-               }
+               /* Disable the NAPI interrupts */
+               writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+               napi_schedule(&fep->napi);
+       }
 
-               if (int_events & FEC_ENET_MII) {
-                       ret = IRQ_HANDLED;
-                       complete(&fep->mdio_done);
-               }
-       } while (int_events);
+       if (int_events & FEC_ENET_MII) {
+               ret = IRQ_HANDLED;
+               complete(&fep->mdio_done);
+       }
 
        return ret;
 }
@@ -1399,8 +1395,16 @@ fec_enet_interrupt(int irq, void *dev_id)
 static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 {
        struct net_device *ndev = napi->dev;
-       int pkts = fec_enet_rx(ndev, budget);
        struct fec_enet_private *fep = netdev_priv(ndev);
+       int pkts;
+
+       /*
+        * Clear any pending transmit or receive interrupts before
+        * processing the rings to avoid racing with the hardware.
+        */
+       writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);
+
+       pkts = fec_enet_rx(ndev, budget);
 
        fec_enet_tx(ndev);
 
@@ -1667,6 +1671,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        /* mask with MAC supported features */
        if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
                phy_dev->supported &= PHY_GBIT_FEATURES;
+               phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
 #if !defined(CONFIG_M5272)
                phy_dev->supported |= SUPPORTED_Pause;
 #endif
@@ -1870,6 +1875,9 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       if (!fep->phy_dev)
+               return -ENODEV;
+
        if (pause->tx_pause != pause->rx_pause) {
                netdev_info(ndev,
                        "hardware only support enable/disable both tx and rx");
@@ -1896,7 +1904,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
                phy_start_aneg(fep->phy_dev);
        }
        if (netif_running(ndev))
-               fec_restart(ndev, 0);
+               fec_restart(ndev, fep->full_duplex);
 
        return 0;
 }
@@ -2061,18 +2069,23 @@ static void fec_enet_free_buffers(struct net_device *ndev)
        bdp = fep->rx_bd_base;
        for (i = 0; i < fep->rx_ring_size; i++) {
                skb = fep->rx_skbuff[i];
-
-               if (bdp->cbd_bufaddr)
+               fep->rx_skbuff[i] = NULL;
+               if (skb) {
                        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                        FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
-               if (skb)
                        dev_kfree_skb(skb);
+               }
                bdp = fec_enet_get_nextdesc(bdp, fep);
        }
 
        bdp = fep->tx_bd_base;
-       for (i = 0; i < fep->tx_ring_size; i++)
+       for (i = 0; i < fep->tx_ring_size; i++) {
                kfree(fep->tx_bounce[i]);
+               fep->tx_bounce[i] = NULL;
+               skb = fep->tx_skbuff[i];
+               fep->tx_skbuff[i] = NULL;
+               dev_kfree_skb(skb);
+       }
 }
 
 static int fec_enet_alloc_buffers(struct net_device *ndev)
@@ -2084,21 +2097,23 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 
        bdp = fep->rx_bd_base;
        for (i = 0; i < fep->rx_ring_size; i++) {
+               dma_addr_t addr;
+
                skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
-               if (!skb) {
-                       fec_enet_free_buffers(ndev);
-                       return -ENOMEM;
-               }
-               fep->rx_skbuff[i] = skb;
+               if (!skb)
+                       goto err_alloc;
 
-               bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
+               addr = dma_map_single(&fep->pdev->dev, skb->data,
                                FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
-               if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
-                       fec_enet_free_buffers(ndev);
+               if (dma_mapping_error(&fep->pdev->dev, addr)) {
+                       dev_kfree_skb(skb);
                        if (net_ratelimit())
                                netdev_err(ndev, "Rx DMA memory map failed\n");
-                       return -ENOMEM;
+                       goto err_alloc;
                }
+
+               fep->rx_skbuff[i] = skb;
+               bdp->cbd_bufaddr = addr;
                bdp->cbd_sc = BD_ENET_RX_EMPTY;
 
                if (fep->bufdesc_ex) {
@@ -2116,6 +2131,8 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
        bdp = fep->tx_bd_base;
        for (i = 0; i < fep->tx_ring_size; i++) {
                fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+               if (!fep->tx_bounce[i])
+                       goto err_alloc;
 
                bdp->cbd_sc = 0;
                bdp->cbd_bufaddr = 0;
@@ -2133,6 +2150,10 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
        bdp->cbd_sc |= BD_SC_WRAP;
 
        return 0;
+
+ err_alloc:
+       fec_enet_free_buffers(ndev);
+       return -ENOMEM;
 }
 
 static int
@@ -2164,7 +2185,6 @@ fec_enet_open(struct net_device *ndev)
        napi_enable(&fep->napi);
        phy_start(fep->phy_dev);
        netif_start_queue(ndev);
-       fep->opened = 1;
        return 0;
 }
 
@@ -2173,16 +2193,15 @@ fec_enet_close(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       phy_stop(fep->phy_dev);
+
        /* Don't know what to do yet. */
        napi_disable(&fep->napi);
-       fep->opened = 0;
-       netif_stop_queue(ndev);
+       netif_tx_disable(ndev);
        fec_stop(ndev);
 
-       if (fep->phy_dev) {
-               phy_stop(fep->phy_dev);
-               phy_disconnect(fep->phy_dev);
-       }
+       phy_disconnect(fep->phy_dev);
+       fep->phy_dev = NULL;
 
        fec_enet_clk_enable(ndev, false);
        pinctrl_pm_select_sleep_state(&fep->pdev->dev);
@@ -2662,6 +2681,7 @@ fec_suspend(struct device *dev)
        struct fec_enet_private *fep = netdev_priv(ndev);
 
        if (netif_running(ndev)) {
+               phy_stop(fep->phy_dev);
                fec_stop(ndev);
                netif_device_detach(ndev);
        }
@@ -2695,6 +2715,7 @@ fec_resume(struct device *dev)
        if (netif_running(ndev)) {
                fec_restart(ndev, fep->full_duplex);
                netif_device_attach(ndev);
+               phy_start(fep->phy_dev);
        }
 
        return 0;
index 6b56f85951e581826afc152109d0eee4b53dd08d..ab92f67da035f2f5f9aaa8ea4effa87ca83a2a41 100644 (file)
@@ -256,23 +256,21 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
                      u16 ts_seqid, u8 ts_msgtype)
 {
        u16 *seqid;
-       unsigned int offset;
+       unsigned int offset = 0;
        u8 *msgtype, *data = skb->data;
 
-       switch (ptp_class) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V2_IPV4:
-               offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
-               break;
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV6:
-               offset = OFF_PTP6;
+       if (ptp_class & PTP_CLASS_VLAN)
+               offset += VLAN_HLEN;
+
+       switch (ptp_class & PTP_CLASS_PMASK) {
+       case PTP_CLASS_IPV4:
+               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_L2:
-               offset = ETH_HLEN;
+       case PTP_CLASS_IPV6:
+               offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_VLAN:
-               offset = ETH_HLEN + VLAN_HLEN;
+       case PTP_CLASS_L2:
+               offset += ETH_HLEN;
                break;
        default:
                return 0;
index 62b19be5183d3349433ee5855ad082b1560b5c69..6078342fe3f24de7fe8df95ee806cf28a26f6a6b 100644 (file)
@@ -69,10 +69,6 @@ MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
 MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
 MODULE_LICENSE("GPL");
 
-
-/* Define this to enable Link beat monitoring */
-#undef MONITOR
-
 /* Turn on debugging. See Documentation/networking/tlan.txt for details */
 static  int            debug;
 module_param(debug, int, 0);
@@ -107,8 +103,10 @@ static struct board {
        { "Compaq Netelligent 10/100 TX Embedded UTP",
          TLAN_ADAPTER_NONE, 0x83 },
        { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
-       { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
-       { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
+       { "Olicom OC-2325", TLAN_ADAPTER_ACTIVITY_LED |
+         TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+       { "Olicom OC-2326", TLAN_ADAPTER_ACTIVITY_LED |
+         TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
        { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
        { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
        { "Compaq NetFlex-3/E",
@@ -192,9 +190,7 @@ static void tlan_phy_power_up(struct net_device *);
 static void    tlan_phy_reset(struct net_device *);
 static void    tlan_phy_start_link(struct net_device *);
 static void    tlan_phy_finish_auto_neg(struct net_device *);
-#ifdef MONITOR
-static void     tlan_phy_monitor(struct net_device *);
-#endif
+static void     tlan_phy_monitor(unsigned long);
 
 /*
   static int   tlan_phy_nop(struct net_device *);
@@ -337,6 +333,7 @@ static void tlan_stop(struct net_device *dev)
 {
        struct tlan_priv *priv = netdev_priv(dev);
 
+       del_timer_sync(&priv->media_timer);
        tlan_read_and_clear_stats(dev, TLAN_RECORD);
        outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
        /* Reset and power down phy */
@@ -368,8 +365,10 @@ static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
 static int tlan_resume(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
+       int rc = pci_enable_device(pdev);
 
-       pci_set_power_state(pdev, PCI_D0);
+       if (rc)
+               return rc;
        pci_restore_state(pdev);
        pci_enable_wake(pdev, PCI_D0, 0);
        netif_device_attach(dev);
@@ -781,7 +780,43 @@ static const struct net_device_ops tlan_netdev_ops = {
 #endif
 };
 
+static void tlan_get_drvinfo(struct net_device *dev,
+                            struct ethtool_drvinfo *info)
+{
+       struct tlan_priv *priv = netdev_priv(dev);
+
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       if (priv->pci_dev)
+               strlcpy(info->bus_info, pci_name(priv->pci_dev),
+                       sizeof(info->bus_info));
+       else
+               strlcpy(info->bus_info, "EISA", sizeof(info->bus_info));
+       info->eedump_len = TLAN_EEPROM_SIZE;
+}
+
+static int tlan_get_eeprom_len(struct net_device *dev)
+{
+       return TLAN_EEPROM_SIZE;
+}
+
+static int tlan_get_eeprom(struct net_device *dev,
+                          struct ethtool_eeprom *eeprom, u8 *data)
+{
+       int i;
+
+       for (i = 0; i < TLAN_EEPROM_SIZE; i++)
+               if (tlan_ee_read_byte(dev, i, &data[i]))
+                       return -EIO;
 
+       return 0;
+}
+
+static const struct ethtool_ops tlan_ethtool_ops = {
+       .get_drvinfo    = tlan_get_drvinfo,
+       .get_link       = ethtool_op_get_link,
+       .get_eeprom_len = tlan_get_eeprom_len,
+       .get_eeprom     = tlan_get_eeprom,
+};
 
 /***************************************************************
  *     tlan_init
@@ -830,7 +865,7 @@ static int tlan_init(struct net_device *dev)
                priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
 
        err = 0;
-       for (i = 0;  i < 6 ; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                err |= tlan_ee_read_byte(dev,
                                         (u8) priv->adapter->addr_ofs + i,
                                         (u8 *) &dev->dev_addr[i]);
@@ -838,12 +873,20 @@ static int tlan_init(struct net_device *dev)
                pr_err("%s: Error reading MAC from eeprom: %d\n",
                       dev->name, err);
        }
-       dev->addr_len = 6;
+       /* Olicom OC-2325/OC-2326 have the address byte-swapped */
+       if (priv->adapter->addr_ofs == 0xf8) {
+               for (i = 0; i < ETH_ALEN; i += 2) {
+                       char tmp = dev->dev_addr[i];
+                       dev->dev_addr[i] = dev->dev_addr[i + 1];
+                       dev->dev_addr[i + 1] = tmp;
+               }
+       }
 
        netif_carrier_off(dev);
 
        /* Device methods */
        dev->netdev_ops = &tlan_netdev_ops;
+       dev->ethtool_ops = &tlan_ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
        return 0;
@@ -886,6 +929,7 @@ static int tlan_open(struct net_device *dev)
        }
 
        init_timer(&priv->timer);
+       init_timer(&priv->media_timer);
 
        tlan_start(dev);
 
@@ -1156,9 +1200,6 @@ static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
 
 static int tlan_close(struct net_device *dev)
 {
-       struct tlan_priv *priv = netdev_priv(dev);
-
-       priv->neg_be_verbose = 0;
        tlan_stop(dev);
 
        free_irq(dev->irq, dev);
@@ -1808,11 +1849,6 @@ static void tlan_timer(unsigned long data)
        priv->timer.function = NULL;
 
        switch (priv->timer_type) {
-#ifdef MONITOR
-       case TLAN_TIMER_LINK_BEAT:
-               tlan_phy_monitor(dev);
-               break;
-#endif
        case TLAN_TIMER_PHY_PDOWN:
                tlan_phy_power_down(dev);
                break;
@@ -1856,8 +1892,6 @@ static void tlan_timer(unsigned long data)
 }
 
 
-
-
 /*****************************************************************************
 ******************************************************************************
 
@@ -2205,7 +2239,9 @@ tlan_reset_adapter(struct net_device *dev)
                }
        }
 
-       if (priv->phy_num == 0)
+       /* don't power down internal PHY if we're going to use it */
+       if (priv->phy_num == 0 ||
+          (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))
                data |= TLAN_NET_CFG_PHY_EN;
        tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
 
@@ -2255,42 +2291,39 @@ tlan_finish_reset(struct net_device *dev)
                tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
                udelay(1000);
                tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
-               if ((status & MII_GS_LINK) &&
-                   /* We only support link info on Nat.Sem. PHY's */
-                   (tlphy_id1 == NAT_SEM_ID1) &&
-                   (tlphy_id2 == NAT_SEM_ID2)) {
-                       tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
-                       tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
-
-                       netdev_info(dev,
-                                   "Link active with %s %uMbps %s-Duplex\n",
-                                   !(tlphy_par & TLAN_PHY_AN_EN_STAT)
-                                   ? "forced" : "Autonegotiation enabled,",
-                                   tlphy_par & TLAN_PHY_SPEED_100
-                                   ? 100 : 10,
-                                   tlphy_par & TLAN_PHY_DUPLEX_FULL
-                                   ? "Full" : "Half");
-
-                       if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
-                               netdev_info(dev, "Partner capability:");
-                               for (i = 5; i < 10; i++)
-                                       if (partner & (1 << i))
-                                               pr_cont(" %s", media[i-5]);
-                               pr_cont("\n");
-                       }
-
-                       tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
-                                       TLAN_LED_LINK);
-#ifdef MONITOR
-                       /* We have link beat..for now anyway */
-                       priv->link = 1;
-                       /*Enabling link beat monitoring */
-                       tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
-#endif
-               } else if (status & MII_GS_LINK)  {
-                       netdev_info(dev, "Link active\n");
-                       tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
-                                       TLAN_LED_LINK);
+               if (status & MII_GS_LINK) {
+                       /* We only support link info on Nat.Sem. PHY's */
+                       if ((tlphy_id1 == NAT_SEM_ID1) &&
+                           (tlphy_id2 == NAT_SEM_ID2)) {
+                               tlan_mii_read_reg(dev, phy, MII_AN_LPA,
+                                       &partner);
+                               tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR,
+                                       &tlphy_par);
+
+                               netdev_info(dev,
+                                       "Link active, %s %uMbps %s-Duplex\n",
+                                       !(tlphy_par & TLAN_PHY_AN_EN_STAT)
+                                       ? "forced" : "Autonegotiation enabled,",
+                                       tlphy_par & TLAN_PHY_SPEED_100
+                                       ? 100 : 10,
+                                       tlphy_par & TLAN_PHY_DUPLEX_FULL
+                                       ? "Full" : "Half");
+
+                               if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
+                                       netdev_info(dev, "Partner capability:");
+                                       for (i = 5; i < 10; i++)
+                                               if (partner & (1 << i))
+                                                       pr_cont(" %s",
+                                                               media[i-5]);
+                                       pr_cont("\n");
+                               }
+                       } else
+                               netdev_info(dev, "Link active\n");
+                       /* Enabling link beat monitoring */
+                       priv->media_timer.function = tlan_phy_monitor;
+                       priv->media_timer.data = (unsigned long) dev;
+                       priv->media_timer.expires = jiffies + HZ;
+                       add_timer(&priv->media_timer);
                }
        }
 
@@ -2312,6 +2345,7 @@ tlan_finish_reset(struct net_device *dev)
                             dev->base_addr + TLAN_HOST_CMD + 1);
                outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
                outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
+               tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
                netif_carrier_on(dev);
        } else {
                netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
@@ -2494,9 +2528,10 @@ static void tlan_phy_power_down(struct net_device *dev)
        value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
        tlan_mii_sync(dev->base_addr);
        tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
-       if ((priv->phy_num == 0) &&
-           (priv->phy[1] != TLAN_PHY_NONE) &&
-           (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
+       if ((priv->phy_num == 0) && (priv->phy[1] != TLAN_PHY_NONE)) {
+               /* if using internal PHY, the external PHY must be powered on */
+               if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10)
+                       value = MII_GC_ISOLATE; /* just isolate it from MII */
                tlan_mii_sync(dev->base_addr);
                tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
        }
@@ -2538,6 +2573,7 @@ static void tlan_phy_reset(struct net_device *dev)
        struct tlan_priv        *priv = netdev_priv(dev);
        u16             phy;
        u16             value;
+       unsigned long timeout = jiffies + HZ;
 
        phy = priv->phy[priv->phy_num];
 
@@ -2545,9 +2581,13 @@ static void tlan_phy_reset(struct net_device *dev)
        tlan_mii_sync(dev->base_addr);
        value = MII_GC_LOOPBK | MII_GC_RESET;
        tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
-       tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
-       while (value & MII_GC_RESET)
+       do {
                tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+               if (time_after(jiffies, timeout)) {
+                       netdev_err(dev, "PHY reset timeout\n");
+                       return;
+               }
+       } while (value & MII_GC_RESET);
 
        /* Wait for 500 ms and initialize.
         * I don't remember why I wait this long.
@@ -2653,7 +2693,6 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
        struct tlan_priv        *priv = netdev_priv(dev);
        u16             an_adv;
        u16             an_lpa;
-       u16             data;
        u16             mode;
        u16             phy;
        u16             status;
@@ -2668,13 +2707,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
                /* Wait for 8 sec to give the process
                 * more time.  Perhaps we should fail after a while.
                 */
-               if (!priv->neg_be_verbose++) {
-                       pr_info("Giving autonegotiation more time.\n");
-                       pr_info("Please check that your adapter has\n");
-                       pr_info("been properly connected to a HUB or Switch.\n");
-                       pr_info("Trying to establish link in the background...\n");
-               }
-               tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
+               tlan_set_timer(dev, 2 * HZ, TLAN_TIMER_PHY_FINISH_AN);
                return;
        }
 
@@ -2687,13 +2720,11 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
        else if (!(mode & 0x0080) && (mode & 0x0040))
                priv->tlan_full_duplex = true;
 
+       /* switch to internal PHY for 10 Mbps */
        if ((!(mode & 0x0180)) &&
            (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
            (priv->phy_num != 0)) {
                priv->phy_num = 0;
-               data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
-                       | TLAN_NET_CFG_PHY_EN;
-               tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
                tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
                return;
        }
@@ -2717,7 +2748,6 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
 
 }
 
-#ifdef MONITOR
 
 /*********************************************************************
  *
@@ -2727,18 +2757,18 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
  *           None
  *
  *     Params:
- *           dev            The device structure of this device.
+ *           data           The device structure of this device.
  *
  *
  *     This function monitors PHY condition by reading the status
- *     register via the MII bus. This can be used to give info
- *     about link changes (up/down), and possible switch to alternate
- *     media.
+ *     register via the MII bus, controls LINK LED and notifies the
+ *     kernel about link state.
  *
  *******************************************************************/
 
-void tlan_phy_monitor(struct net_device *dev)
+static void tlan_phy_monitor(unsigned long data)
 {
+       struct net_device *dev = (struct net_device *) data;
        struct tlan_priv *priv = netdev_priv(dev);
        u16     phy;
        u16     phy_status;
@@ -2750,30 +2780,40 @@ void tlan_phy_monitor(struct net_device *dev)
 
        /* Check if link has been lost */
        if (!(phy_status & MII_GS_LINK)) {
-               if (priv->link) {
-                       priv->link = 0;
+               if (netif_carrier_ok(dev)) {
                        printk(KERN_DEBUG "TLAN: %s has lost link\n",
                               dev->name);
+                       tlan_dio_write8(dev->base_addr, TLAN_LED_REG, 0);
                        netif_carrier_off(dev);
-                       tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
-                       return;
+                       if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) {
+                               /* power down internal PHY */
+                               u16 data = MII_GC_PDOWN | MII_GC_LOOPBK |
+                                          MII_GC_ISOLATE;
+
+                               tlan_mii_sync(dev->base_addr);
+                               tlan_mii_write_reg(dev, priv->phy[0],
+                                                  MII_GEN_CTL, data);
+                               /* set to external PHY */
+                               priv->phy_num = 1;
+                               /* restart autonegotiation */
+                               tlan_set_timer(dev, 4 * HZ / 10,
+                                              TLAN_TIMER_PHY_PDOWN);
+                               return;
+                       }
                }
        }
 
        /* Link restablished? */
-       if ((phy_status & MII_GS_LINK) && !priv->link) {
-               priv->link = 1;
+       if ((phy_status & MII_GS_LINK) && !netif_carrier_ok(dev)) {
+               tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
                printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
                       dev->name);
                netif_carrier_on(dev);
        }
-
-       /* Setup a new monitor */
-       tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+       priv->media_timer.expires = jiffies + HZ;
+       add_timer(&priv->media_timer);
 }
 
-#endif /* MONITOR */
-
 
 /*****************************************************************************
 ******************************************************************************
index 2eb33a250788abca1235d3ee93d809b422eb328d..e9928411827e9e58e09cb81cee6dcdb419ba042b 100644 (file)
@@ -195,6 +195,7 @@ struct tlan_priv {
        u32                     timer_set_at;
        u32                     timer_type;
        struct timer_list       timer;
+       struct timer_list       media_timer;
        struct board            *adapter;
        u32                     adapter_rev;
        u32                     aui;
@@ -206,9 +207,7 @@ struct tlan_priv {
        u8                      tlan_rev;
        u8                      tlan_full_duplex;
        spinlock_t              lock;
-       u8                      link;
        struct work_struct                      tlan_tqueue;
-       u8                      neg_be_verbose;
 };
 
 
@@ -219,7 +218,6 @@ struct tlan_priv {
         *
         ****************************************************************/
 
-#define TLAN_TIMER_LINK_BEAT           1
 #define TLAN_TIMER_ACTIVITY            2
 #define TLAN_TIMER_PHY_PDOWN           3
 #define TLAN_TIMER_PHY_PUP             4
@@ -241,6 +239,7 @@ struct tlan_priv {
 #define TLAN_EEPROM_ACK                0
 #define TLAN_EEPROM_STOP       1
 
+#define TLAN_EEPROM_SIZE       256
 
 
 
index 293ad064905d6058bb72d5e4b68f832c9053451a..53bd1af68422f53e9d831d0e07319ec154bbcf55 100644 (file)
@@ -856,20 +856,18 @@ static int is_sync(struct sk_buff *skb, int type)
        u8 *data = skb->data, *msgtype;
        unsigned int offset = 0;
 
-       switch (type) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V2_IPV4:
-               offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
-               break;
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV6:
-               offset = OFF_PTP6;
+       if (type & PTP_CLASS_VLAN)
+               offset += VLAN_HLEN;
+
+       switch (type & PTP_CLASS_PMASK) {
+       case PTP_CLASS_IPV4:
+               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_L2:
-               offset = ETH_HLEN;
+       case PTP_CLASS_IPV6:
+               offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_VLAN:
-               offset = ETH_HLEN + VLAN_HLEN;
+       case PTP_CLASS_L2:
+               offset += ETH_HLEN;
                break;
        default:
                return 0;
@@ -889,25 +887,23 @@ static int is_sync(struct sk_buff *skb, int type)
 static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
 {
        u16 *seqid;
-       unsigned int offset;
+       unsigned int offset = 0;
        u8 *msgtype, *data = skb_mac_header(skb);
 
        /* check sequenceID, messageType, 12 bit hash of offset 20-29 */
 
-       switch (type) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V2_IPV4:
-               offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
-               break;
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV6:
-               offset = OFF_PTP6;
+       if (type & PTP_CLASS_VLAN)
+               offset += VLAN_HLEN;
+
+       switch (type & PTP_CLASS_PMASK) {
+       case PTP_CLASS_IPV4:
+               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_L2:
-               offset = ETH_HLEN;
+       case PTP_CLASS_IPV6:
+               offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_VLAN:
-               offset = ETH_HLEN + VLAN_HLEN;
+       case PTP_CLASS_L2:
+               offset += ETH_HLEN;
                break;
        default:
                return 0;
index ade33ef82823b230a34890d77af039d8531aefb7..c2d360150804b82e6846da53072e936433122ba0 100644 (file)
@@ -1570,25 +1570,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
        return false;
 }
 
-/* Compute source port for outgoing packet
- *   first choice to use L4 flow hash since it will spread
- *     better and maybe available from hardware
- *   secondary choice is to use jhash on the Ethernet header
- */
-__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
-{
-       unsigned int range = (port_max - port_min) + 1;
-       u32 hash;
-
-       hash = skb_get_hash(skb);
-       if (!hash)
-               hash = jhash(skb->data, 2 * ETH_ALEN,
-                            (__force u32) skb->protocol);
-
-       return htons((((u64) hash * range) >> 32) + port_min);
-}
-EXPORT_SYMBOL_GPL(vxlan_src_port);
-
 static inline struct sk_buff *vxlan_handle_offloads(struct sk_buff *skb,
                                                    bool udp_csum)
 {
@@ -1807,7 +1788,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        if (tos == 1)
                tos = ip_tunnel_get_dsfield(old_iph, skb);
 
-       src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb);
+       src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
+                                    vxlan->port_max, true);
 
        if (dst->sa.sa_family == AF_INET) {
                memset(&fl4, 0, sizeof(fl4));
@@ -2235,7 +2217,6 @@ static void vxlan_setup(struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        unsigned int h;
-       int low, high;
 
        eth_hw_addr_random(dev);
        ether_setup(dev);
@@ -2272,9 +2253,6 @@ static void vxlan_setup(struct net_device *dev)
        vxlan->age_timer.function = vxlan_cleanup;
        vxlan->age_timer.data = (unsigned long) vxlan;
 
-       inet_get_local_port_range(dev_net(dev), &low, &high);
-       vxlan->port_min = low;
-       vxlan->port_max = high;
        vxlan->dst_port = htons(vxlan_port);
 
        vxlan->dev = dev;
index 5dc68c3ebcbdaebca5961314ff1557e216e91256..ff560537dd61b334a3dfbfb559dfa0f1cff20f41 100644 (file)
@@ -199,7 +199,8 @@ struct ipv6_pinfo {
                                                 * 010: prefer public address
                                                 * 100: prefer care-of address
                                                 */
-                               dontfrag:1;
+                               dontfrag:1,
+                               autoflowlabel:1;
        __u8                    min_hopcount;
        __u8                    tclass;
        __be32                  rcv_flowinfo;
index 66f9a04ec27041445afddbb70729b039719387c5..8b43a28ee0bc65eaadfd7b1d2f4a5af71daf639b 100644 (file)
@@ -2486,7 +2486,7 @@ static inline int netif_set_xps_queue(struct net_device *dev,
  * as a distribution range limit for the returned value.
  */
 static inline u16 skb_tx_hash(const struct net_device *dev,
-                             const struct sk_buff *skb)
+                             struct sk_buff *skb)
 {
        return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
 }
index 7dfed71d76a6e9823b75a5b47f353543a234cd36..159c987b1853106fc13e36391f8534383c947d46 100644 (file)
@@ -33,8 +33,8 @@
 #define PTP_CLASS_IPV4  0x10 /* event in an IPV4 UDP packet */
 #define PTP_CLASS_IPV6  0x20 /* event in an IPV6 UDP packet */
 #define PTP_CLASS_L2    0x30 /* event in a L2 packet */
-#define PTP_CLASS_VLAN  0x40 /* event in a VLAN tagged L2 packet */
-#define PTP_CLASS_PMASK 0xf0 /* mask for the packet type field */
+#define PTP_CLASS_PMASK 0x30 /* mask for the packet type field */
+#define PTP_CLASS_VLAN  0x40 /* event in a VLAN tagged packet */
 
 #define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4)
 #define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */
@@ -54,7 +54,6 @@
 #define IP6_HLEN       40
 #define UDP_HLEN       8
 #define OFF_IHL                14
-#define OFF_PTP6       (ETH_HLEN + IP6_HLEN + UDP_HLEN)
 #define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
 
 #if defined(CONFIG_NET_PTP_CLASSIFY)
index ec89301ada418aff749bf924da1b7f63040ac048..890fb3307dd620b623a692b4a47cf9e802be2ef0 100644 (file)
@@ -455,6 +455,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
  *     @ooo_okay: allow the mapping of a socket to a queue to be changed
  *     @l4_hash: indicate hash is a canonical 4-tuple hash over transport
  *             ports.
+ *     @sw_hash: indicates hash was computed in software stack
  *     @wifi_acked_valid: wifi_acked was set
  *     @wifi_acked: whether frame was acked on wifi or not
  *     @no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
@@ -562,6 +563,7 @@ struct sk_buff {
        __u8                    pfmemalloc:1;
        __u8                    ooo_okay:1;
        __u8                    l4_hash:1;
+       __u8                    sw_hash:1;
        __u8                    wifi_acked_valid:1;
        __u8                    wifi_acked:1;
        __u8                    no_fcs:1;
@@ -575,7 +577,7 @@ struct sk_buff {
        __u8                    encap_hdr_csum:1;
        __u8                    csum_valid:1;
        __u8                    csum_complete_sw:1;
-       /* 3/5 bit hole (depending on ndisc_nodetype presence) */
+       /* 2/4 bit hole (depending on ndisc_nodetype presence) */
        kmemcheck_bitfield_end(flags2);
 
 #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
@@ -830,13 +832,14 @@ static inline void
 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
 {
        skb->l4_hash = (type == PKT_HASH_TYPE_L4);
+       skb->sw_hash = 0;
        skb->hash = hash;
 }
 
 void __skb_get_hash(struct sk_buff *skb);
 static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
-       if (!skb->l4_hash)
+       if (!skb->l4_hash && !skb->sw_hash)
                __skb_get_hash(skb);
 
        return skb->hash;
@@ -850,6 +853,7 @@ static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
 static inline void skb_clear_hash(struct sk_buff *skb)
 {
        skb->hash = 0;
+       skb->sw_hash = 0;
        skb->l4_hash = 0;
 }
 
@@ -862,6 +866,7 @@ static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 {
        to->hash = from->hash;
+       to->sw_hash = from->sw_hash;
        to->l4_hash = from->l4_hash;
 };
 
@@ -3005,7 +3010,7 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
        return skb->queue_mapping != 0;
 }
 
-u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
                  unsigned int num_tx_queues);
 
 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
index fbefdca5e28394b8b88abcd1f852090b0bdf8985..6667a054763adfad3407d59d0f6810fb13f56a14 100644 (file)
@@ -29,4 +29,5 @@ struct flow_keys {
 
 bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
 __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto);
+u32 flow_hash_from_keys(struct flow_keys *keys);
 #endif
index 0e795df05ec983d07d7acc52cddabd728ea6e0b1..2e8f055989c3c982df11c0160ae409e98814bcab 100644 (file)
@@ -31,6 +31,7 @@
 #include <net/route.h>
 #include <net/snmp.h>
 #include <net/flow.h>
+#include <net/flow_keys.h>
 
 struct sock;
 
@@ -353,6 +354,19 @@ static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
                                  skb->len, proto, 0);
 }
 
+static inline void inet_set_txhash(struct sock *sk)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct flow_keys keys;
+
+       keys.src = inet->inet_saddr;
+       keys.dst = inet->inet_daddr;
+       keys.port16[0] = inet->inet_sport;
+       keys.port16[1] = inet->inet_dport;
+
+       sk->sk_txhash = flow_hash_from_keys(&keys);
+}
+
 /*
  *     Map a multicast IP onto multicast MAC for type ethernet.
  */
index 574337fe72ddbd3038c061dab5aef142ea0f068b..4308f2ada8b34ec8a5b83bbd7a73b6ade15092cb 100644 (file)
@@ -19,6 +19,7 @@
 #include <net/if_inet6.h>
 #include <net/ndisc.h>
 #include <net/flow.h>
+#include <net/flow_keys.h>
 #include <net/snmp.h>
 
 #define SIN6_LEN_RFC2133       24
@@ -684,6 +685,40 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
        return hlimit;
 }
 
+static inline void ip6_set_txhash(struct sock *sk)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct flow_keys keys;
+
+       keys.src = (__force __be32)ipv6_addr_hash(&np->saddr);
+       keys.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr);
+       keys.port16[0] = inet->inet_sport;
+       keys.port16[1] = inet->inet_dport;
+
+       sk->sk_txhash = flow_hash_from_keys(&keys);
+}
+
+static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
+                                       __be32 flowlabel, bool autolabel)
+{
+       if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
+               __be32 hash;
+
+               hash = skb_get_hash(skb);
+
+               /* Since this is being sent on the wire obfuscate hash a bit
+                * to minimize possbility that any useful information to an
+                * attacker is leaked. Only lower 20 bits are relevant.
+                */
+               hash ^= hash >> 12;
+
+               flowlabel = hash & IPV6_FLOWLABEL_MASK;
+       }
+
+       return flowlabel;
+}
+
 /*
  *     Header manipulation
  */
index 19d3446e59d2555639e9553b1958d7354792af1e..eade27adecf3678ed2d1568adf34badd38ed88f9 100644 (file)
@@ -28,6 +28,7 @@ struct netns_sysctl_ipv6 {
        int ip6_rt_mtu_expires;
        int ip6_rt_min_advmss;
        int flowlabel_consistency;
+       int auto_flowlabels;
        int icmpv6_time;
        int anycast_src_echo_reply;
        int fwmark_reflect;
index 8e4de46c052ec7d78188f203df2b69b0a0b423ab..c2035c96a2ee150a833ab28aa71e510abab72a5b 100644 (file)
@@ -388,27 +388,6 @@ static inline int sctp_list_single_entry(struct list_head *head)
        return (head->next != head) && (head->next == head->prev);
 }
 
-/* Generate a random jitter in the range of -50% ~ +50% of input RTO. */
-static inline __s32 sctp_jitter(__u32 rto)
-{
-       static __u32 sctp_rand;
-       __s32 ret;
-
-       /* Avoid divide by zero. */
-       if (!rto)
-               rto = 1;
-
-       sctp_rand += jiffies;
-       sctp_rand ^= (sctp_rand << 12);
-       sctp_rand ^= (sctp_rand >> 20);
-
-       /* Choose random number from 0 to rto, then move to -50% ~ +50%
-        * of rto.
-        */
-       ret = sctp_rand % rto - (rto >> 1);
-       return ret;
-}
-
 /* Break down data chunks at this point.  */
 static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
 {
index 8d4c9473e7d721f35bb063f37318af567cb76e59..cb84b2f1ad8f16b58afe75bb0f4c469e08b36235 100644 (file)
@@ -273,6 +273,7 @@ struct cg_proto;
   *    @sk_rcvtimeo: %SO_RCVTIMEO setting
   *    @sk_sndtimeo: %SO_SNDTIMEO setting
   *    @sk_rxhash: flow hash received from netif layer
+  *    @sk_txhash: computed flow hash for use on transmit
   *    @sk_filter: socket filtering instructions
   *    @sk_protinfo: private area, net family specific, when not using slab
   *    @sk_timer: sock cleanup timer
@@ -347,6 +348,7 @@ struct sock {
 #ifdef CONFIG_RPS
        __u32                   sk_rxhash;
 #endif
+       __u32                   sk_txhash;
 #ifdef CONFIG_NET_RX_BUSY_POLL
        unsigned int            sk_napi_id;
        unsigned int            sk_ll_usec;
@@ -1980,6 +1982,14 @@ static inline void sock_poll_wait(struct file *filp,
        }
 }
 
+static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
+{
+       if (sk->sk_txhash) {
+               skb->l4_hash = 1;
+               skb->hash = sk->sk_txhash;
+       }
+}
+
 /*
  *     Queue a received datagram if it will fit. Stream and sequenced
  *     protocols can't normally use this as they need to fit buffers in
@@ -1994,6 +2004,7 @@ static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
        skb_orphan(skb);
        skb->sk = sk;
        skb->destructor = sock_wfree;
+       skb_set_hash_from_sk(skb, sk);
        /*
         * We used to take a refcount on sk, but following operation
         * is enough to guarantee sk_free() wont free this sock until
index 0d5389aecf182fecf4c43054f7548dd275be8411..c9a75dbba0c7fc01a0971ebcbc3fb477a53da461 100644 (file)
@@ -1093,7 +1093,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
        req->cookie_ts = 0;
        tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
        tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
-       tcp_rsk(req)->snt_synack = 0;
+       tcp_rsk(req)->snt_synack = tcp_time_stamp;
        req->mss = rx_opt->mss_clamp;
        req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
        ireq->tstamp_ok = rx_opt->tstamp_ok;
index 68a1fefe3dfe46c3fc8f4847fa074fdb237573bb..70f941368ace488dc93f4e78f81b1dca935cd87f 100644 (file)
@@ -176,6 +176,35 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                     int (*)(const struct sock *, const struct sock *),
                     unsigned int hash2_nulladdr);
 
+static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
+                                      int min, int max, bool use_eth)
+{
+       u32 hash;
+
+       if (min >= max) {
+               /* Use default range */
+               inet_get_local_port_range(net, &min, &max);
+       }
+
+       hash = skb_get_hash(skb);
+       if (unlikely(!hash) && use_eth) {
+               /* Can't find a normal hash, caller has indicated an Ethernet
+                * packet so use that to compute a hash.
+                */
+               hash = jhash(skb->data, 2 * ETH_ALEN,
+                            (__force u32) skb->protocol);
+       }
+
+       /* Since this is being sent on the wire obfuscate hash a bit
+                * to minimize possibility that any useful information to an
+        * attacker is leaked. Only upper 16 bits are relevant in the
+        * computation for 16 bit port value.
+        */
+       hash ^= hash << 16;
+
+       return htons((((u64) hash * (max - min)) >> 32) + min);
+}
+
 /* net/ipv4/udp.c */
 void udp_v4_early_demux(struct sk_buff *skb);
 int udp_get_port(struct sock *sk, unsigned short snum,
index 12196ce661d9e288a3d3928ccd66cefff10920a9..d5f59f3fc35df67141c8234a8741ab5b49501ad1 100644 (file)
@@ -45,8 +45,6 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
                   __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
 
-__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
-
 /* IP header + UDP + VXLAN + Ethernet header */
 #define VXLAN_HEADROOM (20 + 8 + 8 + 14)
 /* IPv6 header + UDP + VXLAN + Ethernet header */
index 0d8e0f0342dc183acd5393e11bfe203f019ca165..22b7a69619d87446904480342ede77cdb295164c 100644 (file)
@@ -233,6 +233,7 @@ struct in6_flowlabel_req {
 #if 0  /* not yet */
 #define IPV6_USE_MIN_MTU       63
 #endif
+#define IPV6_AUTOFLOWLABEL     64
 
 /*
  * Netfilter (1)
index ad2ac3c003988741c066c2bb467c2abc4a523c5a..9d0223b16b46f935db039d58154942725c990f08 100644 (file)
@@ -385,6 +385,8 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
+       case SIOCSHWTSTAMP:
+       case SIOCGHWTSTAMP:
                if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
                        err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
                break;
index fc47baa888c54896c6ccde6352202736d3c9ba6b..f40cb0436eba1ece028c9852d5f69b9c504f62b4 100644 (file)
@@ -900,32 +900,24 @@ int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
 
        bat_kobj = &bat_priv->soft_iface->dev.kobj;
 
-       uevent_env[0] = kmalloc(strlen(BATADV_UEV_TYPE_VAR) +
-                               strlen(batadv_uev_type_str[type]) + 1,
-                               GFP_ATOMIC);
+       uevent_env[0] = kasprintf(GFP_ATOMIC,
+                                 "%s%s", BATADV_UEV_TYPE_VAR,
+                                 batadv_uev_type_str[type]);
        if (!uevent_env[0])
                goto out;
 
-       sprintf(uevent_env[0], "%s%s", BATADV_UEV_TYPE_VAR,
-               batadv_uev_type_str[type]);
-
-       uevent_env[1] = kmalloc(strlen(BATADV_UEV_ACTION_VAR) +
-                               strlen(batadv_uev_action_str[action]) + 1,
-                               GFP_ATOMIC);
+       uevent_env[1] = kasprintf(GFP_ATOMIC,
+                                 "%s%s", BATADV_UEV_ACTION_VAR,
+                                 batadv_uev_action_str[action]);
        if (!uevent_env[1])
                goto out;
 
-       sprintf(uevent_env[1], "%s%s", BATADV_UEV_ACTION_VAR,
-               batadv_uev_action_str[action]);
-
        /* If the event is DEL, ignore the data field */
        if (action != BATADV_UEV_DEL) {
-               uevent_env[2] = kmalloc(strlen(BATADV_UEV_DATA_VAR) +
-                                       strlen(data) + 1, GFP_ATOMIC);
+               uevent_env[2] = kasprintf(GFP_ATOMIC,
+                                         "%s%s", BATADV_UEV_DATA_VAR, data);
                if (!uevent_env[2])
                        goto out;
-
-               sprintf(uevent_env[2], "%s%s", BATADV_UEV_DATA_VAR, data);
        }
 
        ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
index c2b53c1b21d2aa14039198639425f83aaf4eebfd..5f362c1d03322692da59509c7d594f72255330b8 100644 (file)
@@ -80,6 +80,8 @@ ip:
        case htons(ETH_P_IPV6): {
                const struct ipv6hdr *iph;
                struct ipv6hdr _iph;
+               __be32 flow_label;
+
 ipv6:
                iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
                if (!iph)
@@ -89,6 +91,21 @@ ipv6:
                flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
                flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
                nhoff += sizeof(struct ipv6hdr);
+
+               flow_label = ip6_flowlabel(iph);
+               if (flow_label) {
+                       /* Awesome, IPv6 packet has a flow label so we can
+                        * use that to represent the ports without any
+                        * further dissection.
+                        */
+                       flow->n_proto = proto;
+                       flow->ip_proto = ip_proto;
+                       flow->ports = flow_label;
+                       flow->thoff = (u16)nhoff;
+
+                       return true;
+               }
+
                break;
        }
        case htons(ETH_P_8021AD):
@@ -196,12 +213,33 @@ static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
        return jhash_3words(a, b, c, hashrnd);
 }
 
-static __always_inline u32 __flow_hash_1word(u32 a)
+static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
 {
-       __flow_hash_secret_init();
-       return jhash_1word(a, hashrnd);
+       u32 hash;
+
+       /* get a consistent hash (same value on both flow directions) */
+       if (((__force u32)keys->dst < (__force u32)keys->src) ||
+           (((__force u32)keys->dst == (__force u32)keys->src) &&
+            ((__force u16)keys->port16[1] < (__force u16)keys->port16[0]))) {
+               swap(keys->dst, keys->src);
+               swap(keys->port16[0], keys->port16[1]);
+       }
+
+       hash = __flow_hash_3words((__force u32)keys->dst,
+                                 (__force u32)keys->src,
+                                 (__force u32)keys->ports);
+       if (!hash)
+               hash = 1;
+
+       return hash;
 }
 
+u32 flow_hash_from_keys(struct flow_keys *keys)
+{
+       return __flow_hash_from_keys(keys);
+}
+EXPORT_SYMBOL(flow_hash_from_keys);
+
 /*
  * __skb_get_hash: calculate a flow hash based on src/dst addresses
  * and src/dst port numbers.  Sets hash in skb to non-zero hash value
@@ -211,7 +249,6 @@ static __always_inline u32 __flow_hash_1word(u32 a)
 void __skb_get_hash(struct sk_buff *skb)
 {
        struct flow_keys keys;
-       u32 hash;
 
        if (!skb_flow_dissect(skb, &keys))
                return;
@@ -219,21 +256,9 @@ void __skb_get_hash(struct sk_buff *skb)
        if (keys.ports)
                skb->l4_hash = 1;
 
-       /* get a consistent hash (same value on both flow directions) */
-       if (((__force u32)keys.dst < (__force u32)keys.src) ||
-           (((__force u32)keys.dst == (__force u32)keys.src) &&
-            ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
-               swap(keys.dst, keys.src);
-               swap(keys.port16[0], keys.port16[1]);
-       }
-
-       hash = __flow_hash_3words((__force u32)keys.dst,
-                                 (__force u32)keys.src,
-                                 (__force u32)keys.ports);
-       if (!hash)
-               hash = 1;
+       skb->sw_hash = 1;
 
-       skb->hash = hash;
+       skb->hash = __flow_hash_from_keys(&keys);
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
@@ -241,7 +266,7 @@ EXPORT_SYMBOL(__skb_get_hash);
  * Returns a Tx hash based on the given packet descriptor a Tx queues' number
  * to be used as a distribution range.
  */
-u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
                  unsigned int num_tx_queues)
 {
        u32 hash;
@@ -261,13 +286,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
                qcount = dev->tc_to_txq[tc].count;
        }
 
-       if (skb->sk && skb->sk->sk_hash)
-               hash = skb->sk->sk_hash;
-       else
-               hash = (__force u16) skb->protocol;
-       hash = __flow_hash_1word(hash);
-
-       return (u16) (((u64) hash * qcount) >> 32) + qoffset;
+       return (u16) (((u64)skb_get_hash(skb) * qcount) >> 32) + qoffset;
 }
 EXPORT_SYMBOL(__skb_tx_hash);
 
@@ -339,17 +358,10 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
                if (map) {
                        if (map->len == 1)
                                queue_index = map->queues[0];
-                       else {
-                               u32 hash;
-                               if (skb->sk && skb->sk->sk_hash)
-                                       hash = skb->sk->sk_hash;
-                               else
-                                       hash = (__force u16) skb->protocol ^
-                                           skb->hash;
-                               hash = __flow_hash_1word(hash);
+                       else
                                queue_index = map->queues[
-                                   ((u64)hash * map->len) >> 32];
-                       }
+                                   ((u64)skb_get_hash(skb) * map->len) >> 32];
+
                        if (unlikely(queue_index >= dev->real_num_tx_queues))
                                queue_index = -1;
                }
index d3027a73fd4bbc152f13011cb09335af365f19dc..12ab7b4be60932700901f4f3fb9d159f3ddb7ebd 100644 (file)
  * test_8021q:
  *   jneq #0x8100, test_ieee1588   ; ETH_P_8021Q ?
  *   ldh [16]                      ; load inner type
- *   jneq #0x88f7, drop_ieee1588   ; ETH_P_1588 ?
+ *   jneq #0x88f7, test_8021q_ipv4 ; ETH_P_1588 ?
  *   ldb [18]                      ; load payload
  *   and #0x8                      ; as we don't have ports here, test
  *   jneq #0x0, drop_ieee1588      ; for PTP_GEN_BIT and drop these
  *   ldh [18]                      ; reload payload
  *   and #0xf                      ; mask PTP_CLASS_VMASK
- *   or #0x40                      ; PTP_CLASS_V2_VLAN
+ *   or #0x70                      ; PTP_CLASS_VLAN|PTP_CLASS_L2
+ *   ret a                         ; return PTP class
+ *
+ * ; PTP over UDP over IPv4 over 802.1Q over Ethernet
+ * test_8021q_ipv4:
+ *   jneq #0x800, test_8021q_ipv6  ; ETH_P_IP ?
+ *   ldb [27]                      ; load proto
+ *   jneq #17, drop_8021q_ipv4     ; IPPROTO_UDP ?
+ *   ldh [24]                      ; load frag offset field
+ *   jset #0x1fff, drop_8021q_ipv4; don't allow fragments
+ *   ldxb 4*([18]&0xf)             ; load IP header len
+ *   ldh [x + 20]                  ; load UDP dst port
+ *   jneq #319, drop_8021q_ipv4    ; is port PTP_EV_PORT ?
+ *   ldh [x + 26]                  ; load payload
+ *   and #0xf                      ; mask PTP_CLASS_VMASK
+ *   or #0x50                      ; PTP_CLASS_VLAN|PTP_CLASS_IPV4
+ *   ret a                         ; return PTP class
+ *   drop_8021q_ipv4: ret #0x0     ; PTP_CLASS_NONE
+ *
+ * ; PTP over UDP over IPv6 over 802.1Q over Ethernet
+ * test_8021q_ipv6:
+ *   jneq #0x86dd, drop_8021q_ipv6 ; ETH_P_IPV6 ?
+ *   ldb [24]                      ; load proto
+ *   jneq #17, drop_8021q_ipv6           ; IPPROTO_UDP ?
+ *   ldh [60]                      ; load UDP dst port
+ *   jneq #319, drop_8021q_ipv6          ; is port PTP_EV_PORT ?
+ *   ldh [66]                      ; load payload
+ *   and #0xf                      ; mask PTP_CLASS_VMASK
+ *   or #0x60                      ; PTP_CLASS_VLAN|PTP_CLASS_IPV6
  *   ret a                         ; return PTP class
+ *   drop_8021q_ipv6: ret #0x0     ; PTP_CLASS_NONE
  *
  * ; PTP over Ethernet
  * test_ieee1588:
@@ -113,16 +142,39 @@ void __init ptp_classifier_init(void)
                { 0x44,  0,  0, 0x00000020 },
                { 0x16,  0,  0, 0x00000000 },
                { 0x06,  0,  0, 0x00000000 },
-               { 0x15,  0,  9, 0x00008100 },
+               { 0x15,  0, 32, 0x00008100 },
                { 0x28,  0,  0, 0x00000010 },
-               { 0x15,  0, 15, 0x000088f7 },
+               { 0x15,  0,  7, 0x000088f7 },
                { 0x30,  0,  0, 0x00000012 },
                { 0x54,  0,  0, 0x00000008 },
-               { 0x15,  0, 12, 0x00000000 },
+               { 0x15,  0, 35, 0x00000000 },
                { 0x28,  0,  0, 0x00000012 },
                { 0x54,  0,  0, 0x0000000f },
-               { 0x44,  0,  0, 0x00000040 },
+               { 0x44,  0,  0, 0x00000070 },
+               { 0x16,  0,  0, 0x00000000 },
+               { 0x15,  0, 12, 0x00000800 },
+               { 0x30,  0,  0, 0x0000001b },
+               { 0x15,  0,  9, 0x00000011 },
+               { 0x28,  0,  0, 0x00000018 },
+               { 0x45,  7,  0, 0x00001fff },
+               { 0xb1,  0,  0, 0x00000012 },
+               { 0x48,  0,  0, 0x00000014 },
+               { 0x15,  0,  4, 0x0000013f },
+               { 0x48,  0,  0, 0x0000001a },
+               { 0x54,  0,  0, 0x0000000f },
+               { 0x44,  0,  0, 0x00000050 },
+               { 0x16,  0,  0, 0x00000000 },
+               { 0x06,  0,  0, 0x00000000 },
+               { 0x15,  0,  8, 0x000086dd },
+               { 0x30,  0,  0, 0x00000018 },
+               { 0x15,  0,  6, 0x00000011 },
+               { 0x28,  0,  0, 0x0000003c },
+               { 0x15,  0,  4, 0x0000013f },
+               { 0x28,  0,  0, 0x00000042 },
+               { 0x54,  0,  0, 0x0000000f },
+               { 0x44,  0,  0, 0x00000060 },
                { 0x16,  0,  0, 0x00000000 },
+               { 0x06,  0,  0, 0x00000000 },
                { 0x15,  0,  7, 0x000088f7 },
                { 0x30,  0,  0, 0x0000000e },
                { 0x54,  0,  0, 0x00000008 },
index 6521dfd8b7c8563aa5cc7ccea8e5d6da05a7a586..a8770391ea5bfc0db85135cf913c84d09f55cedb 100644 (file)
@@ -43,31 +43,22 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
                return;
 
        type = classify(skb);
+       if (type == PTP_CLASS_NONE)
+               return;
+
+       phydev = skb->dev->phydev;
+       if (likely(phydev->drv->txtstamp)) {
+               if (!atomic_inc_not_zero(&sk->sk_refcnt))
+                       return;
 
-       switch (type) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV4:
-       case PTP_CLASS_V2_IPV6:
-       case PTP_CLASS_V2_L2:
-       case PTP_CLASS_V2_VLAN:
-               phydev = skb->dev->phydev;
-               if (likely(phydev->drv->txtstamp)) {
-                       if (!atomic_inc_not_zero(&sk->sk_refcnt))
-                               return;
-
-                       clone = skb_clone(skb, GFP_ATOMIC);
-                       if (!clone) {
-                               sock_put(sk);
-                               return;
-                       }
-
-                       clone->sk = sk;
-                       phydev->drv->txtstamp(phydev, clone, type);
+               clone = skb_clone(skb, GFP_ATOMIC);
+               if (!clone) {
+                       sock_put(sk);
+                       return;
                }
-               break;
-       default:
-               break;
+
+               clone->sk = sk;
+               phydev->drv->txtstamp(phydev, clone, type);
        }
 }
 EXPORT_SYMBOL_GPL(skb_clone_tx_timestamp);
@@ -114,20 +105,12 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
 
        __skb_pull(skb, ETH_HLEN);
 
-       switch (type) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV4:
-       case PTP_CLASS_V2_IPV6:
-       case PTP_CLASS_V2_L2:
-       case PTP_CLASS_V2_VLAN:
-               phydev = skb->dev->phydev;
-               if (likely(phydev->drv->rxtstamp))
-                       return phydev->drv->rxtstamp(phydev, skb, type);
-               break;
-       default:
-               break;
-       }
+       if (type == PTP_CLASS_NONE)
+               return false;
+
+       phydev = skb->dev->phydev;
+       if (likely(phydev->drv->rxtstamp))
+               return phydev->drv->rxtstamp(phydev, skb, type);
 
        return false;
 }
index 211b5686d719679242d6a7b5e0cd65547ae37cd3..511ddeee73539b740023a86e8ed95262d91a38ca 100644 (file)
@@ -3,8 +3,7 @@
  * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
  */
 
-/*
- * Based on patches from Jon Smirl <jonsmirl@gmail.com>
+/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
  * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
 #include <net/ipv6.h>
 #include <net/af_ieee802154.h>
 
-/*
- * Uncompress address function for source and
+/* Uncompress address function for source and
  * destination address(non-multicast).
  *
  * address_mode is sam value or dam value.
  */
 static int uncompress_addr(struct sk_buff *skb,
-                               struct in6_addr *ipaddr, const u8 address_mode,
-                               const u8 *lladdr, const u8 addr_type,
-                               const u8 addr_len)
+                          struct in6_addr *ipaddr, const u8 address_mode,
+                          const u8 *lladdr, const u8 addr_type,
+                          const u8 addr_len)
 {
        bool fail;
 
@@ -140,13 +138,12 @@ static int uncompress_addr(struct sk_buff *skb,
        return 0;
 }
 
-/*
- * Uncompress address function for source context
+/* Uncompress address function for source context
  * based address(non-multicast).
  */
 static int uncompress_context_based_src_addr(struct sk_buff *skb,
-                                               struct in6_addr *ipaddr,
-                                               const u8 sam)
+                                            struct in6_addr *ipaddr,
+                                            const u8 sam)
 {
        switch (sam) {
        case LOWPAN_IPHC_ADDR_00:
@@ -175,13 +172,13 @@ static int uncompress_context_based_src_addr(struct sk_buff *skb,
 }
 
 static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr,
-               struct net_device *dev, skb_delivery_cb deliver_skb)
+                      struct net_device *dev, skb_delivery_cb deliver_skb)
 {
        struct sk_buff *new;
        int stat;
 
-       new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
-                                                               GFP_ATOMIC);
+       new = skb_copy_expand(skb, sizeof(struct ipv6hdr),
+                             skb_tailroom(skb), GFP_ATOMIC);
        kfree_skb(skb);
 
        if (!new)
@@ -196,7 +193,7 @@ static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr,
        new->dev = dev;
 
        raw_dump_table(__func__, "raw skb data dump before receiving",
-                       new->data, new->len);
+                      new->data, new->len);
 
        stat = deliver_skb(new, dev);
 
@@ -210,8 +207,8 @@ static int skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr,
  */
 static int
 lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
-               struct in6_addr *ipaddr,
-               const u8 dam)
+                                 struct in6_addr *ipaddr,
+                                 const u8 dam)
 {
        bool fail;
 
@@ -314,8 +311,7 @@ uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
                        fail |= lowpan_fetch_skb(skb, &uh->check, 2);
                }
 
-               /*
-                * UDP lenght needs to be infered from the lower layers
+               /* UDP length needs to be inferred from the lower layers
                 * here, we obtain the hint from the remaining size of the
                 * frame
                 */
@@ -338,16 +334,17 @@ err:
 static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 };
 
 int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
-               const u8 *saddr, const u8 saddr_type, const u8 saddr_len,
-               const u8 *daddr, const u8 daddr_type, const u8 daddr_len,
-               u8 iphc0, u8 iphc1, skb_delivery_cb deliver_skb)
+                       const u8 *saddr, const u8 saddr_type,
+                       const u8 saddr_len, const u8 *daddr,
+                       const u8 daddr_type, const u8 daddr_len,
+                       u8 iphc0, u8 iphc1, skb_delivery_cb deliver_skb)
 {
        struct ipv6hdr hdr = {};
        u8 tmp, num_context = 0;
        int err;
 
        raw_dump_table(__func__, "raw skb data dump uncompressed",
-                               skb->data, skb->len);
+                      skb->data, skb->len);
 
        /* another if the CID flag is set */
        if (iphc1 & LOWPAN_IPHC_CID) {
@@ -360,8 +357,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
 
        /* Traffic Class and Flow Label */
        switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
-       /*
-        * Traffic Class and FLow Label carried in-line
+       /* Traffic Class and FLow Label carried in-line
         * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
         */
        case 0: /* 00b */
@@ -374,8 +370,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
                hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
                                        (hdr.flow_lbl[0] & 0x0f);
                break;
-       /*
-        * Traffic class carried in-line
+       /* Traffic class carried in-line
         * ECN + DSCP (1 byte), Flow Label is elided
         */
        case 2: /* 10b */
@@ -385,8 +380,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
                hdr.priority = ((tmp >> 2) & 0x0f);
                hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
                break;
-       /*
-        * Flow Label carried in-line
+       /* Flow Label carried in-line
         * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
         */
        case 1: /* 01b */
@@ -415,9 +409,9 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
        }
 
        /* Hop Limit */
-       if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
+       if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) {
                hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
-       else {
+       } else {
                if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit)))
                        goto drop;
        }
@@ -429,12 +423,12 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
                /* Source address context based uncompression */
                pr_debug("SAC bit is set. Handle context based source address.\n");
                err = uncompress_context_based_src_addr(
-                               skb, &hdr.saddr, tmp);
+                                               skb, &hdr.saddr, tmp);
        } else {
                /* Source address uncompression */
                pr_debug("source address stateless compression\n");
                err = uncompress_addr(skb, &hdr.saddr, tmp, saddr,
-                                       saddr_type, saddr_len);
+                                     saddr_type, saddr_len);
        }
 
        /* Check on error of previous branch */
@@ -457,9 +451,9 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
                }
        } else {
                err = uncompress_addr(skb, &hdr.daddr, tmp, daddr,
-                                       daddr_type, daddr_len);
+                                     daddr_type, daddr_len);
                pr_debug("dest: stateless compression mode %d dest %pI6c\n",
-                       tmp, &hdr.daddr);
+                        tmp, &hdr.daddr);
                if (err)
                        goto drop;
        }
@@ -468,11 +462,11 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
        if (iphc0 & LOWPAN_IPHC_NH_C) {
                struct udphdr uh;
                struct sk_buff *new;
+
                if (uncompress_udp_header(skb, &uh))
                        goto drop;
 
-               /*
-                * replace the compressed UDP head by the uncompressed UDP
+               /* replace the compressed UDP head by the uncompressed UDP
                 * header
                 */
                new = skb_copy_expand(skb, sizeof(struct udphdr),
@@ -489,7 +483,7 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
                skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
 
                raw_dump_table(__func__, "raw UDP header dump",
-                                     (u8 *)&uh, sizeof(uh));
+                              (u8 *)&uh, sizeof(uh));
 
                hdr.nexthdr = UIP_PROTO_UDP;
        }
@@ -504,8 +498,8 @@ int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
                hdr.version, ntohs(hdr.payload_len), hdr.nexthdr,
                hdr.hop_limit, &hdr.daddr);
 
-       raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
-                                                       sizeof(hdr));
+       raw_dump_table(__func__, "raw header dump",
+                      (u8 *)&hdr, sizeof(hdr));
 
        return skb_deliver(skb, &hdr, dev, deliver_skb);
 
@@ -516,8 +510,8 @@ drop:
 EXPORT_SYMBOL_GPL(lowpan_process_data);
 
 static u8 lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift,
-                               const struct in6_addr *ipaddr,
-                               const unsigned char *lladdr)
+                                 const struct in6_addr *ipaddr,
+                                 const unsigned char *lladdr)
 {
        u8 val = 0;
 
@@ -530,14 +524,14 @@ static u8 lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift,
                *hc06_ptr += 2;
                val = 2; /* 16-bits */
                raw_dump_inline(NULL, "Compressed ipv6 addr is (16 bits)",
-                       *hc06_ptr - 2, 2);
+                               *hc06_ptr - 2, 2);
        } else {
                /* do not compress IID => xxxx::IID */
                memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
                *hc06_ptr += 8;
                val = 1; /* 64-bits */
                raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)",
-                       *hc06_ptr - 8, 8);
+                               *hc06_ptr - 8, 8);
        }
 
        return rol8(val, shift);
@@ -601,8 +595,8 @@ static void compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
 }
 
 int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
-                       unsigned short type, const void *_daddr,
-                       const void *_saddr, unsigned int len)
+                          unsigned short type, const void *_daddr,
+                          const void *_saddr, unsigned int len)
 {
        u8 tmp, iphc0, iphc1, *hc06_ptr;
        struct ipv6hdr *hdr;
@@ -616,14 +610,13 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
 
        pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength  = %d\n"
                 "\tnexthdr = 0x%02x\n\thop_lim = %d\n\tdest    = %pI6c\n",
-               hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
-               hdr->hop_limit, &hdr->daddr);
+                hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
+                hdr->hop_limit, &hdr->daddr);
 
        raw_dump_table(__func__, "raw skb network header dump",
-               skb_network_header(skb), sizeof(struct ipv6hdr));
+                      skb_network_header(skb), sizeof(struct ipv6hdr));
 
-       /*
-        * As we copy some bit-length fields, in the IPHC encoding bytes,
+       /* As we copy some bit-length fields, in the IPHC encoding bytes,
         * we sometimes use |=
         * If the field is 0, and the current bit value in memory is 1,
         * this does not work. We therefore reset the IPHC encoding here
@@ -639,11 +632,10 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
                        (unsigned char *)_daddr, IEEE802154_ADDR_LEN);
 
        raw_dump_table(__func__,
-                       "sending raw skb network uncompressed packet",
-                       skb->data, skb->len);
+                      "sending raw skb network uncompressed packet",
+                      skb->data, skb->len);
 
-       /*
-        * Traffic class, flow label
+       /* Traffic class, flow label
         * If flow label is 0, compress it. If traffic class is 0, compress it
         * We have to process both in the same time as the offset of traffic
         * class depends on the presence of version and flow label
@@ -654,11 +646,11 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
        tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
 
        if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
-            (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
+           (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
                /* flow label can be compressed */
                iphc0 |= LOWPAN_IPHC_FL_C;
                if ((hdr->priority == 0) &&
-                  ((hdr->flow_lbl[0] & 0xF0) == 0)) {
+                   ((hdr->flow_lbl[0] & 0xF0) == 0)) {
                        /* compress (elide) all */
                        iphc0 |= LOWPAN_IPHC_TC_C;
                } else {
@@ -669,7 +661,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
        } else {
                /* Flow label cannot be compressed */
                if ((hdr->priority == 0) &&
-                  ((hdr->flow_lbl[0] & 0xF0) == 0)) {
+                   ((hdr->flow_lbl[0] & 0xF0) == 0)) {
                        /* compress only traffic class */
                        iphc0 |= LOWPAN_IPHC_TC_C;
                        *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
@@ -695,8 +687,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
                hc06_ptr += 1;
        }
 
-       /*
-        * Hop limit
+       /* Hop limit
         * if 1:   compress, encoding is 01
         * if 64:  compress, encoding is 10
         * if 255: compress, encoding is 11
@@ -793,7 +784,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
        pr_debug("header len %d skb %u\n", (int)(hc06_ptr - head), skb->len);
 
        raw_dump_table(__func__, "raw skb data dump compressed",
-                               skb->data, skb->len);
+                      skb->data, skb->len);
        return 0;
 }
 EXPORT_SYMBOL_GPL(lowpan_header_compress);
index fe6bd7a7108169138faf198fefcab7f3217e9c55..016b77ee88f0e2f1394c0456f67e9e01ec190d9f 100644 (file)
@@ -80,14 +80,14 @@ lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
 static inline void lowpan_address_flip(u8 *src, u8 *dest)
 {
        int i;
+
        for (i = 0; i < IEEE802154_ADDR_LEN; i++)
                (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
 }
 
-static int lowpan_header_create(struct sk_buff *skb,
-                          struct net_device *dev,
-                          unsigned short type, const void *_daddr,
-                          const void *_saddr, unsigned int len)
+static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
+                               unsigned short type, const void *_daddr,
+                               const void *_saddr, unsigned int len)
 {
        const u8 *saddr = _saddr;
        const u8 *daddr = _daddr;
@@ -144,7 +144,7 @@ static int lowpan_header_create(struct sk_buff *skb,
 }
 
 static int lowpan_give_skb_to_devices(struct sk_buff *skb,
-                                       struct net_device *dev)
+                                     struct net_device *dev)
 {
        struct lowpan_dev_record *entry;
        struct sk_buff *skb_cp;
@@ -368,24 +368,28 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
 static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
        return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
 }
 
 static __le16 lowpan_get_pan_id(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
        return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
 }
 
 static __le16 lowpan_get_short_addr(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
        return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
 }
 
 static u8 lowpan_get_dsn(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
        return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
 }
 
@@ -454,7 +458,7 @@ static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
 }
 
 static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
-       struct packet_type *pt, struct net_device *orig_dev)
+                     struct packet_type *pt, struct net_device *orig_dev)
 {
        struct ieee802154_hdr hdr;
        int ret;
index 351d9a94ec2faa429612d6b7da26b09b63e49a25..29e0de63001b68930f11eab46e6d444a8ee69b96 100644 (file)
@@ -40,9 +40,7 @@
 
 #include "af802154.h"
 
-/*
- * Utility function for families
- */
+/* Utility function for families */
 struct net_device*
 ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
 {
@@ -87,8 +85,8 @@ ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
                rtnl_unlock();
                break;
        default:
-               pr_warning("Unsupported ieee802154 address type: %d\n",
-                               addr->mode);
+               pr_warn("Unsupported ieee802154 address type: %d\n",
+                       addr->mode);
                break;
        }
 
@@ -106,7 +104,7 @@ static int ieee802154_sock_release(struct socket *sock)
        return 0;
 }
 static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-               struct msghdr *msg, size_t len)
+                                  struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
 
@@ -114,7 +112,7 @@ static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 }
 
 static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
-               int addr_len)
+                               int addr_len)
 {
        struct sock *sk = sock->sk;
 
@@ -125,7 +123,7 @@ static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
 }
 
 static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
-                       int addr_len, int flags)
+                                  int addr_len, int flags)
 {
        struct sock *sk = sock->sk;
 
@@ -139,7 +137,7 @@ static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
 }
 
 static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
-               unsigned int cmd)
+                               unsigned int cmd)
 {
        struct ifreq ifr;
        int ret = -ENOIOCTLCMD;
@@ -167,7 +165,7 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
 }
 
 static int ieee802154_sock_ioctl(struct socket *sock, unsigned int cmd,
-               unsigned long arg)
+                                unsigned long arg)
 {
        struct sock *sk = sock->sk;
 
@@ -238,8 +236,7 @@ static const struct proto_ops ieee802154_dgram_ops = {
 };
 
 
-/*
- * Create a socket. Initialise the socket, blank the addresses
+/* Create a socket. Initialise the socket, blank the addresses
  * set the state.
  */
 static int ieee802154_create(struct net *net, struct socket *sock,
@@ -301,13 +298,14 @@ static const struct net_proto_family ieee802154_family_ops = {
 };
 
 static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
-       struct packet_type *pt, struct net_device *orig_dev)
+                         struct packet_type *pt, struct net_device *orig_dev)
 {
        if (!netif_running(dev))
                goto drop;
        pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
 #ifdef DEBUG
-       print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len);
+       print_hex_dump_bytes("ieee802154_rcv ",
+                            DUMP_PREFIX_NONE, skb->data, skb->len);
 #endif
 
        if (!net_eq(dev_net(dev), &init_net))
index 4f0ed8780194502465f0d5b60383bf6794bfadf0..ef2ad8aaef1361f2e061519dd773cd0f03dd4b30 100644 (file)
@@ -149,8 +149,7 @@ static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb != NULL) {
-                       /*
-                        * We will only return the amount
+                       /* We will only return the amount
                         * of this packet since that is all
                         * that will be read.
                         */
@@ -161,12 +160,13 @@ static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
        }
 
        }
+
        return -ENOIOCTLCMD;
 }
 
 /* FIXME: autobind */
 static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
-                       int len)
+                        int len)
 {
        struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
        struct dgram_sock *ro = dgram_sk(sk);
@@ -205,7 +205,7 @@ static int dgram_disconnect(struct sock *sk, int flags)
 }
 
 static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
-               struct msghdr *msg, size_t size)
+                        struct msghdr *msg, size_t size)
 {
        struct net_device *dev;
        unsigned int mtu;
@@ -248,8 +248,8 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
        skb = sock_alloc_send_skb(sk, hlen + tlen + size,
-                       msg->msg_flags & MSG_DONTWAIT,
-                       &err);
+                                 msg->msg_flags & MSG_DONTWAIT,
+                                 &err);
        if (!skb)
                goto out_dev;
 
@@ -262,7 +262,8 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        cb->ackreq = ro->want_ack;
 
        if (msg->msg_name) {
-               DECLARE_SOCKADDR(struct sockaddr_ieee802154*, daddr, msg->msg_name);
+               DECLARE_SOCKADDR(struct sockaddr_ieee802154*,
+                                daddr, msg->msg_name);
 
                ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
        } else {
@@ -304,8 +305,8 @@ out:
 }
 
 static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
-               struct msghdr *msg, size_t len, int noblock, int flags,
-               int *addr_len)
+                        struct msghdr *msg, size_t len, int noblock,
+                        int flags, int *addr_len)
 {
        size_t copied = 0;
        int err = -EOPNOTSUPP;
@@ -398,6 +399,7 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
                                          dgram_sk(sk))) {
                        if (prev) {
                                struct sk_buff *clone;
+
                                clone = skb_clone(skb, GFP_ATOMIC);
                                if (clone)
                                        dgram_rcv_skb(prev, clone);
@@ -407,9 +409,9 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
                }
        }
 
-       if (prev)
+       if (prev) {
                dgram_rcv_skb(prev, skb);
-       else {
+       } else {
                kfree_skb(skb);
                ret = NET_RX_DROP;
        }
@@ -419,7 +421,7 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
 }
 
 static int dgram_getsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, int __user *optlen)
+                           char __user *optval, int __user *optlen)
 {
        struct dgram_sock *ro = dgram_sk(sk);
 
@@ -463,7 +465,7 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
 }
 
 static int dgram_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, unsigned int optlen)
+                           char __user *optval, unsigned int optlen)
 {
        struct dgram_sock *ro = dgram_sk(sk);
        struct net *net = sock_net(sk);
index 8b83a231299e46a0668b3fe329803fa1a6154791..5d352f86979e40b191e08d4bb1d63100560fd1b6 100644 (file)
@@ -43,7 +43,7 @@ struct genl_info;
 struct sk_buff *ieee802154_nl_create(int flags, u8 req);
 int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group);
 struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
-               int flags, u8 req);
+                                       int flags, u8 req);
 int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info);
 
 extern struct genl_family nl802154_family;
index 26efcf4fd2ff72079a678ef3a4dbd0ae887848e1..9222966f5e6d7438183825ffe306a3c4ab502b10 100644 (file)
@@ -52,7 +52,7 @@ struct sk_buff *ieee802154_nl_create(int flags, u8 req)
 
        spin_lock_irqsave(&ieee802154_seq_lock, f);
        hdr = genlmsg_put(msg, 0, ieee802154_seq_num++,
-                       &nl802154_family, flags, req);
+                         &nl802154_family, flags, req);
        spin_unlock_irqrestore(&ieee802154_seq_lock, f);
        if (!hdr) {
                nlmsg_free(msg);
@@ -86,7 +86,7 @@ struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
                return NULL;
 
        hdr = genlmsg_put_reply(msg, info,
-                       &nl802154_family, flags, req);
+                               &nl802154_family, flags, req);
        if (!hdr) {
                nlmsg_free(msg);
                return NULL;
index a3281b8bfd5bf1fa24bd05a351b476031776797f..c6bfe22bfa5ebedff765e6dcd4bf7bebd26d0b49 100644 (file)
@@ -60,7 +60,8 @@ static __le16 nla_get_shortaddr(const struct nlattr *nla)
 }
 
 int ieee802154_nl_assoc_indic(struct net_device *dev,
-               struct ieee802154_addr *addr, u8 cap)
+                             struct ieee802154_addr *addr,
+                             u8 cap)
 {
        struct sk_buff *msg;
 
@@ -93,7 +94,7 @@ nla_put_failure:
 EXPORT_SYMBOL(ieee802154_nl_assoc_indic);
 
 int ieee802154_nl_assoc_confirm(struct net_device *dev, __le16 short_addr,
-               u8 status)
+                               u8 status)
 {
        struct sk_buff *msg;
 
@@ -119,7 +120,8 @@ nla_put_failure:
 EXPORT_SYMBOL(ieee802154_nl_assoc_confirm);
 
 int ieee802154_nl_disassoc_indic(struct net_device *dev,
-               struct ieee802154_addr *addr, u8 reason)
+                                struct ieee802154_addr *addr,
+                                u8 reason)
 {
        struct sk_buff *msg;
 
@@ -205,8 +207,9 @@ nla_put_failure:
 EXPORT_SYMBOL(ieee802154_nl_beacon_indic);
 
 int ieee802154_nl_scan_confirm(struct net_device *dev,
-               u8 status, u8 scan_type, u32 unscanned, u8 page,
-               u8 *edl/* , struct list_head *pan_desc_list */)
+                              u8 status, u8 scan_type,
+                              u32 unscanned, u8 page,
+                              u8 *edl/* , struct list_head *pan_desc_list */)
 {
        struct sk_buff *msg;
 
@@ -260,7 +263,7 @@ nla_put_failure:
 EXPORT_SYMBOL(ieee802154_nl_start_confirm);
 
 static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
-       u32 seq, int flags, struct net_device *dev)
+                                   u32 seq, int flags, struct net_device *dev)
 {
        void *hdr;
        struct wpan_phy *phy;
@@ -270,7 +273,7 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
        pr_debug("%s\n", __func__);
 
        hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
-               IEEE802154_LIST_IFACE);
+                         IEEE802154_LIST_IFACE);
        if (!hdr)
                goto out;
 
@@ -330,14 +333,16 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
 
        if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
                char name[IFNAMSIZ + 1];
+
                nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME],
-                               sizeof(name));
+                           sizeof(name));
                dev = dev_get_by_name(&init_net, name);
-       } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX])
+       } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX]) {
                dev = dev_get_by_index(&init_net,
                        nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX]));
-       else
+       } else {
                return NULL;
+       }
 
        if (!dev)
                return NULL;
@@ -435,7 +440,7 @@ int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info)
        int ret = -EOPNOTSUPP;
 
        if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] &&
-               !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) ||
+           !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) ||
            !info->attrs[IEEE802154_ATTR_REASON])
                return -EINVAL;
 
@@ -464,8 +469,7 @@ out:
        return ret;
 }
 
-/*
- * PANid, channel, beacon_order = 15, superframe_order = 15,
+/* PANid, channel, beacon_order = 15, superframe_order = 15,
  * PAN_coordinator, battery_life_extension = 0,
  * coord_realignment = 0, security_enable = 0
 */
@@ -559,8 +563,8 @@ int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
                page = 0;
 
 
-       ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page,
-                       duration);
+       ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels,
+                                                page, duration);
 
 out:
        dev_put(dev);
@@ -570,7 +574,8 @@ out:
 int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info)
 {
        /* Request for interface name, index, type, IEEE address,
-          PAN Id, short address */
+        * PAN Id, short address
+        */
        struct sk_buff *msg;
        struct net_device *dev = NULL;
        int rc = -ENOBUFS;
@@ -586,7 +591,7 @@ int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info)
                goto out_dev;
 
        rc = ieee802154_nl_fill_iface(msg, info->snd_portid, info->snd_seq,
-                       0, dev);
+                                     0, dev);
        if (rc < 0)
                goto out_free;
 
@@ -598,7 +603,6 @@ out_free:
 out_dev:
        dev_put(dev);
        return rc;
-
 }
 
 int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
@@ -616,7 +620,8 @@ int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
                        goto cont;
 
                if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
-                       cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0)
+                                            cb->nlh->nlmsg_seq,
+                                            NLM_F_MULTI, dev) < 0)
                        break;
 cont:
                idx++;
@@ -765,6 +770,7 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
        case IEEE802154_SCF_KEY_SHORT_INDEX:
        {
                u32 source = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT]);
+
                desc->short_source = cpu_to_le32(source);
                break;
        }
@@ -842,7 +848,7 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
                goto out_dev;
 
        hdr = genlmsg_put(msg, 0, info->snd_seq, &nl802154_family, 0,
-               IEEE802154_LLSEC_GETPARAMS);
+                         IEEE802154_LLSEC_GETPARAMS);
        if (!hdr)
                goto out_free;
 
@@ -946,7 +952,7 @@ struct llsec_dump_data {
 
 static int
 ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
-                           int (*step)(struct llsec_dump_data*))
+                           int (*step)(struct llsec_dump_data *))
 {
        struct net *net = sock_net(skb->sk);
        struct net_device *dev;
index 89b265aea151eaf7f301c28f391d064b7d22edfc..972baf83411af7c64a6f11826ff7b1ab61b43ec4 100644 (file)
@@ -36,7 +36,7 @@
 #include "ieee802154.h"
 
 static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
-       u32 seq, int flags, struct wpan_phy *phy)
+                                 u32 seq, int flags, struct wpan_phy *phy)
 {
        void *hdr;
        int i, pages = 0;
@@ -48,7 +48,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
                return -EMSGSIZE;
 
        hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
-               IEEE802154_LIST_PHY);
+                         IEEE802154_LIST_PHY);
        if (!hdr)
                goto out;
 
@@ -80,7 +80,8 @@ out:
 int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info)
 {
        /* Request for interface name, index, type, IEEE address,
-          PAN Id, short address */
+        * PAN Id, short address
+        */
        struct sk_buff *msg;
        struct wpan_phy *phy;
        const char *name;
@@ -105,7 +106,7 @@ int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info)
                goto out_dev;
 
        rc = ieee802154_nl_fill_phy(msg, info->snd_portid, info->snd_seq,
-                       0, phy);
+                                   0, phy);
        if (rc < 0)
                goto out_free;
 
@@ -117,7 +118,6 @@ out_free:
 out_dev:
        wpan_phy_put(phy);
        return rc;
-
 }
 
 struct dump_phy_data {
@@ -137,10 +137,10 @@ static int ieee802154_dump_phy_iter(struct wpan_phy *phy, void *_data)
                return 0;
 
        rc = ieee802154_nl_fill_phy(data->skb,
-                       NETLINK_CB(data->cb->skb).portid,
-                       data->cb->nlh->nlmsg_seq,
-                       NLM_F_MULTI,
-                       phy);
+                                   NETLINK_CB(data->cb->skb).portid,
+                                   data->cb->nlh->nlmsg_seq,
+                                   NLM_F_MULTI,
+                                   phy);
 
        if (rc < 0) {
                data->idx--;
@@ -238,10 +238,9 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
 
                addr.sa_family = ARPHRD_IEEE802154;
                nla_memcpy(&addr.sa_data, info->attrs[IEEE802154_ATTR_HW_ADDR],
-                               IEEE802154_ADDR_LEN);
+                          IEEE802154_ADDR_LEN);
 
-               /*
-                * strangely enough, some callbacks (inetdev_event) from
+               /* strangely enough, some callbacks (inetdev_event) from
                 * dev_set_mac_address require RTNL_LOCK
                 */
                rtnl_lock();
index 74d54fae33d74a58ca2628f7547250906bea663e..9d1f64806f02127808986a84b36fae92e33df4a2 100644 (file)
@@ -96,7 +96,7 @@ out:
 }
 
 static int raw_connect(struct sock *sk, struct sockaddr *uaddr,
-                       int addr_len)
+                      int addr_len)
 {
        return -ENOTSUPP;
 }
@@ -106,8 +106,8 @@ static int raw_disconnect(struct sock *sk, int flags)
        return 0;
 }
 
-static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                      size_t size)
+static int raw_sendmsg(struct kiocb *iocb, struct sock *sk,
+                      struct msghdr *msg, size_t size)
 {
        struct net_device *dev;
        unsigned int mtu;
@@ -145,7 +145,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
        skb = sock_alloc_send_skb(sk, hlen + tlen + size,
-                       msg->msg_flags & MSG_DONTWAIT, &err);
+                                 msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto out_dev;
 
@@ -235,7 +235,6 @@ void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
                bh_lock_sock(sk);
                if (!sk->sk_bound_dev_if ||
                    sk->sk_bound_dev_if == dev->ifindex) {
-
                        struct sk_buff *clone;
 
                        clone = skb_clone(skb, GFP_ATOMIC);
@@ -248,13 +247,13 @@ void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
 }
 
 static int raw_getsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, int __user *optlen)
+                         char __user *optval, int __user *optlen)
 {
        return -EOPNOTSUPP;
 }
 
 static int raw_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, unsigned int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        return -EOPNOTSUPP;
 }
@@ -274,4 +273,3 @@ struct proto ieee802154_raw_prot = {
        .getsockopt     = raw_getsockopt,
        .setsockopt     = raw_setsockopt,
 };
-
index 6f1428c4870b11e9bab087c54d414079514caf8a..b85bd3f7048e7bdb7e44d06c0b49b4d2a22724f5 100644 (file)
@@ -378,6 +378,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
        fq = fq_find(net, frag_info, &source, &dest);
        if (fq != NULL) {
                int ret;
+
                spin_lock(&fq->q.lock);
                ret = lowpan_frag_queue(fq, skb, frag_type);
                spin_unlock(&fq->q.lock);
index 8d6f6704da84e8b95745cedb7040230cb5b96cb1..4955e0fe5883ae705edb822b62aab36c93e7e646 100644 (file)
@@ -48,7 +48,8 @@ MASTER_SHOW(transmit_power, "%d +- 1 dB");
 MASTER_SHOW(cca_mode, "%d");
 
 static ssize_t channels_supported_show(struct device *dev,
-                           struct device_attribute *attr, char *buf)
+                                      struct device_attribute *attr,
+                                      char *buf)
 {
        struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
        int ret;
@@ -57,7 +58,7 @@ static ssize_t channels_supported_show(struct device *dev,
        mutex_lock(&phy->pib_lock);
        for (i = 0; i < 32; i++) {
                ret = snprintf(buf + len, PAGE_SIZE - len,
-                               "%#09x\n", phy->channels_supported[i]);
+                              "%#09x\n", phy->channels_supported[i]);
                if (ret < 0)
                        break;
                len += ret;
@@ -80,6 +81,7 @@ ATTRIBUTE_GROUPS(pmib);
 static void wpan_phy_release(struct device *d)
 {
        struct wpan_phy *phy = container_of(d, struct wpan_phy, dev);
+
        kfree(phy);
 }
 
@@ -121,11 +123,12 @@ static int wpan_phy_iter(struct device *dev, void *_data)
 {
        struct wpan_phy_iter_data *wpid = _data;
        struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
+
        return wpid->fn(phy, wpid->data);
 }
 
 int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data),
-               void *data)
+                     void *data)
 {
        struct wpan_phy_iter_data wpid = {
                .fn = fn,
@@ -197,6 +200,7 @@ EXPORT_SYMBOL(wpan_phy_free);
 static int __init wpan_phy_class_init(void)
 {
        int rc;
+
        rc = class_register(&wpan_phy_class);
        if (rc)
                goto err;
index a3095fdefbed98ed4e320ac6c44ea3e18241d1a4..90c0e8386116177f4bbf412f2175aec93c64870c 100644 (file)
@@ -76,6 +76,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        inet->inet_daddr = fl4->daddr;
        inet->inet_dport = usin->sin_port;
        sk->sk_state = TCP_ESTABLISHED;
+       inet_set_txhash(sk);
        inet->inet_id = jiffies;
 
        sk_dst_set(sk, &rt->dst);
index 5dfebd2f2e382269f5327cef7f53ea1bc68a95fc..1edc739b9da59e29decbc304e9c5dd037634bf09 100644 (file)
@@ -208,6 +208,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        inet->inet_dport = usin->sin_port;
        inet->inet_daddr = daddr;
 
+       inet_set_txhash(sk);
+
        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
@@ -838,8 +840,6 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                                            ireq->ir_rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
-               if (!tcp_rsk(req)->snt_synack && !err)
-                       tcp_rsk(req)->snt_synack = tcp_time_stamp;
        }
 
        return err;
@@ -1336,6 +1336,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newinet->mc_ttl       = ip_hdr(skb)->ttl;
        newinet->rcv_tos      = ip_hdr(skb)->tos;
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
+       inet_set_txhash(newsk);
        if (inet_opt)
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
        newinet->inet_id = newtp->write_seq ^ jiffies;
index f8f2a944a1ce091f216d7ca7968667419fe6ed0a..bcee13c4627cc29deb17695454c0413d2b0b87ff 100644 (file)
@@ -916,6 +916,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        skb_orphan(skb);
        skb->sk = sk;
        skb->destructor = tcp_wfree;
+       skb_set_hash_from_sk(skb, sk);
        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 
        /* Build TCP header and checksum it. */
index a426cd7099bbba73831839f4f22395960114e99f..2daa3a133e498cdccfe5695ee62db7c72da8ce25 100644 (file)
@@ -765,6 +765,7 @@ static int __net_init inet6_net_init(struct net *net)
        net->ipv6.sysctl.bindv6only = 0;
        net->ipv6.sysctl.icmpv6_time = 1*HZ;
        net->ipv6.sysctl.flowlabel_consistency = 1;
+       net->ipv6.sysctl.auto_flowlabels = 0;
        atomic_set(&net->ipv6.rt_genid, 0);
 
        err = ipv6_init_mibs(net);
index c3bf2d2e519ea3ec86c07c43dffaff85fad80b5e..2753319524f1acabb34a0520ea29ee361c1dfe9e 100644 (file)
@@ -199,6 +199,7 @@ ipv4_connected:
                      NULL);
 
        sk->sk_state = TCP_ESTABLISHED;
+       ip6_set_txhash(sk);
 out:
        fl6_sock_release(flowlabel);
        return err;
index 3873181ed85614a28f9857d7d53acf2be9d2b9fb..365b2b6f3942cdcdb8794bed6dcfa2e4477466b9 100644 (file)
@@ -723,7 +723,8 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
         *      Push down and install the IP header.
         */
        ipv6h = ipv6_hdr(skb);
-       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel);
+       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
        ipv6h->hop_limit = tunnel->parms.hop_limit;
        ipv6h->nexthdr = proto;
        ipv6h->saddr = fl6->saddr;
@@ -1174,7 +1175,9 @@ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
        struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
        __be16 *p = (__be16 *)(ipv6h+1);
 
-       ip6_flow_hdr(ipv6h, 0, t->fl.u.ip6.flowlabel);
+       ip6_flow_hdr(ipv6h, 0,
+                    ip6_make_flowlabel(dev_net(dev), skb,
+                                       t->fl.u.ip6.flowlabel, false));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = NEXTHDR_GRE;
        ipv6h->saddr = t->parms.laddr;
index cb9df0eb40237065e696dd1013180b6c82a9c2dc..fa83bdd4c3dddd18a40eeea020bdad3cfd534162 100644 (file)
@@ -205,7 +205,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);
 
-       ip6_flow_hdr(hdr, tclass, fl6->flowlabel);
+       ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
+                                                    np->autoflowlabel));
 
        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
@@ -1569,7 +1570,9 @@ int ip6_push_pending_frames(struct sock *sk)
        skb_reset_network_header(skb);
        hdr = ipv6_hdr(skb);
 
-       ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
+       ip6_flow_hdr(hdr, np->cork.tclass,
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel,
+                                       np->autoflowlabel));
        hdr->hop_limit = np->cork.hop_limit;
        hdr->nexthdr = proto;
        hdr->saddr = fl6->saddr;
index afa082458360216ff33012ee43e93354888e44b9..51a1eb185ea7b6e255ecdb454eb12ed83875739d 100644 (file)
@@ -1046,7 +1046,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        ipv6h = ipv6_hdr(skb);
-       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel);
+       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = proto;
        ipv6h->saddr = fl6->saddr;
index cc34f65179e4b771438439d976c03467dc493c24..b50b9e54cf531cf7ec4b327b650856b72ff677c5 100644 (file)
@@ -834,6 +834,10 @@ pref_skip_coa:
                np->dontfrag = valbool;
                retv = 0;
                break;
+       case IPV6_AUTOFLOWLABEL:
+               np->autoflowlabel = valbool;
+               retv = 0;
+               break;
        }
 
        release_sock(sk);
@@ -1273,6 +1277,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                val = np->dontfrag;
                break;
 
+       case IPV6_AUTOFLOWLABEL:
+               val = np->autoflowlabel;
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
index 058f3eca2e53efd1fe016cfe8450d3ab0a9c13b1..5bf7b61f8ae8bc9403b9ea8b705f73d09eaccac5 100644 (file)
@@ -38,6 +38,13 @@ static struct ctl_table ipv6_table_template[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "auto_flowlabels",
+               .data           = &init_net.ipv6.sysctl.auto_flowlabels,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        {
                .procname       = "fwmark_reflect",
                .data           = &init_net.ipv6.sysctl.fwmark_reflect,
@@ -74,6 +81,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
        ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
        ipv6_table[1].data = &net->ipv6.sysctl.anycast_src_echo_reply;
        ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
+       ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
 
        ipv6_route_table = ipv6_route_sysctl_init(net);
        if (!ipv6_route_table)
index bc24ee21339a4a6c10e2f314e1a95ce78e26351c..22055b098428df812ca0dfabe3e39420b650d321 100644 (file)
@@ -198,6 +198,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        sk->sk_v6_daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;
 
+       ip6_set_txhash(sk);
+
        /*
         *      TCP over IPv4
         */
@@ -498,8 +500,6 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
                skb_set_queue_mapping(skb, queue_mapping);
                err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
                err = net_xmit_eval(err);
-               if (!tcp_rsk(req)->snt_synack && !err)
-                       tcp_rsk(req)->snt_synack = tcp_time_stamp;
        }
 
 done:
@@ -1134,6 +1134,8 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
        newsk->sk_bound_dev_if = ireq->ir_iif;
 
+       ip6_set_txhash(newsk);
+
        /* Now IPv6 options...
 
           First: no IPv4 options.
index 2cf66d885e68f38360cfb1ced1a08a924b342d22..c4d4568611ca7f43a018cccf8f16fcc4464930ed 100644 (file)
@@ -143,6 +143,7 @@ static void
 mac802154_del_iface(struct wpan_phy *phy, struct net_device *dev)
 {
        struct mac802154_sub_if_data *sdata;
+
        ASSERT_RTNL();
 
        sdata = netdev_priv(dev);
@@ -276,7 +277,8 @@ ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops)
        }
 
        priv = wpan_phy_priv(phy);
-       priv->hw.phy = priv->phy = phy;
+       priv->phy = phy;
+       priv->hw.phy = priv->phy;
        priv->hw.priv = (char *)priv + ALIGN(sizeof(*priv), NETDEV_ALIGN);
        priv->ops = ops;
 
index 1456f73b02b9f8ae92909d84ee408d0c6ca93987..457058142098376bb9731600a9c162aaf4247b64 100644 (file)
@@ -538,6 +538,7 @@ static int llsec_recover_addr(struct mac802154_llsec *sec,
                              struct ieee802154_addr *addr)
 {
        __le16 caddr = sec->params.coord_shortaddr;
+
        addr->pan_id = sec->params.pan_id;
 
        if (caddr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
index 15aa2f2b03a78c29138db43c08073c4ba2817e54..868a040fd422ea6e877cc7df036b425f8b62008e 100644 (file)
@@ -175,9 +175,9 @@ static void phy_chan_notify(struct work_struct *work)
 
        mutex_lock(&priv->hw->phy->pib_lock);
        res = hw->ops->set_channel(&hw->hw, priv->page, priv->chan);
-       if (res)
+       if (res) {
                pr_debug("set_channel failed\n");
-       else {
+       } else {
                priv->hw->phy->current_channel = priv->chan;
                priv->hw->phy->current_page = priv->page;
        }
@@ -210,8 +210,9 @@ void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
                INIT_WORK(&work->work, phy_chan_notify);
                work->dev = dev;
                queue_work(priv->hw->dev_workqueue, &work->work);
-       } else
+       } else {
                mutex_unlock(&priv->hw->phy->pib_lock);
+       }
 }
 
 
index 6d1647399d4fefb4584a7bdd2c8ac5adf9b05702..8124353646ae64239556a0ed915f046f7e689864 100644 (file)
@@ -98,6 +98,7 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
        if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
                u16 crc = crc_ccitt(0, skb->data, skb->len);
                u8 *data = skb_put(skb, 2);
+
                data[0] = crc & 0xff;
                data[1] = crc >> 8;
        }
index 15c731f03fa664a64f7bf3cdde36cf1a8e4150b6..e8c9f97042163f0297f638a8ce87e2ba6ad00edf 100644 (file)
@@ -1961,25 +1961,25 @@ struct netlink_broadcast_data {
        void *tx_data;
 };
 
-static int do_one_broadcast(struct sock *sk,
-                                  struct netlink_broadcast_data *p)
+static void do_one_broadcast(struct sock *sk,
+                                   struct netlink_broadcast_data *p)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
        int val;
 
        if (p->exclude_sk == sk)
-               goto out;
+               return;
 
        if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
            !test_bit(p->group - 1, nlk->groups))
-               goto out;
+               return;
 
        if (!net_eq(sock_net(sk), p->net))
-               goto out;
+               return;
 
        if (p->failure) {
                netlink_overrun(sk);
-               goto out;
+               return;
        }
 
        sock_hold(sk);
@@ -2017,9 +2017,6 @@ static int do_one_broadcast(struct sock *sk,
                p->skb2 = NULL;
        }
        sock_put(sk);
-
-out:
-       return 0;
 }
 
 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
index 0edbd95c60e73abfba45a145978ac1d829bb1321..d8b7e247bebff5c5f4c3b83345ca5e4f492be60b 100644 (file)
@@ -143,8 +143,6 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
        struct rtable *rt;
        struct flowi4 fl;
        __be16 src_port;
-       int port_min;
-       int port_max;
        __be16 df;
        int err;
 
@@ -172,8 +170,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
 
        skb->ignore_df = 1;
 
-       inet_get_local_port_range(net, &port_min, &port_max);
-       src_port = vxlan_src_port(port_min, port_max, skb);
+       src_port = udp_flow_src_port(net, skb, 0, 0, true);
 
        err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
                             fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst,
index 12c7e01c267711ef19878f6c1da1e66e257ecf7e..2e9ada10fd846c10bc6281c30f29cf1fed02ca3c 100644 (file)
@@ -424,8 +424,9 @@ static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
                                   void __user *buffer, size_t *lenp,
                                   loff_t *ppos)
 {
-       pr_warn_once("Changing rto_alpha or rto_beta may lead to "
-                    "suboptimal rtt/srtt estimations!\n");
+       if (write)
+               pr_warn_once("Changing rto_alpha or rto_beta may lead to "
+                            "suboptimal rtt/srtt estimations!\n");
 
        return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
 }
index 7dd672fa651f979ae6b93578fc678254a7317b6b..b10e047bbd15548b405393837047b437e4802d26 100644 (file)
@@ -594,15 +594,16 @@ void sctp_transport_burst_reset(struct sctp_transport *t)
 }
 
 /* What is the next timeout value for this transport? */
-unsigned long sctp_transport_timeout(struct sctp_transport *t)
+unsigned long sctp_transport_timeout(struct sctp_transport *trans)
 {
-       unsigned long timeout;
-       timeout = t->rto + sctp_jitter(t->rto);
-       if ((t->state != SCTP_UNCONFIRMED) &&
-           (t->state != SCTP_PF))
-               timeout += t->hbinterval;
-       timeout += jiffies;
-       return timeout;
+       /* RTO + timer slack +/- 50% of RTO */
+       unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto);
+
+       if (trans->state != SCTP_UNCONFIRMED &&
+           trans->state != SCTP_PF)
+               timeout += trans->hbinterval;
+
+       return timeout + jiffies;
 }
 
 /* Reset transport variables to their initial values */
index a081e7d08d22aefec08f2e661cbdee29c03e1502..a235b245f682bb8acbbf94b74334bada8040ff4f 100644 (file)
@@ -88,6 +88,8 @@ static void link_print(struct tipc_link *l_ptr, const char *str);
 static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
 static void tipc_link_sync_xmit(struct tipc_link *l);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf);
+static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf);
 
 /*
  *  Simple link routines
@@ -1420,11 +1422,6 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                if (unlikely(!list_empty(&l_ptr->waiting_ports)))
                        tipc_link_wakeup_ports(l_ptr, 0);
 
-               if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
-                       l_ptr->stats.sent_acks++;
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
-               }
-
                /* Process the incoming packet */
                if (unlikely(!link_working_working(l_ptr))) {
                        if (msg_user(msg) == LINK_PROTOCOL) {
@@ -1458,54 +1455,19 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                if (unlikely(l_ptr->oldest_deferred_in))
                        head = link_insert_deferred_queue(l_ptr, head);
 
-               /* Deliver packet/message to correct user: */
-               if (unlikely(msg_user(msg) ==  CHANGEOVER_PROTOCOL)) {
-                       if (!tipc_link_tunnel_rcv(n_ptr, &buf)) {
-                               tipc_node_unlock(n_ptr);
-                               continue;
-                       }
-                       msg = buf_msg(buf);
-               } else if (msg_user(msg) == MSG_FRAGMENTER) {
-                       l_ptr->stats.recv_fragments++;
-                       if (tipc_buf_append(&l_ptr->reasm_buf, &buf)) {
-                               l_ptr->stats.recv_fragmented++;
-                               msg = buf_msg(buf);
-                       } else {
-                               if (!l_ptr->reasm_buf)
-                                       tipc_link_reset(l_ptr);
-                               tipc_node_unlock(n_ptr);
-                               continue;
-                       }
+               if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
+                       l_ptr->stats.sent_acks++;
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
                }
 
-               switch (msg_user(msg)) {
-               case TIPC_LOW_IMPORTANCE:
-               case TIPC_MEDIUM_IMPORTANCE:
-               case TIPC_HIGH_IMPORTANCE:
-               case TIPC_CRITICAL_IMPORTANCE:
-               case CONN_MANAGER:
-                       tipc_node_unlock(n_ptr);
-                       tipc_sk_rcv(buf);
-                       continue;
-               case MSG_BUNDLER:
-                       l_ptr->stats.recv_bundles++;
-                       l_ptr->stats.recv_bundled += msg_msgcnt(msg);
+               if (tipc_link_prepare_input(l_ptr, &buf)) {
                        tipc_node_unlock(n_ptr);
-                       tipc_link_bundle_rcv(buf);
                        continue;
-               case NAME_DISTRIBUTOR:
-                       n_ptr->bclink.recv_permitted = true;
-                       tipc_node_unlock(n_ptr);
-                       tipc_named_rcv(buf);
-                       continue;
-               case BCAST_PROTOCOL:
-                       tipc_link_sync_rcv(n_ptr, buf);
-                       break;
-               default:
-                       kfree_skb(buf);
-                       break;
                }
                tipc_node_unlock(n_ptr);
+               msg = buf_msg(buf);
+               if (tipc_link_input(l_ptr, buf) != 0)
+                       goto discard;
                continue;
 unlock_discard:
                tipc_node_unlock(n_ptr);
@@ -1514,6 +1476,80 @@ discard:
        }
 }
 
+/**
+ * tipc_link_prepare_input - process TIPC link messages
+ *
+ * returns nonzero if the message was consumed
+ *
+ * Node lock must be held
+ */
+static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf)
+{
+       struct tipc_node *n;
+       struct tipc_msg *msg;
+       int res = -EINVAL;
+
+       n = l->owner;
+       msg = buf_msg(*buf);
+       switch (msg_user(msg)) {
+       case CHANGEOVER_PROTOCOL:
+               if (tipc_link_tunnel_rcv(n, buf))
+                       res = 0;
+               break;
+       case MSG_FRAGMENTER:
+               l->stats.recv_fragments++;
+               if (tipc_buf_append(&l->reasm_buf, buf)) {
+                       l->stats.recv_fragmented++;
+                       res = 0;
+               } else if (!l->reasm_buf) {
+                       tipc_link_reset(l);
+               }
+               break;
+       case MSG_BUNDLER:
+               l->stats.recv_bundles++;
+               l->stats.recv_bundled += msg_msgcnt(msg);
+               res = 0;
+               break;
+       case NAME_DISTRIBUTOR:
+               n->bclink.recv_permitted = true;
+               res = 0;
+               break;
+       case BCAST_PROTOCOL:
+               tipc_link_sync_rcv(n, *buf);
+               break;
+       default:
+               res = 0;
+       }
+       return res;
+}
+/**
+ * tipc_link_input - Deliver message too higher layers
+ */
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       int res = 0;
+
+       switch (msg_user(msg)) {
+       case TIPC_LOW_IMPORTANCE:
+       case TIPC_MEDIUM_IMPORTANCE:
+       case TIPC_HIGH_IMPORTANCE:
+       case TIPC_CRITICAL_IMPORTANCE:
+       case CONN_MANAGER:
+               tipc_sk_rcv(buf);
+               break;
+       case NAME_DISTRIBUTOR:
+               tipc_named_rcv(buf);
+               break;
+       case MSG_BUNDLER:
+               tipc_link_bundle_rcv(buf);
+               break;
+       default:
+               res = -EINVAL;
+       }
+       return res;
+}
+
 /**
  * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
  *