Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net...
author     David S. Miller <davem@davemloft.net>
           Fri, 2 Nov 2012 22:45:35 +0000 (18:45 -0400)
committer  David S. Miller <davem@davemloft.net>
           Fri, 2 Nov 2012 22:45:35 +0000 (18:45 -0400)
Jeff Kirsher says:

====================
This series contains updates to igb, ixgbe and e1000.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
87 files changed:
Documentation/devicetree/bindings/net/cpsw.txt
arch/alpha/include/asm/socket.h
arch/arm/boot/dts/imx6q.dtsi
arch/arm/mach-imx/mach-imx6q.c
arch/avr32/include/uapi/asm/socket.h
arch/cris/include/asm/socket.h
arch/frv/include/uapi/asm/socket.h
arch/h8300/include/asm/socket.h
arch/ia64/include/uapi/asm/socket.h
arch/m32r/include/asm/socket.h
arch/m68k/include/asm/socket.h
arch/mips/include/uapi/asm/socket.h
arch/mn10300/include/uapi/asm/socket.h
arch/parisc/include/asm/socket.h
arch/powerpc/include/uapi/asm/socket.h
arch/s390/include/uapi/asm/socket.h
arch/sparc/include/uapi/asm/socket.h
arch/xtensa/include/asm/socket.h
drivers/net/ethernet/adi/Kconfig
drivers/net/ethernet/adi/bfin_mac.c
drivers/net/ethernet/adi/bfin_mac.h
drivers/net/ethernet/cadence/at91_ether.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/Makefile
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_ptp.c [new file with mode: 0644]
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/igb/Makefile
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/Makefile
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/sfc/Kconfig
drivers/net/ethernet/sfc/Makefile
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/Makefile
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/cpsw_ale.h
drivers/net/ethernet/ti/cpts.c [new file with mode: 0644]
drivers/net/ethernet/ti/cpts.h [new file with mode: 0644]
drivers/net/ppp/ppp_generic.c
drivers/net/tun.c
drivers/net/veth.c
drivers/pps/Kconfig
drivers/ptp/Kconfig
drivers/ptp/ptp_chardev.c
include/linux/filter.h
include/linux/platform_data/cpsw.h
include/linux/timecompare.h [deleted file]
include/uapi/asm-generic/socket.h
include/uapi/linux/if_tun.h
include/uapi/linux/netconf.h
include/uapi/linux/ptp_clock.h
kernel/time/Makefile
kernel/time/timecompare.c [deleted file]
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/core/filter.c
net/core/sock.c
net/ipv4/devinet.c
net/ipv6/addrconf.c
net/ipv6/ah6.c
net/ipv6/datagram.c
net/ipv6/exthdrs.c
net/ipv6/icmp.c
net/ipv6/ip6_output.c
net/ipv6/ipv6_sockglue.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/netfilter/nf_nat_proto_icmpv6.c
net/ipv6/raw.c
net/ipv6/xfrm6_policy.c
net/ipv6/xfrm6_state.c

diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index dcaabe9fe869d7f029f413d6d97335549974fd05..221460714c56a56309867005254432edf046e4bb 100644
@@ -16,12 +16,16 @@ Required properties:
 - ale_entries          : Specifies No of entries ALE can hold
 - host_port_reg_ofs    : Specifies host port register offset
 - hw_stats_reg_ofs     : Specifies hardware statistics register offset
+- cpts_reg_ofs         : Specifies the offset of the CPTS registers
 - bd_ram_ofs           : Specifies internal descriptor RAM offset
 - bd_ram_size          : Specifies internal descriptor RAM size
 - rx_descs             : Specifies number of Rx descriptors
 - mac_control          : Specifies Default MAC control register content
                          for the specific platform
 - slaves               : Specifies number of slaves
+- cpts_active_slave    : Specifies the slave to use for time stamping
+- cpts_clock_mult      : Numerator to convert input clock ticks into nanoseconds
+- cpts_clock_shift     : Denominator to convert input clock ticks into nanoseconds
 - slave_reg_ofs                : Specifies slave register offset
 - sliver_reg_ofs       : Specifies slave sliver register offset
 - phy_id               : Specifies slave phy id
@@ -52,21 +56,25 @@ Examples:
                ale_entries = <1024>;
                host_port_reg_ofs = <0x108>;
                hw_stats_reg_ofs = <0x900>;
+               cpts_reg_ofs = <0xc00>;
                bd_ram_ofs = <0x2000>;
                bd_ram_size = <0x2000>;
                no_bd_ram = <0>;
                rx_descs = <64>;
                mac_control = <0x20>;
                slaves = <2>;
+               cpts_active_slave = <0>;
+               cpts_clock_mult = <0x80000000>;
+               cpts_clock_shift = <29>;
                cpsw_emac0: slave@0 {
-                       slave_reg_ofs = <0x208>;
+                       slave_reg_ofs = <0x200>;
                        sliver_reg_ofs = <0xd80>;
                        phy_id = "davinci_mdio.16:00";
                        /* Filled in by U-Boot */
                        mac-address = [ 00 00 00 00 00 00 ];
                };
                cpsw_emac1: slave@1 {
-                       slave_reg_ofs = <0x308>;
+                       slave_reg_ofs = <0x300>;
                        sliver_reg_ofs = <0xdc0>;
                        phy_id = "davinci_mdio.16:01";
                        /* Filled in by U-Boot */
@@ -86,21 +94,25 @@ Examples:
                ale_entries = <1024>;
                host_port_reg_ofs = <0x108>;
                hw_stats_reg_ofs = <0x900>;
+               cpts_reg_ofs = <0xc00>;
                bd_ram_ofs = <0x2000>;
                bd_ram_size = <0x2000>;
                no_bd_ram = <0>;
                rx_descs = <64>;
                mac_control = <0x20>;
                slaves = <2>;
+               cpts_active_slave = <0>;
+               cpts_clock_mult = <0x80000000>;
+               cpts_clock_shift = <29>;
                cpsw_emac0: slave@0 {
-                       slave_reg_ofs = <0x208>;
+                       slave_reg_ofs = <0x200>;
                        sliver_reg_ofs = <0xd80>;
                        phy_id = "davinci_mdio.16:00";
                        /* Filled in by U-Boot */
                        mac-address = [ 00 00 00 00 00 00 ];
                };
                cpsw_emac1: slave@1 {
-                       slave_reg_ofs = <0x308>;
+                       slave_reg_ofs = <0x300>;
                        sliver_reg_ofs = <0xdc0>;
                        phy_id = "davinci_mdio.16:01";
                        /* Filled in by U-Boot */
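
The cpts_clock_mult and cpts_clock_shift properties above are the usual cyclecounter scaling pair: nanoseconds are derived from raw CPTS counter ticks as ns = (ticks * mult) >> shift, so with the example values (mult = 0x80000000, shift = 29) each tick counts as 2^(31-29) = 4 ns, which would correspond to a 250 MHz reference clock (an illustrative assumption, not a statement about any particular board). A minimal sketch of that conversion:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the cpts_clock_mult / cpts_clock_shift scaling documented above.
 * mult/shift are the values from the example nodes; the 250 MHz reference
 * clock is only an assumption used for illustration.
 */
static uint64_t cpts_ticks_to_ns(uint64_t ticks, uint32_t mult, uint32_t shift)
{
	/* same cyclecounter math the driver uses: ns = (ticks * mult) >> shift */
	return (ticks * mult) >> shift;
}

int main(void)
{
	uint64_t one_second_of_ticks = 250000000;   /* 250 MHz, assumed */

	/* 0x80000000 >> 29 leaves a factor of 4, i.e. 4 ns per tick */
	printf("%llu ns\n", (unsigned long long)
	       cpts_ticks_to_ns(one_second_of_ticks, 0x80000000u, 29));
	return 0;
}
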
diff --git a/arch/alpha/include/asm/socket.h b/arch/alpha/include/asm/socket.h
index 7d2f75be932e6d16e4ceba058fe0e0c769a8be32..0087d053b77f9ada256b4d58fec451552d48eb63 100644
@@ -47,6 +47,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME            28
 #define SO_TIMESTAMP           29
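
SO_GET_FILTER aliases SO_ATTACH_FILTER on the getsockopt() side, so userspace can read back a previously attached classic BPF program through the sk_get_filter() path added elsewhere in this merge (net/core/filter.c, net/core/sock.c). Below is a hedged sketch of that usage; the convention that the option length is counted in filter instructions, with a zero-length call probing for the instruction count, is an assumption to verify against net/core/sock.c:

#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/filter.h>

#ifndef SO_GET_FILTER                     /* new in this series */
#define SO_GET_FILTER SO_ATTACH_FILTER    /* aliased exactly as in the hunks above */
#endif

/* Sketch: read back the classic BPF program attached to a socket.
 * Assumption (based on the sk_get_filter() path in this series): the
 * length is counted in filter instructions, and a zero-length call
 * reports how many instructions are attached.
 */
static int dump_socket_filter(int fd)
{
	struct sock_filter *insns;
	socklen_t len = 0;
	unsigned int i;

	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len) < 0)
		return -1;                /* old kernel or other error */
	if (len == 0)
		return 0;                 /* no filter attached */

	insns = calloc(len, sizeof(*insns));
	if (!insns)
		return -1;

	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &len) < 0) {
		free(insns);
		return -1;
	}

	for (i = 0; i < len; i++)
		printf("{ 0x%02x, %u, %u, 0x%08x },\n",
		       insns[i].code, insns[i].jt, insns[i].jf, insns[i].k);

	free(insns);
	return 0;
}

int main(void)
{
	/* trivial "accept everything" filter: BPF_RET | BPF_K, k = 0xffffffff */
	struct sock_filter code[] = { { 0x06, 0, 0, 0xffffffff } };
	struct sock_fprog prog = { .len = 1, .filter = code };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0)
		return 1;
	return dump_socket_filter(fd) < 0;
}
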
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index f3990b04fecf4661216c9cf321dc817c72e1a0b7..3290e61be3e1c185e697ee406a22b5b2808db5b0 100644
                                                        66  0x1b0b0     /* MX6Q_PAD_RGMII_RD2__ENET_RGMII_RD2 */
                                                        70  0x1b0b0     /* MX6Q_PAD_RGMII_RD3__ENET_RGMII_RD3 */
                                                        48  0x1b0b0     /* MX6Q_PAD_RGMII_RX_CTL__RGMII_RX_CTL */
+                                                       1033 0x4001b0a8 /* MX6Q_PAD_GPIO_16__ENET_ANATOP_ETHERNET_REF_OUT*/
                                                >;
                                        };
 
                                compatible = "fsl,imx6q-fec";
                                reg = <0x02188000 0x4000>;
                                interrupts = <0 118 0x04 0 119 0x04>;
-                               clocks = <&clks 117>, <&clks 117>;
-                               clock-names = "ipg", "ahb";
+                               clocks = <&clks 117>, <&clks 117>, <&clks 177>;
+                               clock-names = "ipg", "ahb", "ptp";
                                status = "disabled";
                        };
 
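
The new third clock and the "ptp" entry in clock-names give the FEC its 1588 timer clock. A minimal consumer-side sketch of how a driver would pick that clock up by name follows; the function and its error handling are illustrative assumptions, not the actual fec.c code:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Sketch: acquire the named "ptp" clock described in the DT node above. */
static int example_get_ptp_clock(struct platform_device *pdev)
{
	struct clk *clk_ptp;

	clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	if (IS_ERR(clk_ptp))
		return PTR_ERR(clk_ptp);    /* no PTP clock described in DT */

	return clk_prepare_enable(clk_ptp);
}
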
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 47c91f7185d2fb7cdef048388e5cf82620e68a87..38d69100398d9d6bb5a174aaa7c25dec150b746c 100644
@@ -117,6 +117,17 @@ static void __init imx6q_sabrelite_init(void)
        imx6q_sabrelite_cko1_setup();
 }
 
+static void __init imx6q_1588_init(void)
+{
+       struct regmap *gpr;
+
+       gpr = syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+       if (!IS_ERR(gpr))
+               regmap_update_bits(gpr, 0x4, 1 << 21, 1 << 21);
+       else
+               pr_err("failed to find fsl,imx6q-iomuxc-gpr regmap\n");
+
+}
 static void __init imx6q_usb_init(void)
 {
        struct regmap *anatop;
@@ -153,6 +164,7 @@ static void __init imx6q_init_machine(void)
 
        imx6q_pm_init();
        imx6q_usb_init();
+       imx6q_1588_init();
 }
 
 static struct cpuidle_driver imx6q_cpuidle_driver = {
diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index a473f8c6a9aa2be66f04f375ef7b7820b7c0683a..486df68abeecc955757a352cf64e70ad9b39a020 100644
@@ -40,6 +40,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME            28
 #define SO_TIMESTAMP           29
diff --git a/arch/cris/include/asm/socket.h b/arch/cris/include/asm/socket.h
index ae52825021afd98237557656abf77dad4a75491f..b681b043f6c819fedc4b819c5ffa59a72cd7059f 100644
@@ -42,6 +42,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME            28
 #define SO_TIMESTAMP           29
diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index a5b1d7dbb205389eef5e2d5627f0aad3744778fe..871f89b7fbdaf08a0629679f01d804337804cbc7 100644
@@ -40,6 +40,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME             28
 #define SO_TIMESTAMP           29
diff --git a/arch/h8300/include/asm/socket.h b/arch/h8300/include/asm/socket.h
index ec4554e7b04b68ab4c41ad93e612469ac896708c..90a2e573c7e679ac92300734e3641f7d5d5b45e0 100644
@@ -40,6 +40,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME             28
 #define SO_TIMESTAMP           29
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 41fc28a4a18a2305763db7be6e0d51210671030e..23d6759bb57b8dd6af9a824e78ce7335dede1d80 100644
@@ -49,6 +49,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER       26
 #define SO_DETACH_FILTER       27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME            28
 #define SO_TIMESTAMP           29
diff --git a/arch/m32r/include/asm/socket.h b/arch/m32r/include/asm/socket.h
index a15f40b527834256eababa34dd23d076cf003e2d..5e7088a26726cc0133ae3a6f166f3597832646a6 100644
@@ -40,6 +40,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME            28
 #define SO_TIMESTAMP           29
diff --git a/arch/m68k/include/asm/socket.h b/arch/m68k/include/asm/socket.h
index d1be684edf97925331569d9af422a5f427e2a271..285da3b6ad92c440dabaec8dfbeea9b8ccf6613b 100644
@@ -40,6 +40,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME             28
 #define SO_TIMESTAMP           29
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index c5ed59549cb878c211146c8ac86c8eaff354c525..17307ab90474271e4c77f9d48bfb775f2049db74 100644
@@ -63,6 +63,7 @@ To add: #define SO_REUSEPORT 0x0200   /* Allow local address and port reuse.  */
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME             28
 #define SO_TIMESTAMP           29
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index 820463a484b8dbc5b21ee627822297d4a33bf91f..af5366bbfe62727b09e2d47474e78a7483bb3a13 100644
@@ -40,6 +40,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME            28
 #define SO_TIMESTAMP           29
diff --git a/arch/parisc/include/asm/socket.h b/arch/parisc/include/asm/socket.h
index 1b52c2c31a7a284c8e4f6b6d08cc53faa60da42f..d9ff4731253bb6e8f6402686e2401e251126ecb2 100644
@@ -48,6 +48,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        0x401a
 #define SO_DETACH_FILTER        0x401b
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_ACCEPTCONN          0x401c
 
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index 3d5179bb122f120e24477c3be6f1459182b8bec5..eb0b1864d400b2a01386ed3a77fedb300a560227 100644
@@ -47,6 +47,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER       26
 #define SO_DETACH_FILTER       27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME            28
 #define SO_TIMESTAMP           29
diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index 69718cd6d63503e657a4878a1277ab260fa44bf7..436d07c23be8febc9ad245fb73e4f1671a004cd9 100644
@@ -46,6 +46,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME            28
 #define SO_TIMESTAMP           29
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index bea1568ae4af1aeee10008fbf1ae110dc6fa47ae..c83a937ead00676eefef950b6c00d77cd644c8a9 100644
@@ -41,6 +41,7 @@
 
 #define SO_ATTACH_FILTER       0x001a
 #define SO_DETACH_FILTER        0x001b
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME            0x001c
 #define SO_TIMESTAMP           0x001d
diff --git a/arch/xtensa/include/asm/socket.h b/arch/xtensa/include/asm/socket.h
index e36c681849205cd728bbc59b5903c87f61a81a73..38079be1cf1ebc98fa6d9a61aebd59ff81822f78 100644
@@ -52,6 +52,7 @@
 
 #define SO_ATTACH_FILTER        26
 #define SO_DETACH_FILTER        27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME            28
 #define SO_TIMESTAMP           29
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
index 49a30d37ae4a93cf2c84f1e733093722f0799405..175c38c077b2bcc3fe199aa7cc283eb9d1f74df2 100644
@@ -61,7 +61,7 @@ config BFIN_RX_DESC_NUM
 
 config BFIN_MAC_USE_HWSTAMP
        bool "Use IEEE 1588 hwstamp"
-       depends on BFIN_MAC && BF518
+       depends on BFIN_MAC && BF518 && PTP_1588_CLOCK && !(BFIN_MAC=y && PTP_1588_CLOCK=m)
        default y
        ---help---
          To support the IEEE 1588 Precision Time Protocol (PTP), select y here
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index f816426e1085812b6524c037992594e4a539c746..f1c458dc039a10df732dfdb0fe3f07ac35bb5041 100644
@@ -548,14 +548,17 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
        return 0;
 }
 
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
 static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
        struct ethtool_ts_info *info)
 {
+       struct bfin_mac_local *lp = netdev_priv(dev);
+
        info->so_timestamping =
                SOF_TIMESTAMPING_TX_HARDWARE |
                SOF_TIMESTAMPING_RX_HARDWARE |
-               SOF_TIMESTAMPING_SYS_HARDWARE;
-       info->phc_index = -1;
+               SOF_TIMESTAMPING_RAW_HARDWARE;
+       info->phc_index = lp->phc_index;
        info->tx_types =
                (1 << HWTSTAMP_TX_OFF) |
                (1 << HWTSTAMP_TX_ON);
@@ -566,6 +569,7 @@ static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
                (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
        return 0;
 }
+#endif
 
 static const struct ethtool_ops bfin_mac_ethtool_ops = {
        .get_settings = bfin_mac_ethtool_getsettings,
@@ -574,7 +578,9 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
        .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
        .get_wol = bfin_mac_ethtool_getwol,
        .set_wol = bfin_mac_ethtool_setwol,
+#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
        .get_ts_info = bfin_mac_ethtool_get_ts_info,
+#endif
 };
 
 /**************************************************************************/
@@ -649,6 +655,20 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
 #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
 #define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
 
+static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
+{
+       u32 ipn = 1000000000UL / input_clk;
+       u32 ppn = 1;
+       unsigned int shift = 0;
+
+       while (ppn <= ipn) {
+               ppn <<= 1;
+               shift++;
+       }
+       *shift_result = shift;
+       return 1000000000UL / ppn;
+}
+
 static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
                struct ifreq *ifr, int cmd)
 {
@@ -798,19 +818,7 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
                bfin_read_EMAC_PTP_TXSNAPLO();
                bfin_read_EMAC_PTP_TXSNAPHI();
 
-               /*
-                * Set registers so that rollover occurs soon to test this.
-                */
-               bfin_write_EMAC_PTP_TIMELO(0x00000000);
-               bfin_write_EMAC_PTP_TIMEHI(0xFF800000);
-
                SSYNC();
-
-               lp->compare.last_update = 0;
-               timecounter_init(&lp->clock,
-                               &lp->cycles,
-                               ktime_to_ns(ktime_get_real()));
-               timecompare_update(&lp->compare, 0);
        }
 
        lp->stamp_cfg = config;
@@ -818,15 +826,6 @@ static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
                -EFAULT : 0;
 }
 
-static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp)
-{
-       ktime_t sys = ktime_get_real();
-
-       pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
-                       __func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec,
-                       sys.tv.nsec, cmp->offset, cmp->skew);
-}
-
 static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
 {
        struct bfin_mac_local *lp = netdev_priv(netdev);
@@ -857,15 +856,9 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
                        regval = bfin_read_EMAC_PTP_TXSNAPLO();
                        regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-                       ns = timecounter_cyc2time(&lp->clock,
-                                       regval);
-                       timecompare_update(&lp->compare, ns);
+                       ns = regval << lp->shift;
                        shhwtstamps.hwtstamp = ns_to_ktime(ns);
-                       shhwtstamps.syststamp =
-                               timecompare_transform(&lp->compare, ns);
                        skb_tstamp_tx(skb, &shhwtstamps);
-
-                       bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
                }
        }
 }
@@ -888,55 +881,184 @@ static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
 
        regval = bfin_read_EMAC_PTP_RXSNAPLO();
        regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
-       ns = timecounter_cyc2time(&lp->clock, regval);
-       timecompare_update(&lp->compare, ns);
+       ns = regval << lp->shift;
        memset(shhwtstamps, 0, sizeof(*shhwtstamps));
        shhwtstamps->hwtstamp = ns_to_ktime(ns);
-       shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns);
+}
+
+static void bfin_mac_hwtstamp_init(struct net_device *netdev)
+{
+       struct bfin_mac_local *lp = netdev_priv(netdev);
+       u64 addend, ppb;
+       u32 input_clk, phc_clk;
+
+       /* Initialize hardware timer */
+       input_clk = get_sclk();
+       phc_clk = bfin_select_phc_clock(input_clk, &lp->shift);
+       addend = phc_clk * (1ULL << 32);
+       do_div(addend, input_clk);
+       bfin_write_EMAC_PTP_ADDEND((u32)addend);
+
+       lp->addend = addend;
+       ppb = 1000000000ULL * input_clk;
+       do_div(ppb, phc_clk);
+       lp->max_ppb = ppb - 1000000000ULL - 1ULL;
 
-       bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare);
+       /* Initialize hwstamp config */
+       lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+       lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
 }
 
-/*
- * bfin_read_clock - read raw cycle counter (to be used by time counter)
- */
-static cycle_t bfin_read_clock(const struct cyclecounter *tc)
+static u64 bfin_ptp_time_read(struct bfin_mac_local *lp)
 {
-       u64 stamp;
+       u64 ns;
+       u32 lo, hi;
+
+       lo = bfin_read_EMAC_PTP_TIMELO();
+       hi = bfin_read_EMAC_PTP_TIMEHI();
 
-       stamp =  bfin_read_EMAC_PTP_TIMELO();
-       stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL;
+       ns = ((u64) hi) << 32;
+       ns |= lo;
+       ns <<= lp->shift;
 
-       return stamp;
+       return ns;
 }
 
-#define PTP_CLK 25000000
+static void bfin_ptp_time_write(struct bfin_mac_local *lp, u64 ns)
+{
+       u32 hi, lo;
 
-static void bfin_mac_hwtstamp_init(struct net_device *netdev)
+       ns >>= lp->shift;
+       hi = ns >> 32;
+       lo = ns & 0xffffffff;
+
+       bfin_write_EMAC_PTP_TIMELO(lo);
+       bfin_write_EMAC_PTP_TIMEHI(hi);
+}
+
+/* PTP Hardware Clock operations */
+
+static int bfin_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+       u64 adj;
+       u32 diff, addend;
+       int neg_adj = 0;
+       struct bfin_mac_local *lp =
+               container_of(ptp, struct bfin_mac_local, caps);
+
+       if (ppb < 0) {
+               neg_adj = 1;
+               ppb = -ppb;
+       }
+       addend = lp->addend;
+       adj = addend;
+       adj *= ppb;
+       diff = div_u64(adj, 1000000000ULL);
+
+       addend = neg_adj ? addend - diff : addend + diff;
+
+       bfin_write_EMAC_PTP_ADDEND(addend);
+
+       return 0;
+}
+
+static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       s64 now;
+       unsigned long flags;
+       struct bfin_mac_local *lp =
+               container_of(ptp, struct bfin_mac_local, caps);
+
+       spin_lock_irqsave(&lp->phc_lock, flags);
+
+       now = bfin_ptp_time_read(lp);
+       now += delta;
+       bfin_ptp_time_write(lp, now);
+
+       spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+       return 0;
+}
+
+static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+       u64 ns;
+       u32 remainder;
+       unsigned long flags;
+       struct bfin_mac_local *lp =
+               container_of(ptp, struct bfin_mac_local, caps);
+
+       spin_lock_irqsave(&lp->phc_lock, flags);
+
+       ns = bfin_ptp_time_read(lp);
+
+       spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+       ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+       ts->tv_nsec = remainder;
+       return 0;
+}
+
+static int bfin_ptp_settime(struct ptp_clock_info *ptp,
+                          const struct timespec *ts)
+{
+       u64 ns;
+       unsigned long flags;
+       struct bfin_mac_local *lp =
+               container_of(ptp, struct bfin_mac_local, caps);
+
+       ns = ts->tv_sec * 1000000000ULL;
+       ns += ts->tv_nsec;
+
+       spin_lock_irqsave(&lp->phc_lock, flags);
+
+       bfin_ptp_time_write(lp, ns);
+
+       spin_unlock_irqrestore(&lp->phc_lock, flags);
+
+       return 0;
+}
+
+static int bfin_ptp_enable(struct ptp_clock_info *ptp,
+                         struct ptp_clock_request *rq, int on)
+{
+       return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info bfin_ptp_caps = {
+       .owner          = THIS_MODULE,
+       .name           = "BF518 clock",
+       .max_adj        = 0,
+       .n_alarm        = 0,
+       .n_ext_ts       = 0,
+       .n_per_out      = 0,
+       .pps            = 0,
+       .adjfreq        = bfin_ptp_adjfreq,
+       .adjtime        = bfin_ptp_adjtime,
+       .gettime        = bfin_ptp_gettime,
+       .settime        = bfin_ptp_settime,
+       .enable         = bfin_ptp_enable,
+};
+
+static int bfin_phc_init(struct net_device *netdev, struct device *dev)
 {
        struct bfin_mac_local *lp = netdev_priv(netdev);
-       u64 append;
 
-       /* Initialize hardware timer */
-       append = PTP_CLK * (1ULL << 32);
-       do_div(append, get_sclk());
-       bfin_write_EMAC_PTP_ADDEND((u32)append);
-
-       memset(&lp->cycles, 0, sizeof(lp->cycles));
-       lp->cycles.read = bfin_read_clock;
-       lp->cycles.mask = CLOCKSOURCE_MASK(64);
-       lp->cycles.mult = 1000000000 / PTP_CLK;
-       lp->cycles.shift = 0;
-
-       /* Synchronize our NIC clock against system wall clock */
-       memset(&lp->compare, 0, sizeof(lp->compare));
-       lp->compare.source = &lp->clock;
-       lp->compare.target = ktime_get_real;
-       lp->compare.num_samples = 10;
+       lp->caps = bfin_ptp_caps;
+       lp->caps.max_adj = lp->max_ppb;
+       lp->clock = ptp_clock_register(&lp->caps, dev);
+       if (IS_ERR(lp->clock))
+               return PTR_ERR(lp->clock);
 
-       /* Initialize hwstamp config */
-       lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
-       lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
+       lp->phc_index = ptp_clock_index(lp->clock);
+       spin_lock_init(&lp->phc_lock);
+
+       return 0;
+}
+
+static void bfin_phc_release(struct bfin_mac_local *lp)
+{
+       ptp_clock_unregister(lp->clock);
 }
 
 #else
@@ -945,6 +1067,8 @@ static void bfin_mac_hwtstamp_init(struct net_device *netdev)
 # define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
 # define bfin_rx_hwtstamp(dev, skb)
 # define bfin_tx_hwtstamp(dev, skb)
+# define bfin_phc_init(netdev, dev) 0
+# define bfin_phc_release(lp)
 #endif
 
 static inline void _tx_reclaim_skb(void)
@@ -1579,12 +1703,17 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
        }
 
        bfin_mac_hwtstamp_init(ndev);
+       if (bfin_phc_init(ndev, &pdev->dev)) {
+               dev_err(&pdev->dev, "Cannot register PHC device!\n");
+               goto out_err_phc;
+       }
 
        /* now, print out the card info, in a short format.. */
        netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
 
        return 0;
 
+out_err_phc:
 out_err_reg_ndev:
        free_irq(IRQ_MAC_RX, ndev);
 out_err_request_irq:
@@ -1603,6 +1732,8 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct bfin_mac_local *lp = netdev_priv(ndev);
 
+       bfin_phc_release(lp);
+
        platform_set_drvdata(pdev, NULL);
 
        lp->mii_bus->priv = NULL;
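
The hardware-timestamp rework above replaces the cyclecounter/timecompare pair with a PTP hardware clock: bfin_select_phc_clock() rounds the PHC tick to a power-of-two number of nanoseconds (so ns = counter << shift is exact), bfin_mac_hwtstamp_init() programs ADDEND = 2^32 * phc_clk / input_clk, and bfin_ptp_adjfreq() trims that addend by ppb parts per billion. A small worked sketch of the same arithmetic, assuming an 80 MHz system clock purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the arithmetic in bfin_select_phc_clock()/bfin_ptp_adjfreq().
 * The 80 MHz input clock and the +100 ppm adjustment are assumed example
 * values, not BF518 facts.
 */
int main(void)
{
	uint64_t input_clk = 80000000;            /* get_sclk(), assumed */
	uint64_t ipn = 1000000000ULL / input_clk; /* ns per input tick: 12 */
	uint64_t ppn = 1;
	unsigned int shift = 0;

	while (ppn <= ipn) {                      /* smallest power of two > ipn */
		ppn <<= 1;
		shift++;
	}
	/* here: ppn = 16, shift = 4, so the PHC ticks every 16 ns */
	uint64_t phc_clk = 1000000000ULL / ppn;   /* 62.5 MHz */
	uint64_t addend = (phc_clk << 32) / input_clk;

	/* adjfreq: scale the addend by ppb parts per billion */
	int32_t ppb = 100000;                     /* +100 ppm, for example */
	uint64_t diff = (addend * (uint64_t)ppb) / 1000000000ULL;

	printf("shift=%u phc_clk=%llu addend=0x%llx diff=%llu\n",
	       shift, (unsigned long long)phc_clk,
	       (unsigned long long)addend, (unsigned long long)diff);
	return 0;
}
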
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index 960905c0822389cee89101671fea646ce5d140a2..7a07ee07906b280f2d849ae0959c604ee58d411b 100644
@@ -11,8 +11,7 @@
 #define _BFIN_MAC_H_
 
 #include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
-#include <linux/timecompare.h>
+#include <linux/ptp_clock_kernel.h>
 #include <linux/timer.h>
 #include <linux/etherdevice.h>
 #include <linux/bfin_mac.h>
@@ -94,10 +93,14 @@ struct bfin_mac_local {
        struct mii_bus *mii_bus;
 
 #if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
-       struct cyclecounter cycles;
-       struct timecounter clock;
-       struct timecompare compare;
+       u32 addend;
+       unsigned int shift;
+       s32 max_ppb;
        struct hwtstamp_config stamp_cfg;
+       struct ptp_clock_info caps;
+       struct ptp_clock *clock;
+       int phc_index;
+       spinlock_t phc_lock; /* protects time lo/hi registers */
 #endif
 };
 
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index b92815aabc6594b8ef878820e356f2fb4d5f6c23..0d6392d24ff716b9c011648ba5d19bc8ac635512 100644
@@ -156,7 +156,7 @@ static int at91ether_start(struct net_device *dev)
        int i;
 
        lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
-                                       MAX_RX_DESCR * sizeof(struct dma_desc),
+                                       MAX_RX_DESCR * sizeof(struct macb_dma_desc),
                                        &lp->rx_ring_dma, GFP_KERNEL);
        if (!lp->rx_ring) {
                netdev_err(lp->dev, "unable to alloc rx ring DMA buffer\n");
@@ -170,7 +170,7 @@ static int at91ether_start(struct net_device *dev)
                netdev_err(lp->dev, "unable to alloc rx data DMA buffer\n");
 
                dma_free_coherent(&lp->pdev->dev,
-                                       MAX_RX_DESCR * sizeof(struct dma_desc),
+                                       MAX_RX_DESCR * sizeof(struct macb_dma_desc),
                                        lp->rx_ring, lp->rx_ring_dma);
                lp->rx_ring = NULL;
                return -ENOMEM;
@@ -256,7 +256,7 @@ static int at91ether_close(struct net_device *dev)
        netif_stop_queue(dev);
 
        dma_free_coherent(&lp->pdev->dev,
-                               MAX_RX_DESCR * sizeof(struct dma_desc),
+                               MAX_RX_DESCR * sizeof(struct macb_dma_desc),
                                lp->rx_ring, lp->rx_ring_dma);
        lp->rx_ring = NULL;
 
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 6a4f4998cfe54601d952fd196746786c8a968998..c3748753512578577d155dd866ca2f6cc08e822f 100644
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
 
 #include "macb.h"
 
 #define RX_BUFFER_SIZE         128
-#define RX_RING_SIZE           512
-#define RX_RING_BYTES          (sizeof(struct dma_desc) * RX_RING_SIZE)
+#define RX_RING_SIZE           512 /* must be power of 2 */
+#define RX_RING_BYTES          (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
 
-/* Make the IP header word-aligned (the ethernet header is 14 bytes) */
-#define RX_OFFSET              2
-
-#define TX_RING_SIZE           128
-#define DEF_TX_RING_PENDING    (TX_RING_SIZE - 1)
-#define TX_RING_BYTES          (sizeof(struct dma_desc) * TX_RING_SIZE)
-
-#define TX_RING_GAP(bp)                                                \
-       (TX_RING_SIZE - (bp)->tx_pending)
-#define TX_BUFFS_AVAIL(bp)                                     \
-       (((bp)->tx_tail <= (bp)->tx_head) ?                     \
-        (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head :     \
-        (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp))
-#define NEXT_TX(n)             (((n) + 1) & (TX_RING_SIZE - 1))
-
-#define NEXT_RX(n)             (((n) + 1) & (RX_RING_SIZE - 1))
+#define TX_RING_SIZE           128 /* must be power of 2 */
+#define TX_RING_BYTES          (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
 
 /* minimum number of free TX descriptors before waking up TX process */
 #define MACB_TX_WAKEUP_THRESH  (TX_RING_SIZE / 4)
 
 #define MACB_RX_INT_FLAGS      (MACB_BIT(RCOMP) | MACB_BIT(RXUBR)      \
                                 | MACB_BIT(ISR_ROVR))
+#define MACB_TX_ERR_FLAGS      (MACB_BIT(ISR_TUND)                     \
+                                       | MACB_BIT(ISR_RLE)             \
+                                       | MACB_BIT(TXERR))
+#define MACB_TX_INT_FLAGS      (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
+
+/*
+ * Graceful stop timeouts in us. We should allow up to
+ * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
+ */
+#define MACB_HALT_TIMEOUT      1230
+
+/* Ring buffer accessors */
+static unsigned int macb_tx_ring_wrap(unsigned int index)
+{
+       return index & (TX_RING_SIZE - 1);
+}
+
+static unsigned int macb_tx_ring_avail(struct macb *bp)
+{
+       return (bp->tx_tail - bp->tx_head) & (TX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+{
+       return &bp->tx_ring[macb_tx_ring_wrap(index)];
+}
+
+static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+{
+       return &bp->tx_skb[macb_tx_ring_wrap(index)];
+}
+
+static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
+{
+       dma_addr_t offset;
+
+       offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
+
+       return bp->tx_ring_dma + offset;
+}
+
+static unsigned int macb_rx_ring_wrap(unsigned int index)
+{
+       return index & (RX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+{
+       return &bp->rx_ring[macb_rx_ring_wrap(index)];
+}
+
+static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+{
+       return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index);
+}
 
 static void __macb_set_hwaddr(struct macb *bp)
 {
@@ -152,13 +194,17 @@ static void macb_handle_link_change(struct net_device *dev)
 
                        reg = macb_readl(bp, NCFGR);
                        reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+                       if (macb_is_gem(bp))
+                               reg &= ~GEM_BIT(GBE);
 
                        if (phydev->duplex)
                                reg |= MACB_BIT(FD);
                        if (phydev->speed == SPEED_100)
                                reg |= MACB_BIT(SPD);
+                       if (phydev->speed == SPEED_1000)
+                               reg |= GEM_BIT(GBE);
 
-                       macb_writel(bp, NCFGR, reg);
+                       macb_or_gem_writel(bp, NCFGR, reg);
 
                        bp->speed = phydev->speed;
                        bp->duplex = phydev->duplex;
@@ -216,7 +262,10 @@ static int macb_mii_probe(struct net_device *dev)
        }
 
        /* mask with MAC supported features */
-       phydev->supported &= PHY_BASIC_FEATURES;
+       if (macb_is_gem(bp))
+               phydev->supported &= PHY_GBIT_FEATURES;
+       else
+               phydev->supported &= PHY_BASIC_FEATURES;
 
        phydev->advertising = phydev->supported;
 
@@ -298,93 +347,147 @@ static void macb_update_stats(struct macb *bp)
                *p += __raw_readl(reg);
 }
 
-static void macb_tx(struct macb *bp)
+static int macb_halt_tx(struct macb *bp)
 {
-       unsigned int tail;
-       unsigned int head;
-       u32 status;
+       unsigned long   halt_time, timeout;
+       u32             status;
 
-       status = macb_readl(bp, TSR);
-       macb_writel(bp, TSR, status);
+       macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
 
-       netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status);
+       timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
+       do {
+               halt_time = jiffies;
+               status = macb_readl(bp, TSR);
+               if (!(status & MACB_BIT(TGO)))
+                       return 0;
 
-       if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
-               int i;
-               netdev_err(bp->dev, "TX %s, resetting buffers\n",
-                          status & MACB_BIT(UND) ?
-                          "underrun" : "retry limit exceeded");
+               usleep_range(10, 250);
+       } while (time_before(halt_time, timeout));
 
-               /* Transfer ongoing, disable transmitter, to avoid confusion */
-               if (status & MACB_BIT(TGO))
-                       macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE));
+       return -ETIMEDOUT;
+}
 
-               head = bp->tx_head;
+static void macb_tx_error_task(struct work_struct *work)
+{
+       struct macb     *bp = container_of(work, struct macb, tx_error_task);
+       struct macb_tx_skb      *tx_skb;
+       struct sk_buff          *skb;
+       unsigned int            tail;
 
-               /*Mark all the buffer as used to avoid sending a lost buffer*/
-               for (i = 0; i < TX_RING_SIZE; i++)
-                       bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+       netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
+                   bp->tx_tail, bp->tx_head);
 
-               /* Add wrap bit */
-               bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+       /* Make sure nobody is trying to queue up new packets */
+       netif_stop_queue(bp->dev);
 
-               /* free transmit buffer in upper layer*/
-               for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
-                       struct ring_info *rp = &bp->tx_skb[tail];
-                       struct sk_buff *skb = rp->skb;
+       /*
+        * Stop transmission now
+        * (in case we have just queued new packets)
+        */
+       if (macb_halt_tx(bp))
+               /* Just complain for now, reinitializing TX path can be good */
+               netdev_err(bp->dev, "BUG: halt tx timed out\n");
 
-                       BUG_ON(skb == NULL);
+       /* No need for the lock here as nobody will interrupt us anymore */
 
-                       rmb();
+       /*
+        * Treat frames in TX queue including the ones that caused the error.
+        * Free transmit buffers in upper layer.
+        */
+       for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
+               struct macb_dma_desc    *desc;
+               u32                     ctrl;
+
+               desc = macb_tx_desc(bp, tail);
+               ctrl = desc->ctrl;
+               tx_skb = macb_tx_skb(bp, tail);
+               skb = tx_skb->skb;
+
+               if (ctrl & MACB_BIT(TX_USED)) {
+                       netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
+                                   macb_tx_ring_wrap(tail), skb->data);
+                       bp->stats.tx_packets++;
+                       bp->stats.tx_bytes += skb->len;
+               } else {
+                       /*
+                        * "Buffers exhausted mid-frame" errors may only happen
+                        * if the driver is buggy, so complain loudly about those.
+                        * Statistics are updated by hardware.
+                        */
+                       if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
+                               netdev_err(bp->dev,
+                                          "BUG: TX buffers exhausted mid-frame\n");
 
-                       dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
-                                                        DMA_TO_DEVICE);
-                       rp->skb = NULL;
-                       dev_kfree_skb_irq(skb);
+                       desc->ctrl = ctrl | MACB_BIT(TX_USED);
                }
 
-               bp->tx_head = bp->tx_tail = 0;
-
-               /* Enable the transmitter again */
-               if (status & MACB_BIT(TGO))
-                       macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
+               dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
+                                DMA_TO_DEVICE);
+               tx_skb->skb = NULL;
+               dev_kfree_skb(skb);
        }
 
-       if (!(status & MACB_BIT(COMP)))
-               /*
-                * This may happen when a buffer becomes complete
-                * between reading the ISR and scanning the
-                * descriptors.  Nothing to worry about.
-                */
-               return;
+       /* Make descriptor updates visible to hardware */
+       wmb();
+
+       /* Reinitialize the TX desc queue */
+       macb_writel(bp, TBQP, bp->tx_ring_dma);
+       /* Make TX ring reflect state of hardware */
+       bp->tx_head = bp->tx_tail = 0;
+
+       /* Now we are ready to start transmission again */
+       netif_wake_queue(bp->dev);
+
+       /* Housework before enabling TX IRQ */
+       macb_writel(bp, TSR, macb_readl(bp, TSR));
+       macb_writel(bp, IER, MACB_TX_INT_FLAGS);
+}
+
+static void macb_tx_interrupt(struct macb *bp)
+{
+       unsigned int tail;
+       unsigned int head;
+       u32 status;
+
+       status = macb_readl(bp, TSR);
+       macb_writel(bp, TSR, status);
+
+       netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
+               (unsigned long)status);
 
        head = bp->tx_head;
-       for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) {
-               struct ring_info *rp = &bp->tx_skb[tail];
-               struct sk_buff *skb = rp->skb;
-               u32 bufstat;
+       for (tail = bp->tx_tail; tail != head; tail++) {
+               struct macb_tx_skb      *tx_skb;
+               struct sk_buff          *skb;
+               struct macb_dma_desc    *desc;
+               u32                     ctrl;
 
-               BUG_ON(skb == NULL);
+               desc = macb_tx_desc(bp, tail);
 
+               /* Make hw descriptor updates visible to CPU */
                rmb();
-               bufstat = bp->tx_ring[tail].ctrl;
 
-               if (!(bufstat & MACB_BIT(TX_USED)))
+               ctrl = desc->ctrl;
+
+               if (!(ctrl & MACB_BIT(TX_USED)))
                        break;
 
-               netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n",
-                          tail, skb->data);
-               dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
+               tx_skb = macb_tx_skb(bp, tail);
+               skb = tx_skb->skb;
+
+               netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
+                       macb_tx_ring_wrap(tail), skb->data);
+               dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
                                 DMA_TO_DEVICE);
                bp->stats.tx_packets++;
                bp->stats.tx_bytes += skb->len;
-               rp->skb = NULL;
+               tx_skb->skb = NULL;
                dev_kfree_skb_irq(skb);
        }
 
        bp->tx_tail = tail;
-       if (netif_queue_stopped(bp->dev) &&
-           TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH)
+       if (netif_queue_stopped(bp->dev)
+                       && macb_tx_ring_avail(bp) > MACB_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);
 }
 
@@ -393,31 +496,48 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 {
        unsigned int len;
        unsigned int frag;
-       unsigned int offset = 0;
+       unsigned int offset;
        struct sk_buff *skb;
+       struct macb_dma_desc *desc;
 
-       len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
+       desc = macb_rx_desc(bp, last_frag);
+       len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
 
-       netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
-                  first_frag, last_frag, len);
+       netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+               macb_rx_ring_wrap(first_frag),
+               macb_rx_ring_wrap(last_frag), len);
 
-       skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET);
+       /*
+        * The ethernet header starts NET_IP_ALIGN bytes into the
+        * first buffer. Since the header is 14 bytes, this makes the
+        * payload word-aligned.
+        *
+        * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
+        * the two padding bytes into the skb so that we avoid hitting
+        * the slowpath in memcpy(), and pull them off afterwards.
+        */
+       skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
        if (!skb) {
                bp->stats.rx_dropped++;
-               for (frag = first_frag; ; frag = NEXT_RX(frag)) {
-                       bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+               for (frag = first_frag; ; frag++) {
+                       desc = macb_rx_desc(bp, frag);
+                       desc->addr &= ~MACB_BIT(RX_USED);
                        if (frag == last_frag)
                                break;
                }
+
+               /* Make descriptor updates visible to hardware */
                wmb();
+
                return 1;
        }
 
-       skb_reserve(skb, RX_OFFSET);
+       offset = 0;
+       len += NET_IP_ALIGN;
        skb_checksum_none_assert(skb);
        skb_put(skb, len);
 
-       for (frag = first_frag; ; frag = NEXT_RX(frag)) {
+       for (frag = first_frag; ; frag++) {
                unsigned int frag_len = RX_BUFFER_SIZE;
 
                if (offset + frag_len > len) {
@@ -425,22 +545,24 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
                        frag_len = len - offset;
                }
                skb_copy_to_linear_data_offset(skb, offset,
-                                              (bp->rx_buffers +
-                                               (RX_BUFFER_SIZE * frag)),
-                                              frag_len);
+                               macb_rx_buffer(bp, frag), frag_len);
                offset += RX_BUFFER_SIZE;
-               bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
-               wmb();
+               desc = macb_rx_desc(bp, frag);
+               desc->addr &= ~MACB_BIT(RX_USED);
 
                if (frag == last_frag)
                        break;
        }
 
+       /* Make descriptor updates visible to hardware */
+       wmb();
+
+       __skb_pull(skb, NET_IP_ALIGN);
        skb->protocol = eth_type_trans(skb, bp->dev);
 
        bp->stats.rx_packets++;
-       bp->stats.rx_bytes += len;
-       netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n",
+       bp->stats.rx_bytes += skb->len;
+       netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
                   skb->len, skb->csum);
        netif_receive_skb(skb);
 
@@ -453,8 +575,12 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
 {
        unsigned int frag;
 
-       for (frag = begin; frag != end; frag = NEXT_RX(frag))
-               bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED);
+       for (frag = begin; frag != end; frag++) {
+               struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+               desc->addr &= ~MACB_BIT(RX_USED);
+       }
+
+       /* Make descriptor updates visible to hardware */
        wmb();
 
        /*
@@ -467,15 +593,18 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
 static int macb_rx(struct macb *bp, int budget)
 {
        int received = 0;
-       unsigned int tail = bp->rx_tail;
+       unsigned int tail;
        int first_frag = -1;
 
-       for (; budget > 0; tail = NEXT_RX(tail)) {
+       for (tail = bp->rx_tail; budget > 0; tail++) {
+               struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
                u32 addr, ctrl;
 
+               /* Make hw descriptor updates visible to CPU */
                rmb();
-               addr = bp->rx_ring[tail].addr;
-               ctrl = bp->rx_ring[tail].ctrl;
+
+               addr = desc->addr;
+               ctrl = desc->ctrl;
 
                if (!(addr & MACB_BIT(RX_USED)))
                        break;
@@ -518,7 +647,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
 
        work_done = 0;
 
-       netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n",
+       netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
                   (unsigned long)status, budget);
 
        work_done = macb_rx(bp, budget);
@@ -557,6 +686,8 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                        break;
                }
 
+               netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
+
                if (status & MACB_RX_INT_FLAGS) {
                        /*
                         * There's no point taking any more interrupts
@@ -568,14 +699,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                        macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
 
                        if (napi_schedule_prep(&bp->napi)) {
-                               netdev_dbg(bp->dev, "scheduling RX softirq\n");
+                               netdev_vdbg(bp->dev, "scheduling RX softirq\n");
                                __napi_schedule(&bp->napi);
                        }
                }
 
-               if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) |
-                           MACB_BIT(ISR_RLE)))
-                       macb_tx(bp);
+               if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
+                       macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
+                       schedule_work(&bp->tx_error_task);
+                       break;
+               }
+
+               if (status & MACB_BIT(TCOMP))
+                       macb_tx_interrupt(bp);
 
                /*
                 * Link change detection isn't possible with RMII, so we'll
@@ -627,11 +763,13 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct macb *bp = netdev_priv(dev);
        dma_addr_t mapping;
        unsigned int len, entry;
+       struct macb_dma_desc *desc;
+       struct macb_tx_skb *tx_skb;
        u32 ctrl;
        unsigned long flags;
 
-#ifdef DEBUG
-       netdev_dbg(bp->dev,
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+       netdev_vdbg(bp->dev,
                   "start_xmit: len %u head %p data %p tail %p end %p\n",
                   skb->len, skb->head, skb->data,
                   skb_tail_pointer(skb), skb_end_pointer(skb));
@@ -643,7 +781,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        spin_lock_irqsave(&bp->lock, flags);
 
        /* This is a hard error, log it. */
-       if (TX_BUFFS_AVAIL(bp) < 1) {
+       if (macb_tx_ring_avail(bp) < 1) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
@@ -652,13 +790,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_BUSY;
        }
 
-       entry = bp->tx_head;
-       netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry);
+       entry = macb_tx_ring_wrap(bp->tx_head);
+       bp->tx_head++;
+       netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
        mapping = dma_map_single(&bp->pdev->dev, skb->data,
                                 len, DMA_TO_DEVICE);
-       bp->tx_skb[entry].skb = skb;
-       bp->tx_skb[entry].mapping = mapping;
-       netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
+
+       tx_skb = &bp->tx_skb[entry];
+       tx_skb->skb = skb;
+       tx_skb->mapping = mapping;
+       netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
                   skb->data, (unsigned long)mapping);
 
        ctrl = MACB_BF(TX_FRMLEN, len);
@@ -666,18 +807,18 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (entry == (TX_RING_SIZE - 1))
                ctrl |= MACB_BIT(TX_WRAP);
 
-       bp->tx_ring[entry].addr = mapping;
-       bp->tx_ring[entry].ctrl = ctrl;
-       wmb();
+       desc = &bp->tx_ring[entry];
+       desc->addr = mapping;
+       desc->ctrl = ctrl;
 
-       entry = NEXT_TX(entry);
-       bp->tx_head = entry;
+       /* Make newly initialized descriptor visible to hardware */
+       wmb();
 
        skb_tx_timestamp(skb);
 
        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 
-       if (TX_BUFFS_AVAIL(bp) < 1)
+       if (macb_tx_ring_avail(bp) < 1)
                netif_stop_queue(dev);
 
        spin_unlock_irqrestore(&bp->lock, flags);
@@ -713,7 +854,7 @@ static int macb_alloc_consistent(struct macb *bp)
 {
        int size;
 
-       size = TX_RING_SIZE * sizeof(struct ring_info);
+       size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
        bp->tx_skb = kmalloc(size, GFP_KERNEL);
        if (!bp->tx_skb)
                goto out_err;
@@ -776,9 +917,6 @@ static void macb_init_rings(struct macb *bp)
 
 static void macb_reset_hw(struct macb *bp)
 {
-       /* Make sure we have the write buffer for ourselves */
-       wmb();
-
        /*
         * Disable RX and TX (XXX: Should we halt the transmission
         * more gracefully?)
@@ -883,6 +1021,7 @@ static void macb_init_hw(struct macb *bp)
        __macb_set_hwaddr(bp);
 
        config = macb_mdc_clk_div(bp);
+       config |= MACB_BF(RBOF, NET_IP_ALIGN);  /* Make eth data aligned */
        config |= MACB_BIT(PAE);                /* PAuse Enable */
        config |= MACB_BIT(DRFCS);              /* Discard Rx FCS */
        config |= MACB_BIT(BIG);                /* Receive oversized frames */
@@ -903,13 +1042,8 @@ static void macb_init_hw(struct macb *bp)
        macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
 
        /* Enable interrupts */
-       macb_writel(bp, IER, (MACB_BIT(RCOMP)
-                             | MACB_BIT(RXUBR)
-                             | MACB_BIT(ISR_TUND)
-                             | MACB_BIT(ISR_RLE)
-                             | MACB_BIT(TXERR)
-                             | MACB_BIT(TCOMP)
-                             | MACB_BIT(ISR_ROVR)
+       macb_writel(bp, IER, (MACB_RX_INT_FLAGS
+                             | MACB_TX_INT_FLAGS
                              | MACB_BIT(HRESP)));
 
 }
@@ -1206,20 +1340,49 @@ static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        return phy_ethtool_sset(phydev, cmd);
 }
 
-static void macb_get_drvinfo(struct net_device *dev,
-                            struct ethtool_drvinfo *info)
+static int macb_get_regs_len(struct net_device *netdev)
+{
+       return MACB_GREGS_NBR * sizeof(u32);
+}
+
+static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+                         void *p)
 {
        struct macb *bp = netdev_priv(dev);
+       unsigned int tail, head;
+       u32 *regs_buff = p;
+
+       regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
+                       | MACB_GREGS_VERSION;
+
+       tail = macb_tx_ring_wrap(bp->tx_tail);
+       head = macb_tx_ring_wrap(bp->tx_head);
+
+       regs_buff[0]  = macb_readl(bp, NCR);
+       regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
+       regs_buff[2]  = macb_readl(bp, NSR);
+       regs_buff[3]  = macb_readl(bp, TSR);
+       regs_buff[4]  = macb_readl(bp, RBQP);
+       regs_buff[5]  = macb_readl(bp, TBQP);
+       regs_buff[6]  = macb_readl(bp, RSR);
+       regs_buff[7]  = macb_readl(bp, IMR);
+
+       regs_buff[8]  = tail;
+       regs_buff[9]  = head;
+       regs_buff[10] = macb_tx_dma(bp, tail);
+       regs_buff[11] = macb_tx_dma(bp, head);
 
-       strcpy(info->driver, bp->pdev->dev.driver->name);
-       strcpy(info->version, "$Revision: 1.14 $");
-       strcpy(info->bus_info, dev_name(&bp->pdev->dev));
+       if (macb_is_gem(bp)) {
+               regs_buff[12] = gem_readl(bp, USRIO);
+               regs_buff[13] = gem_readl(bp, DMACFG);
+       }
 }
 
 const struct ethtool_ops macb_ethtool_ops = {
        .get_settings           = macb_get_settings,
        .set_settings           = macb_set_settings,
-       .get_drvinfo            = macb_get_drvinfo,
+       .get_regs_len           = macb_get_regs_len,
+       .get_regs               = macb_get_regs,
        .get_link               = ethtool_op_get_link,
        .get_ts_info            = ethtool_op_get_ts_info,
 };
@@ -1310,6 +1473,7 @@ static int __init macb_probe(struct platform_device *pdev)
        struct phy_device *phydev;
        u32 config;
        int err = -ENXIO;
+       struct pinctrl *pinctrl;
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!regs) {
@@ -1317,6 +1481,15 @@ static int __init macb_probe(struct platform_device *pdev)
                goto err_out;
        }
 
+       pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+       if (IS_ERR(pinctrl)) {
+               err = PTR_ERR(pinctrl);
+               if (err == -EPROBE_DEFER)
+                       goto err_out;
+
+               dev_warn(&pdev->dev, "No pinctrl provided\n");
+       }
+
        err = -ENOMEM;
        dev = alloc_etherdev(sizeof(*bp));
        if (!dev)
@@ -1332,6 +1505,7 @@ static int __init macb_probe(struct platform_device *pdev)
        bp->dev = dev;
 
        spin_lock_init(&bp->lock);
+       INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
 
        bp->pclk = clk_get(&pdev->dev, "pclk");
        if (IS_ERR(bp->pclk)) {
@@ -1388,7 +1562,9 @@ static int __init macb_probe(struct platform_device *pdev)
                bp->phy_interface = err;
        }
 
-       if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
+       if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+               macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
+       else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
 #if defined(CONFIG_ARCH_AT91)
                macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
                                               MACB_BIT(CLKEN)));
@@ -1402,8 +1578,6 @@ static int __init macb_probe(struct platform_device *pdev)
                macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
 #endif
 
-       bp->tx_pending = DEF_TX_RING_PENDING;
-
        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
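
The new macb ring accessors rely on RX_RING_SIZE and TX_RING_SIZE being powers of two (hence the "must be power of 2" comments): head and tail are free-running counters, and masking with (SIZE - 1) wraps them exactly as a modulo would. A tiny standalone sketch of that equivalence, using an assumed ring size of 128:

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 128   /* must be a power of two, as in macb.c */

/* Same idea as macb_tx_ring_wrap(): mask instead of '%' on free-running
 * indices; valid only because RING_SIZE is a power of two.
 */
static unsigned int ring_wrap(unsigned int index)
{
	return index & (RING_SIZE - 1);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 1000; i++)
		assert(ring_wrap(i) == i % RING_SIZE);

	printf("mask and modulo agree for power-of-two ring sizes\n");
	return 0;
}
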
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index a3627517ec9533005e10bc57ef7e41cae49d18d1..4235ab871ab4e80ccc19a6ef4301b395ff4db236 100644
@@ -10,6 +10,9 @@
 #ifndef _MACB_H
 #define _MACB_H
 
+#define MACB_GREGS_NBR 16
+#define MACB_GREGS_VERSION 1
+
 /* MACB register offsets */
 #define MACB_NCR                               0x0000
 #define MACB_NCFGR                             0x0004
 #define MACB_IRXFCS_SIZE                       1
 
 /* GEM specific NCFGR bitfields. */
+#define GEM_GBE_OFFSET                         10
+#define GEM_GBE_SIZE                           1
 #define GEM_CLK_OFFSET                         18
 #define GEM_CLK_SIZE                           3
 #define GEM_DBW_OFFSET                         21
 /* Bitfields in USRIO (AT91) */
 #define MACB_RMII_OFFSET                       0
 #define MACB_RMII_SIZE                         1
+#define GEM_RGMII_OFFSET                       0       /* GEM gigabit mode */
+#define GEM_RGMII_SIZE                         1
 #define MACB_CLKEN_OFFSET                      1
 #define MACB_CLKEN_SIZE                                1
 
                __v; \
        })
 
-struct dma_desc {
+/**
+ * struct macb_dma_desc - Hardware DMA descriptor
+ * @addr: DMA address of data buffer
+ * @ctrl: Control and status bits
+ */
+struct macb_dma_desc {
        u32     addr;
        u32     ctrl;
 };
@@ -423,7 +435,12 @@ struct dma_desc {
 #define MACB_TX_USED_OFFSET                    31
 #define MACB_TX_USED_SIZE                      1
 
-struct ring_info {
+/**
+ * struct macb_tx_skb - data about an skb which is being transmitted
+ * @skb: skb currently being transmitted
+ * @mapping: DMA address of the skb's data buffer
+ */
+struct macb_tx_skb {
        struct sk_buff          *skb;
        dma_addr_t              mapping;
 };
@@ -508,12 +525,12 @@ struct macb {
        void __iomem            *regs;
 
        unsigned int            rx_tail;
-       struct dma_desc         *rx_ring;
+       struct macb_dma_desc    *rx_ring;
        void                    *rx_buffers;
 
        unsigned int            tx_head, tx_tail;
-       struct dma_desc         *tx_ring;
-       struct ring_info        *tx_skb;
+       struct macb_dma_desc    *tx_ring;
+       struct macb_tx_skb      *tx_skb;
 
        spinlock_t              lock;
        struct platform_device  *pdev;
@@ -521,6 +538,7 @@ struct macb {
        struct clk              *hclk;
        struct net_device       *dev;
        struct napi_struct      napi;
+       struct work_struct      tx_error_task;
        struct net_device_stats stats;
        union {
                struct macb_stats       macb;
@@ -531,8 +549,6 @@ struct macb {
        dma_addr_t              tx_ring_dma;
        dma_addr_t              rx_buffers_dma;
 
-       unsigned int            rx_pending, tx_pending;
-
        struct mii_bus          *mii_bus;
        struct phy_device       *phy_dev;
        unsigned int            link;
index feff51664dcf76974cc62a58be978bbbe83f0cf7..ff3be53d016971dac5704a3012259871d43275ea 100644 (file)
@@ -92,4 +92,13 @@ config GIANFAR
          This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx,
          and MPC86xx family of chips, and the FEC on the 8540.
 
+config FEC_PTP
+       bool "PTP Hardware Clock (PHC)"
+       depends on FEC
+       select PPS
+       select PTP_1588_CLOCK
+       ---help---
+         Say Y here if you want to use PTP Hardware Clock (PHC) in the
+         driver.  Only the basic clock operations have been implemented.
+
 endif # NET_VENDOR_FREESCALE
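
With FEC_PTP enabled the driver registers a PTP Hardware Clock that userspace can read through the posix-clock interface. A minimal sketch, assuming the FEC clock appears as /dev/ptp0 (the index depends on probe order) and using the standard FD_TO_CLOCKID encoding for dynamic clocks:

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Map a PHC character-device fd to a dynamic posix clock id. */
#define FD_TO_CLOCKID(fd)       ((~(clockid_t)(fd) << 3) | 3)

int main(void)
{
        int fd = open("/dev/ptp0", O_RDONLY);   /* assumed device node */
        struct timespec ts;

        if (fd < 0 || clock_gettime(FD_TO_CLOCKID(fd), &ts)) {
                perror("reading PHC");
                return 1;
        }
        printf("PHC time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        close(fd);
        return 0;
}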
index 3d1839afff6574ac96a9f4f2c7955739a8574567..d4d19b3d00aed69b8d905307f16b53e717926dcc 100644 (file)
@@ -3,6 +3,7 @@
 #
 
 obj-$(CONFIG_FEC) += fec.o
+obj-$(CONFIG_FEC_PTP) += fec_ptp.o
 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
 ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
        obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
index fffd20528b5da8e0864eeddcccbf4f034a9ba9d2..2665162ff4e59baf7d57d52db4ec677a67f37af3 100644 (file)
@@ -140,21 +140,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #endif
 #endif /* CONFIG_M5272 */
 
-/* The number of Tx and Rx buffers.  These are allocated from the page
- * pool.  The code may assume these are power of two, so it it best
- * to keep them that size.
- * We don't need to allocate pages for the transmitter.  We just use
- * the skbuffer directly.
- */
-#define FEC_ENET_RX_PAGES      8
-#define FEC_ENET_RX_FRSIZE     2048
-#define FEC_ENET_RX_FRPPG      (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
-#define RX_RING_SIZE           (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
-#define FEC_ENET_TX_FRSIZE     2048
-#define FEC_ENET_TX_FRPPG      (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
-#define TX_RING_SIZE           16      /* Must be power of two */
-#define TX_RING_MOD_MASK       15      /*   for this to work */
-
 #if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
 #error "FEC: descriptor ring size constants too large"
 #endif
@@ -179,9 +164,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #define PKT_MINBUF_SIZE                64
 #define PKT_MAXBLR_SIZE                1520
 
-/* This device has up to three irqs on some platforms */
-#define FEC_IRQ_NUM            3
-
 /*
  * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
  * size bits. Other FEC hardware does not, so we need to take that into
@@ -194,61 +176,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 #define        OPT_FRAME_SIZE  0
 #endif
 
-/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
- * tx_bd_base always point to the base of the buffer descriptors.  The
- * cur_rx and cur_tx point to the currently available buffer.
- * The dirty_tx tracks the current buffer that is being sent by the
- * controller.  The cur_tx and dirty_tx are equal under both completely
- * empty and completely full conditions.  The empty/ready indicator in
- * the buffer descriptor determines the actual condition.
- */
-struct fec_enet_private {
-       /* Hardware registers of the FEC device */
-       void __iomem *hwp;
-
-       struct net_device *netdev;
-
-       struct clk *clk_ipg;
-       struct clk *clk_ahb;
-
-       /* The saved address of a sent-in-place packet/buffer, for skfree(). */
-       unsigned char *tx_bounce[TX_RING_SIZE];
-       struct  sk_buff* tx_skbuff[TX_RING_SIZE];
-       struct  sk_buff* rx_skbuff[RX_RING_SIZE];
-       ushort  skb_cur;
-       ushort  skb_dirty;
-
-       /* CPM dual port RAM relative addresses */
-       dma_addr_t      bd_dma;
-       /* Address of Rx and Tx buffers */
-       struct bufdesc  *rx_bd_base;
-       struct bufdesc  *tx_bd_base;
-       /* The next free ring entry */
-       struct bufdesc  *cur_rx, *cur_tx;
-       /* The ring entries to be free()ed */
-       struct bufdesc  *dirty_tx;
-
-       uint    tx_full;
-       /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
-       spinlock_t hw_lock;
-
-       struct  platform_device *pdev;
-
-       int     opened;
-       int     dev_id;
-
-       /* Phylib and MDIO interface */
-       struct  mii_bus *mii_bus;
-       struct  phy_device *phy_dev;
-       int     mii_timeout;
-       uint    phy_speed;
-       phy_interface_t phy_interface;
-       int     link;
-       int     full_duplex;
-       struct  completion mdio_done;
-       int     irq[FEC_IRQ_NUM];
-};
-
 /* FEC MII MMFR bits definition */
 #define FEC_MMFR_ST            (1 << 30)
 #define FEC_MMFR_OP_READ       (2 << 28)
@@ -353,6 +280,17 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                        | BD_ENET_TX_LAST | BD_ENET_TX_TC);
        bdp->cbd_sc = status;
 
+#ifdef CONFIG_FEC_PTP
+       bdp->cbd_bdu = 0;
+       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+                       fep->hwts_tx_en)) {
+               bdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+       } else {
+               bdp->cbd_esc = BD_ENET_TX_INT;
+       }
+#endif
        /* Trigger transmission start */
        writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
@@ -510,10 +448,17 @@ fec_restart(struct net_device *ndev, int duplex)
                writel(1 << 8, fep->hwp + FEC_X_WMRK);
        }
 
+#ifdef CONFIG_FEC_PTP
+       ecntl |= (1 << 4);
+#endif
+
        /* And last, enable the transmit and receive processing */
        writel(ecntl, fep->hwp + FEC_ECNTRL);
        writel(0, fep->hwp + FEC_R_DES_ACTIVE);
 
+#ifdef CONFIG_FEC_PTP
+       fec_ptp_start_cyclecounter(ndev);
+#endif
        /* Enable interrupts we wish to service */
        writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
 }
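
The SKBTX_HW_TSTAMP check added to fec_enet_start_xmit() above only fires when an application has requested hardware transmit timestamps on its socket; the completed stamps are then reported through skb_tstamp_tx() in the hunk that follows. A hedged userspace sketch of that request, assuming an already-open socket fd (the helper name is made up here):

#include <linux/net_tstamp.h>
#include <stdio.h>
#include <sys/socket.h>

/* Ask the kernel for hardware TX timestamps on an existing socket.
 * Completed stamps are read back later with recvmsg(MSG_ERRQUEUE). */
static int enable_hw_tx_timestamps(int fd)
{
        int flags = SOF_TIMESTAMPING_TX_HARDWARE |
                    SOF_TIMESTAMPING_RAW_HARDWARE;

        if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
                       &flags, sizeof(flags)) < 0) {
                perror("SO_TIMESTAMPING");
                return -1;
        }
        return 0;
}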
@@ -599,6 +544,19 @@ fec_enet_tx(struct net_device *ndev)
                        ndev->stats.tx_packets++;
                }
 
+#ifdef CONFIG_FEC_PTP
+               if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+                       struct skb_shared_hwtstamps shhwtstamps;
+                       unsigned long flags;
+
+                       memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+                       spin_lock_irqsave(&fep->tmreg_lock, flags);
+                       shhwtstamps.hwtstamp = ns_to_ktime(
+                               timecounter_cyc2time(&fep->tc, bdp->ts));
+                       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+                       skb_tstamp_tx(skb, &shhwtstamps);
+               }
+#endif
                if (status & BD_ENET_TX_READY)
                        printk("HEY! Enet xmit interrupt and TX_READY.\n");
 
@@ -725,6 +683,21 @@ fec_enet_rx(struct net_device *ndev)
                        skb_put(skb, pkt_len - 4);      /* Make room */
                        skb_copy_to_linear_data(skb, data, pkt_len - 4);
                        skb->protocol = eth_type_trans(skb, ndev);
+#ifdef CONFIG_FEC_PTP
+                       /* Get receive timestamp from the skb */
+                       if (fep->hwts_rx_en) {
+                               struct skb_shared_hwtstamps *shhwtstamps =
+                                                           skb_hwtstamps(skb);
+                               unsigned long flags;
+
+                               memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+                               spin_lock_irqsave(&fep->tmreg_lock, flags);
+                               shhwtstamps->hwtstamp = ns_to_ktime(
+                                   timecounter_cyc2time(&fep->tc, bdp->ts));
+                               spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+                       }
+#endif
                        if (!skb_defer_rx_timestamp(skb))
                                netif_rx(skb);
                }
@@ -739,6 +712,12 @@ rx_processing_done:
                status |= BD_ENET_RX_EMPTY;
                bdp->cbd_sc = status;
 
+#ifdef CONFIG_FEC_PTP
+               bdp->cbd_esc = BD_ENET_RX_INT;
+               bdp->cbd_prot = 0;
+               bdp->cbd_bdu = 0;
+#endif
+
                /* Update BD pointer to next entry */
                if (status & BD_ENET_RX_WRAP)
                        bdp = fep->rx_bd_base;
@@ -1178,6 +1157,10 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
        if (!phydev)
                return -ENODEV;
 
+#ifdef CONFIG_FEC_PTP
+       if (cmd == SIOCSHWTSTAMP)
+               return fec_ptp_ioctl(ndev, rq, cmd);
+#endif
        return phy_mii_ioctl(phydev, rq, cmd);
 }
 
@@ -1224,6 +1207,9 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
                bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
                                FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
                bdp->cbd_sc = BD_ENET_RX_EMPTY;
+#ifdef CONFIG_FEC_PTP
+               bdp->cbd_esc = BD_ENET_RX_INT;
+#endif
                bdp++;
        }
 
@@ -1237,6 +1223,10 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 
                bdp->cbd_sc = 0;
                bdp->cbd_bufaddr = 0;
+
+#ifdef CONFIG_FEC_PTP
+               bdp->cbd_esc = BD_ENET_RX_INT;
+#endif
                bdp++;
        }
 
@@ -1638,9 +1628,19 @@ fec_probe(struct platform_device *pdev)
                goto failed_clk;
        }
 
+#ifdef CONFIG_FEC_PTP
+       fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
+       if (IS_ERR(fep->clk_ptp)) {
+               ret = PTR_ERR(fep->clk_ptp);
+               goto failed_clk;
+       }
+#endif
+
        clk_prepare_enable(fep->clk_ahb);
        clk_prepare_enable(fep->clk_ipg);
-
+#ifdef CONFIG_FEC_PTP
+       clk_prepare_enable(fep->clk_ptp);
+#endif
        reg_phy = devm_regulator_get(&pdev->dev, "phy");
        if (!IS_ERR(reg_phy)) {
                ret = regulator_enable(reg_phy);
@@ -1668,6 +1668,10 @@ fec_probe(struct platform_device *pdev)
        if (ret)
                goto failed_register;
 
+#ifdef CONFIG_FEC_PTP
+       fec_ptp_init(ndev, pdev);
+#endif
+
        return 0;
 
 failed_register:
@@ -1677,6 +1681,9 @@ failed_init:
 failed_regulator:
        clk_disable_unprepare(fep->clk_ahb);
        clk_disable_unprepare(fep->clk_ipg);
+#ifdef CONFIG_FEC_PTP
+       clk_disable_unprepare(fep->clk_ptp);
+#endif
 failed_pin:
 failed_clk:
        for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1709,6 +1716,12 @@ fec_drv_remove(struct platform_device *pdev)
                if (irq > 0)
                        free_irq(irq, ndev);
        }
+#ifdef CONFIG_FEC_PTP
+       del_timer_sync(&fep->time_keep);
+       clk_disable_unprepare(fep->clk_ptp);
+       if (fep->ptp_clock)
+               ptp_clock_unregister(fep->ptp_clock);
+#endif
        clk_disable_unprepare(fep->clk_ahb);
        clk_disable_unprepare(fep->clk_ipg);
        iounmap(fep->hwp);
index 8408c627b1953a230c1f25d24b05217b0718d19f..c5a3bc1475c7f58381b3ad654501a5a116821651 100644 (file)
 #define        FEC_H
 /****************************************************************************/
 
+#ifdef CONFIG_FEC_PTP
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#endif
+
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
     defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
     defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
@@ -88,6 +94,13 @@ struct bufdesc {
        unsigned short cbd_datlen;      /* Data length */
        unsigned short cbd_sc;  /* Control and status info */
        unsigned long cbd_bufaddr;      /* Buffer address */
+#ifdef CONFIG_FEC_PTP
+       unsigned long cbd_esc;
+       unsigned long cbd_prot;
+       unsigned long cbd_bdu;
+       unsigned long ts;
+       unsigned short res0[4];
+#endif
 };
 #else
 struct bufdesc {
@@ -147,6 +160,112 @@ struct bufdesc {
 #define BD_ENET_TX_CSL          ((ushort)0x0001)
 #define BD_ENET_TX_STATS        ((ushort)0x03ff)        /* All status bits */
 
+/* Enhanced buffer descriptor control/status used by Ethernet transmit */
+#define BD_ENET_TX_INT          0x40000000
+#define BD_ENET_TX_TS           0x20000000
+
+
+/* This device has up to three irqs on some platforms */
+#define FEC_IRQ_NUM            3
+
+/* The number of Tx and Rx buffers.  These are allocated from the page
+ * pool.  The code may assume these are powers of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter.  We just use
+ * the skbuff directly.
+ */
+
+#define FEC_ENET_RX_PAGES      8
+#define FEC_ENET_RX_FRSIZE     2048
+#define FEC_ENET_RX_FRPPG      (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE           (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+#define FEC_ENET_TX_FRSIZE     2048
+#define FEC_ENET_TX_FRPPG      (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
+#define TX_RING_SIZE           16      /* Must be power of two */
+#define TX_RING_MOD_MASK       15      /*   for this to work */
+
+#define BD_ENET_RX_INT          0x00800000
+#define BD_ENET_RX_PTP          ((ushort)0x0400)
+
+/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors.  The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller.  The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions.  The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct fec_enet_private {
+       /* Hardware registers of the FEC device */
+       void __iomem *hwp;
+
+       struct net_device *netdev;
+
+       struct clk *clk_ipg;
+       struct clk *clk_ahb;
+#ifdef CONFIG_FEC_PTP
+       struct clk *clk_ptp;
+#endif
+
+       /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+       unsigned char *tx_bounce[TX_RING_SIZE];
+       struct  sk_buff *tx_skbuff[TX_RING_SIZE];
+       struct  sk_buff *rx_skbuff[RX_RING_SIZE];
+       ushort  skb_cur;
+       ushort  skb_dirty;
+
+       /* CPM dual port RAM relative addresses */
+       dma_addr_t      bd_dma;
+       /* Address of Rx and Tx buffers */
+       struct bufdesc  *rx_bd_base;
+       struct bufdesc  *tx_bd_base;
+       /* The next free ring entry */
+       struct bufdesc  *cur_rx, *cur_tx;
+       /* The ring entries to be free()ed */
+       struct bufdesc  *dirty_tx;
+
+       uint    tx_full;
+       /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
+       spinlock_t hw_lock;
+
+       struct  platform_device *pdev;
+
+       int     opened;
+       int     dev_id;
+
+       /* Phylib and MDIO interface */
+       struct  mii_bus *mii_bus;
+       struct  phy_device *phy_dev;
+       int     mii_timeout;
+       uint    phy_speed;
+       phy_interface_t phy_interface;
+       int     link;
+       int     full_duplex;
+       struct  completion mdio_done;
+       int     irq[FEC_IRQ_NUM];
+
+#ifdef CONFIG_FEC_PTP
+       struct ptp_clock *ptp_clock;
+       struct ptp_clock_info ptp_caps;
+       unsigned long last_overflow_check;
+       spinlock_t tmreg_lock;
+       struct cyclecounter cc;
+       struct timecounter tc;
+       int rx_hwtstamp_filter;
+       u32 base_incval;
+       u32 cycle_speed;
+       int hwts_rx_en;
+       int hwts_tx_en;
+       struct timer_list time_keep;
+#endif
+
+};
+
+#ifdef CONFIG_FEC_PTP
+void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev);
+void fec_ptp_start_cyclecounter(struct net_device *ndev);
+int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
+#endif
 
 /****************************************************************************/
 #endif /* FEC_H */
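
The ring-size constants moved into fec.h above rely on TX_RING_SIZE being a power of two so that TX_RING_MOD_MASK can wrap an index without a division. A small illustration (the helper name is made up for this sketch and is not part of the driver):

/* Illustrative only: advance a TX ring index using the mask above.
 * Valid because TX_RING_SIZE (16) is a power of two and
 * TX_RING_MOD_MASK == TX_RING_SIZE - 1. */
static inline unsigned int fec_tx_ring_next(unsigned int index)
{
        return (index + 1) & TX_RING_MOD_MASK;  /* 15 -> 0, 3 -> 4, ... */
}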
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
new file mode 100644 (file)
index 0000000..5352140
--- /dev/null
@@ -0,0 +1,385 @@
+/*
+ * Fast Ethernet Controller (ENET) PTP driver for MX6x.
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+
+#include "fec.h"
+
+/* FEC 1588 register bits */
+#define FEC_T_CTRL_SLAVE                0x00002000
+#define FEC_T_CTRL_CAPTURE              0x00000800
+#define FEC_T_CTRL_RESTART              0x00000200
+#define FEC_T_CTRL_PERIOD_RST           0x00000030
+#define FEC_T_CTRL_PERIOD_EN           0x00000010
+#define FEC_T_CTRL_ENABLE               0x00000001
+
+#define FEC_T_INC_MASK                  0x0000007f
+#define FEC_T_INC_OFFSET                0
+#define FEC_T_INC_CORR_MASK             0x00007f00
+#define FEC_T_INC_CORR_OFFSET           8
+
+#define FEC_ATIME_CTRL         0x400
+#define FEC_ATIME              0x404
+#define FEC_ATIME_EVT_OFFSET   0x408
+#define FEC_ATIME_EVT_PERIOD   0x40c
+#define FEC_ATIME_CORR         0x410
+#define FEC_ATIME_INC          0x414
+#define FEC_TS_TIMESTAMP       0x418
+
+#define FEC_CC_MULT    (1 << 31)
+/**
+ * fec_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc: the cyclecounter structure
+ *
+ * This function reads the cycle counter registers and is used by the
+ * cyclecounter structure to construct a nanosecond counter from the
+ * arbitrary fixed-point hardware registers.
+ */
+static cycle_t fec_ptp_read(const struct cyclecounter *cc)
+{
+       struct fec_enet_private *fep =
+               container_of(cc, struct fec_enet_private, cc);
+       u32 tempval;
+
+       tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+       tempval |= FEC_T_CTRL_CAPTURE;
+       writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+       return readl(fep->hwp + FEC_ATIME);
+}
+
+/**
+ * fec_ptp_start_cyclecounter - create the cycle counter from hw
+ * @ndev: network device
+ *
+ * This function initializes the timecounter and cyclecounter
+ * structures used to generate a nanosecond counter from the arbitrary
+ * fixed-point cycle registers in the hardware.
+ */
+void fec_ptp_start_cyclecounter(struct net_device *ndev)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       unsigned long flags;
+       int inc;
+
+       inc = 1000000000 / clk_get_rate(fep->clk_ptp);
+
+       /* grab the ptp lock */
+       spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+       /* 1ns counter */
+       writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
+
+       /* use free running count */
+       writel(0, fep->hwp + FEC_ATIME_EVT_PERIOD);
+
+       writel(FEC_T_CTRL_ENABLE, fep->hwp + FEC_ATIME_CTRL);
+
+       memset(&fep->cc, 0, sizeof(fep->cc));
+       fep->cc.read = fec_ptp_read;
+       fep->cc.mask = CLOCKSOURCE_MASK(32);
+       fep->cc.shift = 31;
+       fep->cc.mult = FEC_CC_MULT;
+
+       /* reset the ns time counter */
+       timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));
+
+       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+}
+
+/**
+ * fec_ptp_adjfreq - adjust ptp cycle frequency
+ * @ptp: the ptp clock structure
+ * @ppb: parts per billion adjustment from base
+ *
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated ppb from the base frequency.
+ *
+ * Because frequency adjustment in the ENET hardware is complex,
+ * the adjustment is done in software by scaling the cyclecounter
+ * multiplier instead.
+ */
+static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+       u64 diff;
+       unsigned long flags;
+       int neg_adj = 0;
+
+       struct fec_enet_private *fep =
+           container_of(ptp, struct fec_enet_private, ptp_caps);
+
+       if (ppb < 0) {
+               ppb = -ppb;
+               neg_adj = 1;
+       }
+
+       spin_lock_irqsave(&fep->tmreg_lock, flags);
+       /*
+        * Dummy read to set cycle_last in the timecounter to now,
+        * so that the adjusted mult takes effect on the next call
+        * to timecounter_read().
+        */
+       timecounter_read(&fep->tc);
+       fep->cc.mult = FEC_CC_MULT;
+       diff = fep->cc.mult;
+       diff *= ppb;
+       diff = div_u64(diff, 1000000000ULL);
+
+       if (neg_adj)
+               fep->cc.mult -= diff;
+       else
+               fep->cc.mult += diff;
+
+       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+       return 0;
+}
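
To make the software frequency adjustment above concrete: with FEC_CC_MULT = 2^31 and a request of +100 ppb, diff = (2^31 * 100) / 10^9 = 214, so cc.mult moves from 2147483648 to 2147483862. A standalone check of that arithmetic (userspace sketch, with plain integer division standing in for div_u64):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t mult = 1ULL << 31;     /* FEC_CC_MULT */
        int32_t ppb = 100;              /* +100 parts per billion */
        uint64_t diff = (mult * (uint64_t)ppb) / 1000000000ULL;

        /* Prints: diff=214 new_mult=2147483862 */
        printf("diff=%llu new_mult=%llu\n",
               (unsigned long long)diff,
               (unsigned long long)(mult + diff));
        return 0;
}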
+
+/**
+ * fec_ptp_adjtime
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
+ *
+ * adjust the timer by resetting the timecounter structure.
+ */
+static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       struct fec_enet_private *fep =
+           container_of(ptp, struct fec_enet_private, ptp_caps);
+       unsigned long flags;
+       u64 now;
+
+       spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+       now = timecounter_read(&fep->tc);
+       now += delta;
+
+       /* reset the timecounter */
+       timecounter_init(&fep->tc, &fep->cc, now);
+
+       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+       return 0;
+}
+
+/**
+ * fec_ptp_gettime
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * Read the timecounter and return the time in ns,
+ * after converting it into a struct timespec.
+ */
+static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+       struct fec_enet_private *adapter =
+           container_of(ptp, struct fec_enet_private, ptp_caps);
+       u64 ns;
+       u32 remainder;
+       unsigned long flags;
+
+       spin_lock_irqsave(&adapter->tmreg_lock, flags);
+       ns = timecounter_read(&adapter->tc);
+       spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+       ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+       ts->tv_nsec = remainder;
+
+       return 0;
+}
+
+/**
+ * fec_ptp_settime
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
+ *
+ * reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ */
+static int fec_ptp_settime(struct ptp_clock_info *ptp,
+                          const struct timespec *ts)
+{
+       struct fec_enet_private *fep =
+           container_of(ptp, struct fec_enet_private, ptp_caps);
+
+       u64 ns;
+       unsigned long flags;
+
+       ns = ts->tv_sec * 1000000000ULL;
+       ns += ts->tv_nsec;
+
+       spin_lock_irqsave(&fep->tmreg_lock, flags);
+       timecounter_init(&fep->tc, &fep->cc, ns);
+       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+       return 0;
+}
+
+/**
+ * fec_ptp_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ *
+ */
+static int fec_ptp_enable(struct ptp_clock_info *ptp,
+                         struct ptp_clock_request *rq, int on)
+{
+       return -EOPNOTSUPP;
+}
+
+/**
+ * fec_ptp_ioctl - control hardware time stamping
+ * @ndev: pointer to net_device
+ * @ifr: ioctl data
+ * @cmd: particular ioctl requested
+ */
+int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       struct hwtstamp_config config;
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       /* reserved for future extensions */
+       if (config.flags)
+               return -EINVAL;
+
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               fep->hwts_tx_en = 0;
+               break;
+       case HWTSTAMP_TX_ON:
+               fep->hwts_tx_en = 1;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       switch (config.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               if (fep->hwts_rx_en)
+                       fep->hwts_rx_en = 0;
+               config.rx_filter = HWTSTAMP_FILTER_NONE;
+               break;
+
+       default:
+               /*
+                * No filtering by PTP message type is implemented here,
+                * so any filter other than HWTSTAMP_FILTER_NONE enables
+                * time stamping of all incoming packets and is reported
+                * back to the caller as HWTSTAMP_FILTER_ALL.
+                */
+               fep->hwts_rx_en = 1;
+               config.rx_filter = HWTSTAMP_FILTER_ALL;
+               break;
+       }
+
+       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+           -EFAULT : 0;
+}
+
+/**
+ * fec_time_keep - call timecounter_read() every second to avoid counter overflow
+ * @_data: pointer to the FEC private data, cast to unsigned long
+ *
+ * The ENET hardware only provides a 32-bit cycle counter, which wraps after
+ * roughly four seconds, so the time counter must be refreshed periodically.
+ */
+static void fec_time_keep(unsigned long _data)
+{
+       struct fec_enet_private *fep = (struct fec_enet_private *)_data;
+       u64 ns;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fep->tmreg_lock, flags);
+       ns = timecounter_read(&fep->tc);
+       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+       mod_timer(&fep->time_keep, jiffies + HZ);
+}
+
+/**
+ * fec_ptp_init
+ * @ndev: The FEC network adapter
+ * @pdev: The FEC platform device
+ *
+ * This function performs the required steps for enabling PTP support:
+ * it fills in the ptp_clock_info operations, starts the cyclecounter,
+ * arms the periodic overflow timer and registers the PTP clock device.
+ */
+void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+
+       fep->ptp_caps.owner = THIS_MODULE;
+       snprintf(fep->ptp_caps.name, 16, "fec ptp");
+
+       fep->ptp_caps.max_adj = 250000000;
+       fep->ptp_caps.n_alarm = 0;
+       fep->ptp_caps.n_ext_ts = 0;
+       fep->ptp_caps.n_per_out = 0;
+       fep->ptp_caps.pps = 0;
+       fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
+       fep->ptp_caps.adjtime = fec_ptp_adjtime;
+       fep->ptp_caps.gettime = fec_ptp_gettime;
+       fep->ptp_caps.settime = fec_ptp_settime;
+       fep->ptp_caps.enable = fec_ptp_enable;
+
+       spin_lock_init(&fep->tmreg_lock);
+
+       fec_ptp_start_cyclecounter(ndev);
+
+       init_timer(&fep->time_keep);
+       fep->time_keep.data = (unsigned long)fep;
+       fep->time_keep.function = fec_time_keep;
+       fep->time_keep.expires = jiffies + HZ;
+       add_timer(&fep->time_keep);
+
+       fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
+       if (IS_ERR(fep->ptp_clock)) {
+               fep->ptp_clock = NULL;
+               pr_err("ptp_clock_register failed\n");
+       } else {
+               pr_info("registered PHC device on %s\n", ndev->name);
+       }
+}
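
fec_ptp_ioctl() above accepts the generic hwtstamp_config request. A minimal userspace sketch of turning on TX timestamps and all-packet RX timestamping; the interface name "eth0" is an assumption:

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

int main(void)
{
        struct hwtstamp_config cfg = {
                .tx_type   = HWTSTAMP_TX_ON,
                .rx_filter = HWTSTAMP_FILTER_ALL,
        };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");
        else
                printf("granted rx_filter: %d\n", cfg.rx_filter);
        return 0;
}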
index 0cafe4fe9406125af6375cd8859b1919ab0d62d4..73d28d51b5d9e2bbeaa5210b88ce733f7338fe23 100644 (file)
@@ -93,6 +93,7 @@ config E1000E
 config IGB
        tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
        depends on PCI
+       select PTP_1588_CLOCK
        ---help---
          This driver supports Intel(R) 82575/82576 gigabit ethernet family of
          adapters.  For more information on how to identify your adapter, go
@@ -120,19 +121,6 @@ config IGB_DCA
          driver.  DCA is a method for warming the CPU cache before data
          is used, with the intent of lessening the impact of cache misses.
 
-config IGB_PTP
-       bool "PTP Hardware Clock (PHC)"
-       default n
-       depends on IGB && EXPERIMENTAL
-       select PPS
-       select PTP_1588_CLOCK
-       ---help---
-         Say Y here if you want to use PTP Hardware Clock (PHC) in the
-         driver.  Only the basic clock operations have been implemented.
-
-         Every timestamp and clock read operations must consult the
-         overflow counter to form a correct time value.
-
 config IGBVF
        tristate "Intel(R) 82576 Virtual Function Ethernet support"
        depends on PCI
@@ -180,6 +168,7 @@ config IXGBE
        tristate "Intel(R) 10GbE PCI Express adapters support"
        depends on PCI && INET
        select MDIO
+       select PTP_1588_CLOCK
        ---help---
          This driver supports Intel(R) 10GbE PCI Express family of
          adapters.  For more information on how to identify your adapter, go
@@ -222,19 +211,6 @@ config IXGBE_DCB
 
          If unsure, say N.
 
-config IXGBE_PTP
-       bool "PTP Clock Support"
-       default n
-       depends on IXGBE && EXPERIMENTAL
-       select PPS
-       select PTP_1588_CLOCK
-       ---help---
-         Say Y here if you want support for 1588 Timestamping with a
-         PHC device, using the PTP 1588 Clock support. This is
-         required to enable timestamping support for the device.
-
-         If unsure, say N.
-
 config IXGBEVF
        tristate "Intel(R) 82599 Virtual Function Ethernet support"
        depends on PCI_MSI
index 97c197fd4a8e5215fa5dea76d4785cd5d765a6d1..624476cfa727cc32efc39a8846fb3720742077ed 100644 (file)
@@ -34,6 +34,4 @@ obj-$(CONFIG_IGB) += igb.o
 
 igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
            e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
-           e1000_i210.o
-
-igb-$(CONFIG_IGB_PTP) += igb_ptp.o
+           e1000_i210.o igb_ptp.o
index be1971b18297e4f9791ab8a6d8f8c15bc60642e5..796db53954d94aee838e7b4c579cb8189452087c 100644 (file)
 #include "e1000_mac.h"
 #include "e1000_82575.h"
 
-#ifdef CONFIG_IGB_PTP
 #include <linux/clocksource.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
-#endif /* CONFIG_IGB_PTP */
 #include <linux/bitops.h>
 #include <linux/if_vlan.h>
 
@@ -376,7 +374,6 @@ struct igb_adapter {
        u32 wvbr;
        u32 *shadow_vfta;
 
-#ifdef CONFIG_IGB_PTP
        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_caps;
        struct delayed_work ptp_overflow_work;
@@ -385,7 +382,6 @@ struct igb_adapter {
        spinlock_t tmreg_lock;
        struct cyclecounter cc;
        struct timecounter tc;
-#endif /* CONFIG_IGB_PTP */
 
        char fw_version[32];
 };
@@ -439,7 +435,6 @@ extern bool igb_has_link(struct igb_adapter *adapter);
 extern void igb_set_ethtool_ops(struct net_device *);
 extern void igb_power_up_link(struct igb_adapter *);
 extern void igb_set_fw_version(struct igb_adapter *);
-#ifdef CONFIG_IGB_PTP
 extern void igb_ptp_init(struct igb_adapter *adapter);
 extern void igb_ptp_stop(struct igb_adapter *adapter);
 extern void igb_ptp_reset(struct igb_adapter *adapter);
@@ -461,7 +456,6 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
 
 extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
                                  struct ifreq *ifr, int cmd);
-#endif /* CONFIG_IGB_PTP */
 
 static inline s32 igb_reset_phy(struct e1000_hw *hw)
 {
index 375c0dad8d2999065ef6934017dd93ef30fc8a1a..d8b1bee606c0da88b109900dc0fbfc3c7a5f1a83 100644 (file)
@@ -2310,7 +2310,6 @@ static int igb_get_ts_info(struct net_device *dev,
        struct igb_adapter *adapter = netdev_priv(dev);
 
        switch (adapter->hw.mac.type) {
-#ifdef CONFIG_IGB_PTP
        case e1000_82576:
        case e1000_82580:
        case e1000_i350:
@@ -2346,7 +2345,6 @@ static int igb_get_ts_info(struct net_device *dev,
                                (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 
                return 0;
-#endif /* CONFIG_IGB_PTP */
        default:
                return -EOPNOTSUPP;
        }
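
The .get_ts_info hook above is what answers userspace capability queries now that the igb PTP code is always built in. A hedged sketch of such a query through the ethtool ioctl (the interface name is an assumption):

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

int main(void)
{
        struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&info;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("phc_index=%d so_timestamping=0x%x\n",
                       info.phc_index, info.so_timestamping);
        else
                perror("ETHTOOL_GET_TS_INFO");
        return 0;
}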
index df1e7907bbaff4d74bd16d48c5963633e31b1b7b..082ce73dc6270685837e7b8b08ae215a4e3f853b 100644 (file)
@@ -1708,10 +1708,8 @@ void igb_reset(struct igb_adapter *adapter)
        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
        wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
 
-#ifdef CONFIG_IGB_PTP
        /* Re-enable PTP, where applicable. */
        igb_ptp_reset(adapter);
-#endif /* CONFIG_IGB_PTP */
 
        igb_get_phy_info(hw);
 }
@@ -2119,10 +2117,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 
 #endif
 
-#ifdef CONFIG_IGB_PTP
        /* do hw tstamp init after resetting */
        igb_ptp_init(adapter);
-#endif /* CONFIG_IGB_PTP */
 
        dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
        /* print bus type/speed/width info */
@@ -2197,9 +2193,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
        struct e1000_hw *hw = &adapter->hw;
 
        pm_runtime_get_noresume(&pdev->dev);
-#ifdef CONFIG_IGB_PTP
        igb_ptp_stop(adapter);
-#endif /* CONFIG_IGB_PTP */
 
        /*
         * The watchdog timer may be rescheduled, so explicitly
@@ -3095,10 +3089,8 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
        srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
        srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
        srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
-#ifdef CONFIG_IGB_PTP
        if (hw->mac.type >= e1000_82580)
                srrctl |= E1000_SRRCTL_TIMESTAMP;
-#endif /* CONFIG_IGB_PTP */
        /* Only set Drop Enable if we are supporting multiple queues */
        if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
                srrctl |= E1000_SRRCTL_DROP_EN;
@@ -4134,11 +4126,9 @@ static __le32 igb_tx_cmd_type(u32 tx_flags)
        if (tx_flags & IGB_TX_FLAGS_VLAN)
                cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
 
-#ifdef CONFIG_IGB_PTP
        /* set timestamp bit if present */
        if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
                cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
-#endif /* CONFIG_IGB_PTP */
 
        /* set segmentation bits for TSO */
        if (tx_flags & IGB_TX_FLAGS_TSO)
@@ -4347,9 +4337,7 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
 netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
                                struct igb_ring *tx_ring)
 {
-#ifdef CONFIG_IGB_PTP
        struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
-#endif /* CONFIG_IGB_PTP */
        struct igb_tx_buffer *first;
        int tso;
        u32 tx_flags = 0;
@@ -4372,7 +4360,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
        first->bytecount = skb->len;
        first->gso_segs = 1;
 
-#ifdef CONFIG_IGB_PTP
        if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
                     !(adapter->ptp_tx_skb))) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -4382,7 +4369,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
                if (adapter->hw.mac.type == e1000_82576)
                        schedule_work(&adapter->ptp_tx_work);
        }
-#endif /* CONFIG_IGB_PTP */
 
        if (vlan_tx_tag_present(skb)) {
                tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -4776,7 +4762,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-#ifdef CONFIG_IGB_PTP
        if (icr & E1000_ICR_TS) {
                u32 tsicr = rd32(E1000_TSICR);
 
@@ -4787,7 +4772,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
                        schedule_work(&adapter->ptp_tx_work);
                }
        }
-#endif /* CONFIG_IGB_PTP */
 
        wr32(E1000_EIMS, adapter->eims_other);
 
@@ -5539,7 +5523,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-#ifdef CONFIG_IGB_PTP
        if (icr & E1000_ICR_TS) {
                u32 tsicr = rd32(E1000_TSICR);
 
@@ -5550,7 +5533,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
                        schedule_work(&adapter->ptp_tx_work);
                }
        }
-#endif /* CONFIG_IGB_PTP */
 
        napi_schedule(&q_vector->napi);
 
@@ -5593,7 +5575,6 @@ static irqreturn_t igb_intr(int irq, void *data)
                        mod_timer(&adapter->watchdog_timer, jiffies + 1);
        }
 
-#ifdef CONFIG_IGB_PTP
        if (icr & E1000_ICR_TS) {
                u32 tsicr = rd32(E1000_TSICR);
 
@@ -5604,7 +5585,6 @@ static irqreturn_t igb_intr(int irq, void *data)
                        schedule_work(&adapter->ptp_tx_work);
                }
        }
-#endif /* CONFIG_IGB_PTP */
 
        napi_schedule(&q_vector->napi);
 
@@ -5889,14 +5869,12 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
        if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
                unsigned char *va = page_address(page) + rx_buffer->page_offset;
 
-#ifdef CONFIG_IGB_PTP
                if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
                        igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
                        va += IGB_TS_HDR_LEN;
                        size -= IGB_TS_HDR_LEN;
                }
 
-#endif
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
                /* we can reuse buffer as-is, just make sure it is local */
@@ -6221,7 +6199,6 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
         */
        va = skb_frag_address(frag);
 
-#ifdef CONFIG_IGB_PTP
        if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
                /* retrieve timestamp from buffer */
                igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
@@ -6236,7 +6213,6 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
                va += IGB_TS_HDR_LEN;
        }
 
-#endif
        /*
         * we need the header to contain the greater of either ETH_HLEN or
         * 60 bytes if the skb->len is less than 60 for skb_pad.
@@ -6317,9 +6293,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
 
        igb_rx_checksum(rx_ring, rx_desc, skb);
 
-#ifdef CONFIG_IGB_PTP
        igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
-#endif /* CONFIG_IGB_PTP */
 
        if ((dev->features & NETIF_F_HW_VLAN_RX) &&
            igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
@@ -6553,10 +6527,8 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                return igb_mii_ioctl(netdev, ifr, cmd);
-#ifdef CONFIG_IGB_PTP
        case SIOCSHWTSTAMP:
                return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
-#endif /* CONFIG_IGB_PTP */
        default:
                return -EOPNOTSUPP;
        }
index 89f40e51fc134f0537fe8cf6f9f9c3efca44f94e..f3a632bf8d96f8bd76983e8990c80f1cefe984ac 100644 (file)
@@ -34,11 +34,10 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
+              ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \
                               ixgbe_dcb_82599.o ixgbe_dcb_nl.o
 
-ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
 ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
 ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
index a17116b3c470a946e35d7a4d3213d90177c75ebf..7ff4c4fdcb0d68aa80a91238741f47dbceba6094 100644 (file)
 #include <linux/aer.h>
 #include <linux/if_vlan.h>
 
-#ifdef CONFIG_IXGBE_PTP
 #include <linux/clocksource.h>
 #include <linux/net_tstamp.h>
 #include <linux/ptp_clock_kernel.h>
-#endif /* CONFIG_IXGBE_PTP */
 
 #include "ixgbe_type.h"
 #include "ixgbe_common.h"
@@ -572,7 +570,6 @@ struct ixgbe_adapter {
        u32 interrupt_event;
        u32 led_reg;
 
-#ifdef CONFIG_IXGBE_PTP
        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_caps;
        unsigned long last_overflow_check;
@@ -581,7 +578,6 @@ struct ixgbe_adapter {
        struct timecounter tc;
        int rx_hwtstamp_filter;
        u32 base_incval;
-#endif /* CONFIG_IXGBE_PTP */
 
        /* SR-IOV */
        DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -742,7 +738,6 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
 }
 
-#ifdef CONFIG_IXGBE_PTP
 extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
 extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
 extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
@@ -756,6 +751,5 @@ extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
 extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
 extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
 extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
-#endif /* CONFIG_IXGBE_PTP */
 
 #endif /* _IXGBE_H_ */
index 872c3374ddfa4c3ccd3b2a2b1af2a713c9119120..bbf07bd6ab9d06788b500ff8f4a1a0c4517bfb0f 100644 (file)
@@ -2667,7 +2667,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
        struct ixgbe_adapter *adapter = netdev_priv(dev);
 
        switch (adapter->hw.mac.type) {
-#ifdef CONFIG_IXGBE_PTP
        case ixgbe_mac_X540:
        case ixgbe_mac_82599EB:
                info->so_timestamping =
@@ -2690,7 +2689,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
                        (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
                        (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
                break;
-#endif /* CONFIG_IXGBE_PTP */
        default:
                return ethtool_op_get_ts_info(dev, info);
                break;
index 8b1a38bec2378b585ea1d970d0e38aac216060bf..b54bc40f00b0dbb8c4557b90c2d1c0e0bbd654b1 100644 (file)
@@ -841,10 +841,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;
 
-#ifdef CONFIG_IXGBE_PTP
                if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
                        ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
-#endif
 
                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);
@@ -1432,9 +1430,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 
        ixgbe_rx_checksum(rx_ring, rx_desc, skb);
 
-#ifdef CONFIG_IXGBE_PTP
        ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
-#endif
 
        if ((dev->features & NETIF_F_HW_VLAN_RX) &&
            ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -2372,10 +2368,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
                break;
        }
 
-#ifdef CONFIG_IXGBE_PTP
        if (adapter->hw.mac.type == ixgbe_mac_X540)
                mask |= IXGBE_EIMS_TIMESYNC;
-#endif
 
        if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
            !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
@@ -2441,10 +2435,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
 
        ixgbe_check_fan_failure(adapter, eicr);
 
-#ifdef CONFIG_IXGBE_PTP
        if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
                ixgbe_ptp_check_pps_event(adapter, eicr);
-#endif
 
        /* re-enable the original interrupt state, no lsc, no queues */
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2636,10 +2628,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        }
 
        ixgbe_check_fan_failure(adapter, eicr);
-#ifdef CONFIG_IXGBE_PTP
        if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
                ixgbe_ptp_check_pps_event(adapter, eicr);
-#endif
 
        /* would disable interrupts here but EIAM disabled it */
        napi_schedule(&q_vector->napi);
@@ -4231,10 +4221,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
        if (hw->mac.san_mac_rar_index)
                hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
 
-#ifdef CONFIG_IXGBE_PTP
        if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
                ixgbe_ptp_reset(adapter);
-#endif
 }
 
 /**
@@ -4941,9 +4929,7 @@ static int ixgbe_open(struct net_device *netdev)
        if (err)
                goto err_set_queues;
 
-#ifdef CONFIG_IXGBE_PTP
        ixgbe_ptp_init(adapter);
-#endif /* CONFIG_IXGBE_PTP*/
 
        ixgbe_up_complete(adapter);
 
@@ -4976,9 +4962,7 @@ static int ixgbe_close(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
-#ifdef CONFIG_IXGBE_PTP
        ixgbe_ptp_stop(adapter);
-#endif
 
        ixgbe_down(adapter);
        ixgbe_free_irq(adapter);
@@ -5541,10 +5525,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
                break;
        }
 
-#ifdef CONFIG_IXGBE_PTP
        if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
                ixgbe_ptp_start_cyclecounter(adapter);
-#endif
 
        e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
               (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5589,10 +5571,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
        if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
                adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
 
-#ifdef CONFIG_IXGBE_PTP
        if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
                ixgbe_ptp_start_cyclecounter(adapter);
-#endif
 
        e_info(drv, "NIC Link is Down\n");
        netif_carrier_off(netdev);
@@ -5897,9 +5877,7 @@ static void ixgbe_service_task(struct work_struct *work)
        ixgbe_watchdog_subtask(adapter);
        ixgbe_fdir_reinit_subtask(adapter);
        ixgbe_check_hang_subtask(adapter);
-#ifdef CONFIG_IXGBE_PTP
        ixgbe_ptp_overflow_check(adapter);
-#endif
 
        ixgbe_service_event_complete(adapter);
 }
@@ -6052,10 +6030,8 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
        if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
                cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
 
-#ifdef CONFIG_IXGBE_PTP
        if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
                cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
-#endif
 
        /* set segmentation enable bits for TSO/FSO */
 #ifdef IXGBE_FCOE
@@ -6457,12 +6433,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 
        skb_tx_timestamp(skb);
 
-#ifdef CONFIG_IXGBE_PTP
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
        }
-#endif
 
 #ifdef CONFIG_PCI_IOV
        /*
@@ -6612,10 +6586,8 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        switch (cmd) {
-#ifdef CONFIG_IXGBE_PTP
        case SIOCSHWTSTAMP:
                return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
-#endif
        default:
                return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
        }
index 5296cc8d3cbaaf2dc20994a71a200b2d6e547cb7..00bc4fc968c7fa76ba3fbd18c733bc7bfb38a435 100644 (file)
@@ -20,19 +20,3 @@ config PCH_GBE
          purpose use.
          ML7223/ML7831 is companion chip for Intel Atom E6xx series.
          ML7223/ML7831 is completely compatible for Intel EG20T PCH.
-
-if PCH_GBE
-
-config PCH_PTP
-       bool "PCH PTP clock support"
-       default n
-       depends on EXPERIMENTAL
-       select PPS
-       select PTP_1588_CLOCK
-       select PTP_1588_CLOCK_PCH
-       ---help---
-         Say Y here if you want to use Precision Time Protocol (PTP) in the
-         driver. PTP is a method to precisely synchronize distributed clocks
-         over Ethernet networks.
-
-endif # PCH_GBE
index b07311eaa69386d3cdbad86537da599b5dcaccb7..7fb7e178c74eaacd1af8c1597b02cca0b15676ee 100644 (file)
@@ -649,7 +649,6 @@ extern void pch_gbe_free_tx_resources(struct pch_gbe_adapter *adapter,
 extern void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
                                       struct pch_gbe_rx_ring *rx_ring);
 extern void pch_gbe_update_stats(struct pch_gbe_adapter *adapter);
-#ifdef CONFIG_PCH_PTP
 extern u32 pch_ch_control_read(struct pci_dev *pdev);
 extern void pch_ch_control_write(struct pci_dev *pdev, u32 val);
 extern u32 pch_ch_event_read(struct pci_dev *pdev);
@@ -659,7 +658,6 @@ extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
 extern u64 pch_rx_snap_read(struct pci_dev *pdev);
 extern u64 pch_tx_snap_read(struct pci_dev *pdev);
 extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
-#endif
 
 /* pch_gbe_param.c */
 extern void pch_gbe_check_options(struct pch_gbe_adapter *adapter);
index b2a94d02a5217db7638164962d59b16f8def20ad..499249a15e88cbb9ffbb1524631662e199496633 100644 (file)
 #include "pch_gbe.h"
 #include "pch_gbe_api.h"
 #include <linux/module.h>
-#ifdef CONFIG_PCH_PTP
 #include <linux/net_tstamp.h>
 #include <linux/ptp_classify.h>
-#endif
 
 #define DRV_VERSION     "1.01"
 const char pch_driver_version[] = DRV_VERSION;
@@ -98,7 +96,6 @@ const char pch_driver_version[] = DRV_VERSION;
 
 #define PCH_GBE_INT_DISABLE_ALL                0
 
-#ifdef CONFIG_PCH_PTP
 /* Macros for ieee1588 */
 /* 0x40 Time Synchronization Channel Control Register Bits */
 #define MASTER_MODE   (1<<0)
@@ -113,7 +110,6 @@ const char pch_driver_version[] = DRV_VERSION;
 
 #define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
 #define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
-#endif
 
 static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
 
@@ -122,7 +118,6 @@ static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
                               int data);
 static void pch_gbe_set_multi(struct net_device *netdev);
 
-#ifdef CONFIG_PCH_PTP
 static struct sock_filter ptp_filter[] = {
        PTP_FILTER
 };
@@ -291,7 +286,6 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 
        return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
 }
-#endif
 
 inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
 {
@@ -1261,9 +1255,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
                  (int)sizeof(struct pch_gbe_tx_desc) * ring_num,
                  &hw->reg->TX_DSC_SW_P);
 
-#ifdef CONFIG_PCH_PTP
        pch_tx_timestamp(adapter, skb);
-#endif
 
        dev_kfree_skb_any(skb);
 }
@@ -1771,9 +1763,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
                        /* Write meta date of skb */
                        skb_put(skb, length);
 
-#ifdef CONFIG_PCH_PTP
                        pch_rx_timestamp(adapter, skb);
-#endif
 
                        skb->protocol = eth_type_trans(skb, netdev);
                        if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
@@ -2369,10 +2359,8 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 
        pr_debug("cmd : 0x%04x\n", cmd);
 
-#ifdef CONFIG_PCH_PTP
        if (cmd == SIOCSHWTSTAMP)
                return hwtstamp_ioctl(netdev, ifr, cmd);
-#endif
 
        return generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
 }
@@ -2665,14 +2653,12 @@ static int pch_gbe_probe(struct pci_dev *pdev,
                goto err_free_netdev;
        }
 
-#ifdef CONFIG_PCH_PTP
        adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
                                               PCI_DEVFN(12, 4));
        if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
                pr_err("Bad ptp filter\n");
                return -EINVAL;
        }
-#endif
 
        netdev->netdev_ops = &pch_gbe_netdev_ops;
        netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
index 25906c1d1b1590825502bc192dffa1aa8d8e494e..fb3cbc27063cccb5ae821989c516195527e32e65 100644 (file)
@@ -34,10 +34,3 @@ config SFC_SRIOV
          This enables support for the SFC9000 I/O Virtualization
          features, allowing accelerated network performance in
          virtualized environments.
-config SFC_PTP
-       bool "Solarflare SFC9000-family PTP support"
-       depends on SFC && PTP_1588_CLOCK && !(SFC=y && PTP_1588_CLOCK=m)
-       default y
-       ---help---
-         This enables support for the Precision Time Protocol (PTP)
-         on SFC9000-family NICs
index e11f2ecf69d9b5dfd0ae5600e33bb0fe1d7382fc..945bf06e69ef11c636849ed1f20263868a6b0947 100644 (file)
@@ -2,9 +2,8 @@ sfc-y                   += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
                           falcon_xmac.o mcdi_mac.o \
                           selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
                           tenxpress.o txc43128_phy.o falcon_boards.o \
-                          mcdi.o mcdi_phy.o mcdi_mon.o
+                          mcdi.o mcdi_phy.o mcdi_mon.o ptp.o
 sfc-$(CONFIG_SFC_MTD)  += mtd.o
 sfc-$(CONFIG_SFC_SRIOV)        += siena_sriov.o
-sfc-$(CONFIG_SFC_PTP)  += ptp.o
 
 obj-$(CONFIG_SFC)      += sfc.o
index 576a31091165492879ee22edbfd00eee4972047a..2487f582ab047dc27601a499d788b7701ebbb321 100644 (file)
@@ -868,9 +868,7 @@ struct efx_nic {
        struct work_struct peer_work;
 #endif
 
-#ifdef CONFIG_SFC_PTP
        struct efx_ptp_data *ptp_data;
-#endif
 
        /* The following fields may be written more often */
 
index 438cef11f7270bd620ed04c42fb1c8ee8a9d3621..7a9647a3c565442ba02017ff40d2605f9b785552 100644 (file)
@@ -252,7 +252,6 @@ extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf,
                                     bool spoofchk);
 
 struct ethtool_ts_info;
-#ifdef CONFIG_SFC_PTP
 extern void efx_ptp_probe(struct efx_nic *efx);
 extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd);
 extern int efx_ptp_get_ts_info(struct net_device *net_dev,
@@ -260,31 +259,6 @@ extern int efx_ptp_get_ts_info(struct net_device *net_dev,
 extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
 extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
 extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
-#else
-static inline void efx_ptp_probe(struct efx_nic *efx) {}
-static inline int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd)
-{
-       return -EOPNOTSUPP;
-}
-static inline int efx_ptp_get_ts_info(struct net_device *net_dev,
-                                     struct ethtool_ts_info *ts_info)
-{
-       ts_info->so_timestamping = (SOF_TIMESTAMPING_SOFTWARE |
-                                   SOF_TIMESTAMPING_RX_SOFTWARE);
-       ts_info->phc_index = -1;
-
-       return 0;
-}
-static inline bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
-{
-       return false;
-}
-static inline int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
-{
-       return NETDEV_TX_OK;
-}
-static inline void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) {}
-#endif
 
 extern const struct efx_nic_type falcon_a1_nic_type;
 extern const struct efx_nic_type falcon_b0_nic_type;
index b26cbda5efa9b5264dd4e2bb885d35ea7e26b692..cbc3905a0a15fa4c756080ee2bf43649684c8233 100644 (file)
@@ -60,6 +60,14 @@ config TI_CPSW
          To compile this driver as a module, choose M here: the module
          will be called cpsw.
 
+config TI_CPTS
+       boolean "TI Common Platform Time Sync (CPTS) Support"
+       depends on TI_CPSW && PTP_1588_CLOCK && !(TI_CPSW=y && PTP_1588_CLOCK=m)
+       ---help---
+         This driver supports the Common Platform Time Sync unit of
+         the CPSW Ethernet Switch. The unit can time stamp PTP UDP/IPv4
+         and Layer 2 packets, and the driver offers a PTP Hardware Clock.
+
 config TLAN
        tristate "TI ThunderLAN support"
        depends on (PCI || EISA)
index 91bd8bba78ffb872faa4b4f60cc43196e4893773..c65148e8aa1d4810e00cbe6d7448fa72572847d7 100644 (file)
@@ -8,4 +8,4 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
 obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
 obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
 obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
-ti_cpsw-y := cpsw_ale.o cpsw.o
+ti_cpsw-y := cpsw_ale.o cpsw.o cpts.o
index df55e240374646e2f076d9b4764fab6dc0a07bab..023d439ef10fe3155e3d21c52c0a12f746fe2655 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/if_ether.h>
 #include <linux/etherdevice.h>
 #include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
 #include <linux/phy.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
@@ -35,6 +36,7 @@
 #include <linux/platform_data/cpsw.h>
 
 #include "cpsw_ale.h"
+#include "cpts.h"
 #include "davinci_cpdma.h"
 
 #define CPSW_DEBUG     (NETIF_MSG_HW           | NETIF_MSG_WOL         | \
@@ -70,10 +72,14 @@ do {                                                                \
                dev_notice(priv->dev, format, ## __VA_ARGS__);  \
 } while (0)
 
+#define ALE_ALL_PORTS          0x7
+
 #define CPSW_MAJOR_VERSION(reg)                (reg >> 8 & 0x7)
 #define CPSW_MINOR_VERSION(reg)                (reg & 0xff)
 #define CPSW_RTL_VERSION(reg)          ((reg >> 11) & 0x1f)
 
+#define CPSW_VERSION_1         0x19010a
+#define CPSW_VERSION_2         0x19010c
 #define CPDMA_RXTHRESH         0x0c0
 #define CPDMA_RXFREE           0x0e0
 #define CPDMA_TXHDP            0x00
@@ -129,7 +135,7 @@ static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
 module_param(rx_packet_max, int, 0);
 MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
 
-struct cpsw_ss_regs {
+struct cpsw_wr_regs {
        u32     id_ver;
        u32     soft_reset;
        u32     control;
@@ -140,26 +146,98 @@ struct cpsw_ss_regs {
        u32     misc_en;
 };
 
-struct cpsw_regs {
+struct cpsw_ss_regs {
        u32     id_ver;
        u32     control;
        u32     soft_reset;
        u32     stat_port_en;
        u32     ptype;
+       u32     soft_idle;
+       u32     thru_rate;
+       u32     gap_thresh;
+       u32     tx_start_wds;
+       u32     flow_control;
+       u32     vlan_ltype;
+       u32     ts_ltype;
+       u32     dlr_ltype;
 };
 
-struct cpsw_slave_regs {
-       u32     max_blks;
-       u32     blk_cnt;
-       u32     flow_thresh;
-       u32     port_vlan;
-       u32     tx_pri_map;
-       u32     ts_ctl;
-       u32     ts_seq_ltype;
-       u32     ts_vlan;
-       u32     sa_lo;
-       u32     sa_hi;
-};
+/* CPSW_PORT_V1 */
+#define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
+#define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
+#define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
+#define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
+#define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW1_TS_CTL        0x14 /* Time Sync Control */
+#define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
+#define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */
+
+/* CPSW_PORT_V2 */
+#define CPSW2_CONTROL       0x00 /* Control Register */
+#define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
+#define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
+#define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
+#define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
+#define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */
+
+/* CPSW_PORT_V1 and V2 */
+#define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
+#define SA_HI               0x24 /* CPGMAC_SL Source Address High */
+#define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */
+
+/* CPSW_PORT_V2 only */
+#define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */
+
+/* Bit definitions for the CPSW2_CONTROL register */
+#define PASS_PRI_TAGGED     (1<<24) /* Pass Priority Tagged */
+#define VLAN_LTYPE2_EN      (1<<21) /* VLAN LTYPE 2 enable */
+#define VLAN_LTYPE1_EN      (1<<20) /* VLAN LTYPE 1 enable */
+#define DSCP_PRI_EN         (1<<16) /* DSCP Priority Enable */
+#define TS_320              (1<<14) /* Time Sync Dest Port 320 enable */
+#define TS_319              (1<<13) /* Time Sync Dest Port 319 enable */
+#define TS_132              (1<<12) /* Time Sync Dest IP Addr 132 enable */
+#define TS_131              (1<<11) /* Time Sync Dest IP Addr 131 enable */
+#define TS_130              (1<<10) /* Time Sync Dest IP Addr 130 enable */
+#define TS_129              (1<<9)  /* Time Sync Dest IP Addr 129 enable */
+#define TS_BIT8             (1<<8)  /* ts_ttl_nonzero? */
+#define TS_ANNEX_D_EN       (1<<4)  /* Time Sync Annex D enable */
+#define TS_LTYPE2_EN        (1<<3)  /* Time Sync LTYPE 2 enable */
+#define TS_LTYPE1_EN        (1<<2)  /* Time Sync LTYPE 1 enable */
+#define TS_TX_EN            (1<<1)  /* Time Sync Transmit Enable */
+#define TS_RX_EN            (1<<0)  /* Time Sync Receive Enable */
+
+#define CTRL_TS_BITS \
+       (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
+        TS_ANNEX_D_EN | TS_LTYPE1_EN)
+
+#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_TX_TS_BITS  (CTRL_TS_BITS | TS_TX_EN)
+#define CTRL_RX_TS_BITS  (CTRL_TS_BITS | TS_RX_EN)
+
+/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
+#define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
+#define TS_SEQ_ID_OFFSET_MASK    (0x3f)
+#define TS_MSG_TYPE_EN_SHIFT     (0)     /* Time Sync Message Type Enable */
+#define TS_MSG_TYPE_EN_MASK      (0xffff)
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
+
+/* Bit definitions for the CPSW1_TS_CTL register */
+#define CPSW_V1_TS_RX_EN               BIT(0)
+#define CPSW_V1_TS_TX_EN               BIT(4)
+#define CPSW_V1_MSG_TYPE_OFS           16
+
+/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
+#define CPSW_V1_SEQ_ID_OFS_SHIFT       16
 
 struct cpsw_host_regs {
        u32     max_blks;
@@ -185,7 +263,7 @@ struct cpsw_sliver_regs {
 };
 
 struct cpsw_slave {
-       struct cpsw_slave_regs __iomem  *regs;
+       void __iomem                    *regs;
        struct cpsw_sliver_regs __iomem *sliver;
        int                             slave_num;
        u32                             mac_control;
@@ -193,6 +271,16 @@ struct cpsw_slave {
        struct phy_device               *phy;
 };
 
+static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
+{
+       return __raw_readl(slave->regs + offset);
+}
+
+static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
+{
+       __raw_writel(val, slave->regs + offset);
+}
+
 struct cpsw_priv {
        spinlock_t                      lock;
        struct platform_device          *pdev;
@@ -202,10 +290,11 @@ struct cpsw_priv {
        struct napi_struct              napi;
        struct device                   *dev;
        struct cpsw_platform_data       data;
-       struct cpsw_regs __iomem        *regs;
-       struct cpsw_ss_regs __iomem     *ss_regs;
+       struct cpsw_ss_regs __iomem     *regs;
+       struct cpsw_wr_regs __iomem     *wr_regs;
        struct cpsw_host_regs __iomem   *host_port_regs;
        u32                             msg_enable;
+       u32                             version;
        struct net_device_stats         stats;
        int                             rx_packet_max;
        int                             host_port;
@@ -218,6 +307,7 @@ struct cpsw_priv {
        /* snapshot of IRQ numbers */
        u32 irqs_table[4];
        u32 num_irqs;
+       struct cpts cpts;
 };
 
 #define napi_to_priv(napi)     container_of(napi, struct cpsw_priv, napi)
@@ -228,10 +318,34 @@ struct cpsw_priv {
                        (func)((priv)->slaves + idx, ##arg);    \
        } while (0)
 
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+       struct cpsw_priv *priv = netdev_priv(ndev);
+
+       if (ndev->flags & IFF_PROMISC) {
+               /* Enable promiscuous mode */
+               dev_err(priv->dev, "Ignoring Promiscuous mode\n");
+               return;
+       }
+
+       /* Clear all mcast from ALE */
+       cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
+
+       if (!netdev_mc_empty(ndev)) {
+               struct netdev_hw_addr *ha;
+
+               /* program multicast address list into ALE register */
+               netdev_for_each_mc_addr(ha, ndev) {
+                       cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
+                               ALE_ALL_PORTS << priv->host_port, 0, 0);
+               }
+       }
+}
+
 static void cpsw_intr_enable(struct cpsw_priv *priv)
 {
-       __raw_writel(0xFF, &priv->ss_regs->tx_en);
-       __raw_writel(0xFF, &priv->ss_regs->rx_en);
+       __raw_writel(0xFF, &priv->wr_regs->tx_en);
+       __raw_writel(0xFF, &priv->wr_regs->rx_en);
 
        cpdma_ctlr_int_ctrl(priv->dma, true);
        return;
@@ -239,8 +353,8 @@ static void cpsw_intr_enable(struct cpsw_priv *priv)
 
 static void cpsw_intr_disable(struct cpsw_priv *priv)
 {
-       __raw_writel(0, &priv->ss_regs->tx_en);
-       __raw_writel(0, &priv->ss_regs->rx_en);
+       __raw_writel(0, &priv->wr_regs->tx_en);
+       __raw_writel(0, &priv->wr_regs->rx_en);
 
        cpdma_ctlr_int_ctrl(priv->dma, false);
        return;
@@ -254,6 +368,7 @@ void cpsw_tx_handler(void *token, int len, int status)
 
        if (unlikely(netif_queue_stopped(ndev)))
                netif_start_queue(ndev);
+       cpts_tx_timestamp(&priv->cpts, skb);
        priv->stats.tx_packets++;
        priv->stats.tx_bytes += len;
        dev_kfree_skb_any(skb);
@@ -274,6 +389,7 @@ void cpsw_rx_handler(void *token, int len, int status)
        }
        if (likely(status >= 0)) {
                skb_put(skb, len);
+               cpts_rx_timestamp(&priv->cpts, skb);
                skb->protocol = eth_type_trans(skb, ndev);
                netif_receive_skb(skb);
                priv->stats.rx_bytes += len;
@@ -359,8 +475,8 @@ static inline void soft_reset(const char *module, void __iomem *reg)
 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
                               struct cpsw_priv *priv)
 {
-       __raw_writel(mac_hi(priv->mac_addr), &slave->regs->sa_hi);
-       __raw_writel(mac_lo(priv->mac_addr), &slave->regs->sa_lo);
+       slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
+       slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
 }
 
 static void _cpsw_adjust_link(struct cpsw_slave *slave,
@@ -446,7 +562,15 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
 
        /* setup priority mapping */
        __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
-       __raw_writel(TX_PRIORITY_MAPPING, &slave->regs->tx_pri_map);
+
+       switch (priv->version) {
+       case CPSW_VERSION_1:
+               slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+               break;
+       case CPSW_VERSION_2:
+               slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+               break;
+       }
 
        /* setup max packet size, and mac address */
        __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
@@ -506,6 +630,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
        pm_runtime_get_sync(&priv->pdev->dev);
 
        reg = __raw_readl(&priv->regs->id_ver);
+       priv->version = reg;
 
        dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
                 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
@@ -592,6 +717,11 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
+       if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && priv->cpts.tx_enable)
+               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+       skb_tx_timestamp(skb);
+
        ret = cpdma_chan_submit(priv->txch, skb, skb->data,
                                skb->len, GFP_KERNEL);
        if (unlikely(ret != 0)) {
@@ -629,6 +759,130 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
                dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
 }
 
+#ifdef CONFIG_TI_CPTS
+
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
+{
+       struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+       u32 ts_en, seq_id;
+
+       if (!priv->cpts.tx_enable && !priv->cpts.rx_enable) {
+               slave_write(slave, 0, CPSW1_TS_CTL);
+               return;
+       }
+
+       seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
+       ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
+
+       if (priv->cpts.tx_enable)
+               ts_en |= CPSW_V1_TS_TX_EN;
+
+       if (priv->cpts.rx_enable)
+               ts_en |= CPSW_V1_TS_RX_EN;
+
+       slave_write(slave, ts_en, CPSW1_TS_CTL);
+       slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
+}
+
+static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
+{
+       struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave];
+       u32 ctrl, mtype;
+
+       ctrl = slave_read(slave, CPSW2_CONTROL);
+       ctrl &= ~CTRL_ALL_TS_MASK;
+
+       if (priv->cpts.tx_enable)
+               ctrl |= CTRL_TX_TS_BITS;
+
+       if (priv->cpts.rx_enable)
+               ctrl |= CTRL_RX_TS_BITS;
+
+       mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
+
+       slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
+       slave_write(slave, ctrl, CPSW2_CONTROL);
+       __raw_writel(ETH_P_1588, &priv->regs->ts_ltype);
+}
+
+static int cpsw_hwtstamp_ioctl(struct cpsw_priv *priv, struct ifreq *ifr)
+{
+       struct cpts *cpts = &priv->cpts;
+       struct hwtstamp_config cfg;
+
+       if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+               return -EFAULT;
+
+       /* reserved for future extensions */
+       if (cfg.flags)
+               return -EINVAL;
+
+       switch (cfg.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               cpts->tx_enable = 0;
+               break;
+       case HWTSTAMP_TX_ON:
+               cpts->tx_enable = 1;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       switch (cfg.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               cpts->rx_enable = 0;
+               break;
+       case HWTSTAMP_FILTER_ALL:
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+               return -ERANGE;
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+               cpts->rx_enable = 1;
+               cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       switch (priv->version) {
+       case CPSW_VERSION_1:
+               cpsw_hwtstamp_v1(priv);
+               break;
+       case CPSW_VERSION_2:
+               cpsw_hwtstamp_v2(priv);
+               break;
+       default:
+               return -ENOTSUPP;
+       }
+
+       return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+#endif /*CONFIG_TI_CPTS*/
+
+static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+       struct cpsw_priv *priv = netdev_priv(dev);
+
+       if (!netif_running(dev))
+               return -EINVAL;
+
+#ifdef CONFIG_TI_CPTS
+       if (cmd == SIOCSHWTSTAMP)
+               return cpsw_hwtstamp_ioctl(priv, req);
+#endif
+       return -ENOTSUPP;
+}
+
 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
 {
        struct cpsw_priv *priv = netdev_priv(ndev);
@@ -669,10 +923,12 @@ static const struct net_device_ops cpsw_netdev_ops = {
        .ndo_stop               = cpsw_ndo_stop,
        .ndo_start_xmit         = cpsw_ndo_start_xmit,
        .ndo_change_rx_flags    = cpsw_ndo_change_rx_flags,
+       .ndo_do_ioctl           = cpsw_ndo_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_tx_timeout         = cpsw_ndo_tx_timeout,
        .ndo_get_stats          = cpsw_ndo_get_stats,
+       .ndo_set_rx_mode        = cpsw_ndo_set_rx_mode,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cpsw_ndo_poll_controller,
 #endif
@@ -699,11 +955,44 @@ static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
        priv->msg_enable = value;
 }
 
+static int cpsw_get_ts_info(struct net_device *ndev,
+                           struct ethtool_ts_info *info)
+{
+#ifdef CONFIG_TI_CPTS
+       struct cpsw_priv *priv = netdev_priv(ndev);
+
+       info->so_timestamping =
+               SOF_TIMESTAMPING_TX_HARDWARE |
+               SOF_TIMESTAMPING_TX_SOFTWARE |
+               SOF_TIMESTAMPING_RX_HARDWARE |
+               SOF_TIMESTAMPING_RX_SOFTWARE |
+               SOF_TIMESTAMPING_SOFTWARE |
+               SOF_TIMESTAMPING_RAW_HARDWARE;
+       info->phc_index = priv->cpts.phc_index;
+       info->tx_types =
+               (1 << HWTSTAMP_TX_OFF) |
+               (1 << HWTSTAMP_TX_ON);
+       info->rx_filters =
+               (1 << HWTSTAMP_FILTER_NONE) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+#else
+       info->so_timestamping =
+               SOF_TIMESTAMPING_TX_SOFTWARE |
+               SOF_TIMESTAMPING_RX_SOFTWARE |
+               SOF_TIMESTAMPING_SOFTWARE;
+       info->phc_index = -1;
+       info->tx_types = 0;
+       info->rx_filters = 0;
+#endif
+       return 0;
+}
+
 static const struct ethtool_ops cpsw_ethtool_ops = {
        .get_drvinfo    = cpsw_get_drvinfo,
        .get_msglevel   = cpsw_get_msglevel,
        .set_msglevel   = cpsw_set_msglevel,
        .get_link       = ethtool_op_get_link,
+       .get_ts_info    = cpsw_get_ts_info,
 };
 
 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
@@ -734,6 +1023,27 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
        }
        data->slaves = prop;
 
+       if (of_property_read_u32(node, "cpts_active_slave", &prop)) {
+               pr_err("Missing cpts_active_slave property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->cpts_active_slave = prop;
+
+       if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
+               pr_err("Missing cpts_clock_mult property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->cpts_clock_mult = prop;
+
+       if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
+               pr_err("Missing cpts_clock_shift property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->cpts_clock_shift = prop;
+
        data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
                                   data->slaves, GFP_KERNEL);
        if (!data->slave_data) {
@@ -799,6 +1109,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
        }
        data->hw_stats_reg_ofs = prop;
 
+       if (of_property_read_u32(node, "cpts_reg_ofs", &prop)) {
+               pr_err("Missing cpts_reg_ofs property in the DT.\n");
+               ret = -EINVAL;
+               goto error_ret;
+       }
+       data->cpts_reg_ofs = prop;
+
        if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
                pr_err("Missing bd_ram_ofs property in the DT.\n");
                ret = -EINVAL;
@@ -951,6 +1268,7 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
        priv->regs = regs;
        priv->host_port = data->host_port_num;
        priv->host_port_regs = regs + data->host_port_reg_ofs;
+       priv->cpts.reg = regs + data->cpts_reg_ofs;
 
        priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!priv->cpsw_ss_res) {
@@ -972,7 +1290,7 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
                dev_err(priv->dev, "unable to map i/o region\n");
                goto clean_cpsw_ss_iores_ret;
        }
-       priv->ss_regs = regs;
+       priv->wr_regs = regs;
 
        for_each_slave(priv, cpsw_slave_init, priv);
 
@@ -1072,6 +1390,10 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
                goto clean_irq_ret;
        }
 
+       if (cpts_register(&pdev->dev, &priv->cpts,
+                         data->cpts_clock_mult, data->cpts_clock_shift))
+               dev_err(priv->dev, "error registering cpts device\n");
+
        cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
                  priv->cpsw_res->start, ndev->irq);
 
@@ -1111,6 +1433,7 @@ static int __devexit cpsw_remove(struct platform_device *pdev)
        pr_info("removing device");
        platform_set_drvdata(pdev, NULL);
 
+       cpts_unregister(&priv->cpts);
        free_irq(ndev->irq, priv);
        cpsw_ale_destroy(priv->ale);
        cpdma_chan_destroy(priv->txch);
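Since cpsw now provides a .get_ts_info ethtool op, user space can query the PHC index and supported filters before configuring timestamping. A minimal sketch of that query, assuming an interface named "eth0" (the name is not something this patch defines):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/sockios.h>
    #include <linux/ethtool.h>

    /* Read timestamping capabilities of "eth0" via ETHTOOL_GET_TS_INFO. */
    int main(void)
    {
            struct ethtool_ts_info info;
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&info, 0, sizeof(info));
            info.cmd = ETHTOOL_GET_TS_INFO;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&info;

            if (ioctl(fd, SIOCETHTOOL, &ifr))
                    perror("ETHTOOL_GET_TS_INFO");
            else
                    printf("phc_index=%d rx_filters=0x%x\n",
                           info.phc_index, info.rx_filters);
            return 0;
    }

On a CONFIG_TI_CPTS build the reported phc_index comes from ptp_clock_index() in cpts_register(); without it, cpsw_get_ts_info() reports -1 and only software timestamping.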
index ca0d48a7e508059fd35b6c7bff8cd24295caadbf..0e9ccc2cf91fefce7bda15f6c9ad4ee580e6aa5d 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/stat.h>
 #include <linux/sysfs.h>
+#include <linux/etherdevice.h>
 
 #include "cpsw_ale.h"
 
@@ -211,10 +212,34 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
        mask &= ~port_mask;
 
        /* free if only remaining port is host port */
-       if (mask == BIT(ale->params.ale_ports))
-               cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
-       else
+       if (mask)
                cpsw_ale_set_port_mask(ale_entry, mask);
+       else
+               cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+}
+
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+{
+       u32 ale_entry[ALE_ENTRY_WORDS];
+       int ret, idx;
+
+       for (idx = 0; idx < ale->params.ale_entries; idx++) {
+               cpsw_ale_read(ale, idx, ale_entry);
+               ret = cpsw_ale_get_entry_type(ale_entry);
+               if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
+                       continue;
+
+               if (cpsw_ale_get_mcast(ale_entry)) {
+                       u8 addr[6];
+
+                       cpsw_ale_get_addr(ale_entry, addr);
+                       if (!is_broadcast_ether_addr(addr))
+                               cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
+               }
+
+               cpsw_ale_write(ale, idx, ale_entry);
+       }
+       return 0;
 }
 
 static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
index a95b37beb02d56e809c15239247085304c6a2cba..2bd09cbce522dc222fb5a72f9691d368a3b57094 100644 (file)
@@ -80,6 +80,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
 
 int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
 int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
 int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags);
 int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port);
 int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
new file mode 100644 (file)
index 0000000..3377667
--- /dev/null
@@ -0,0 +1,427 @@
+/*
+ * TI Common Platform Time Sync
+ *
+ * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#include <linux/err.h>
+#include <linux/if.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_classify.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+
+#include <plat/clock.h>
+
+#include "cpts.h"
+
+#ifdef CONFIG_TI_CPTS
+
+static struct sock_filter ptp_filter[] = {
+       PTP_FILTER
+};
+
+#define cpts_read32(c, r)      __raw_readl(&c->reg->r)
+#define cpts_write32(c, v, r)  __raw_writel(v, &c->reg->r)
+
+static int event_expired(struct cpts_event *event)
+{
+       return time_after(jiffies, event->tmo);
+}
+
+static int event_type(struct cpts_event *event)
+{
+       return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+}
+
+static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
+{
+       u32 r = cpts_read32(cpts, intstat_raw);
+
+       if (r & TS_PEND_RAW) {
+               *high = cpts_read32(cpts, event_high);
+               *low  = cpts_read32(cpts, event_low);
+               cpts_write32(cpts, EVENT_POP, event_pop);
+               return 0;
+       }
+       return -1;
+}
+
+/*
+ * Returns zero if a matching event type was found.
+ */
+static int cpts_fifo_read(struct cpts *cpts, int match)
+{
+       int i, type = -1;
+       u32 hi, lo;
+       struct cpts_event *event;
+
+       for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
+               if (cpts_fifo_pop(cpts, &hi, &lo))
+                       break;
+               if (list_empty(&cpts->pool)) {
+                       pr_err("cpts: event pool is empty\n");
+                       return -1;
+               }
+               event = list_first_entry(&cpts->pool, struct cpts_event, list);
+               event->tmo = jiffies + 2;
+               event->high = hi;
+               event->low = lo;
+               type = event_type(event);
+               switch (type) {
+               case CPTS_EV_PUSH:
+               case CPTS_EV_RX:
+               case CPTS_EV_TX:
+                       list_del_init(&event->list);
+                       list_add_tail(&event->list, &cpts->events);
+                       break;
+               case CPTS_EV_ROLL:
+               case CPTS_EV_HALF:
+               case CPTS_EV_HW:
+                       break;
+               default:
+                       pr_err("cpts: unknown event type\n");
+                       break;
+               }
+               if (type == match)
+                       break;
+       }
+       return type == match ? 0 : -1;
+}
+
+static cycle_t cpts_systim_read(const struct cyclecounter *cc)
+{
+       u64 val = 0;
+       struct cpts_event *event;
+       struct list_head *this, *next;
+       struct cpts *cpts = container_of(cc, struct cpts, cc);
+
+       cpts_write32(cpts, TS_PUSH, ts_push);
+       if (cpts_fifo_read(cpts, CPTS_EV_PUSH))
+               pr_err("cpts: unable to obtain a time stamp\n");
+
+       list_for_each_safe(this, next, &cpts->events) {
+               event = list_entry(this, struct cpts_event, list);
+               if (event_type(event) == CPTS_EV_PUSH) {
+                       list_del_init(&event->list);
+                       list_add(&event->list, &cpts->pool);
+                       val = event->low;
+                       break;
+               }
+       }
+
+       return val;
+}
+
+/* PTP clock operations */
+
+static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+       u64 adj;
+       u32 diff, mult;
+       int neg_adj = 0;
+       unsigned long flags;
+       struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+       if (ppb < 0) {
+               neg_adj = 1;
+               ppb = -ppb;
+       }
+       mult = cpts->cc_mult;
+       adj = mult;
+       adj *= ppb;
+       diff = div_u64(adj, 1000000000ULL);
+
+       spin_lock_irqsave(&cpts->lock, flags);
+
+       timecounter_read(&cpts->tc);
+
+       cpts->cc.mult = neg_adj ? mult - diff : mult + diff;
+
+       spin_unlock_irqrestore(&cpts->lock, flags);
+
+       return 0;
+}
+
+static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+       s64 now;
+       unsigned long flags;
+       struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+       spin_lock_irqsave(&cpts->lock, flags);
+       now = timecounter_read(&cpts->tc);
+       now += delta;
+       timecounter_init(&cpts->tc, &cpts->cc, now);
+       spin_unlock_irqrestore(&cpts->lock, flags);
+
+       return 0;
+}
+
+static int cpts_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+       u64 ns;
+       u32 remainder;
+       unsigned long flags;
+       struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+       spin_lock_irqsave(&cpts->lock, flags);
+       ns = timecounter_read(&cpts->tc);
+       spin_unlock_irqrestore(&cpts->lock, flags);
+
+       ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
+       ts->tv_nsec = remainder;
+
+       return 0;
+}
+
+static int cpts_ptp_settime(struct ptp_clock_info *ptp,
+                           const struct timespec *ts)
+{
+       u64 ns;
+       unsigned long flags;
+       struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+       ns = ts->tv_sec * 1000000000ULL;
+       ns += ts->tv_nsec;
+
+       spin_lock_irqsave(&cpts->lock, flags);
+       timecounter_init(&cpts->tc, &cpts->cc, ns);
+       spin_unlock_irqrestore(&cpts->lock, flags);
+
+       return 0;
+}
+
+static int cpts_ptp_enable(struct ptp_clock_info *ptp,
+                          struct ptp_clock_request *rq, int on)
+{
+       return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info cpts_info = {
+       .owner          = THIS_MODULE,
+       .name           = "CPTS timer",
+       .max_adj        = 1000000,
+       .n_ext_ts       = 0,
+       .pps            = 0,
+       .adjfreq        = cpts_ptp_adjfreq,
+       .adjtime        = cpts_ptp_adjtime,
+       .gettime        = cpts_ptp_gettime,
+       .settime        = cpts_ptp_settime,
+       .enable         = cpts_ptp_enable,
+};
+
+static void cpts_overflow_check(struct work_struct *work)
+{
+       struct timespec ts;
+       struct cpts *cpts = container_of(work, struct cpts, overflow_work.work);
+
+       cpts_write32(cpts, CPTS_EN, control);
+       cpts_write32(cpts, TS_PEND_EN, int_enable);
+       cpts_ptp_gettime(&cpts->info, &ts);
+       pr_debug("cpts overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
+       schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
+}
+
+#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
+
+static void cpts_clk_init(struct cpts *cpts)
+{
+       cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
+       if (IS_ERR(cpts->refclk)) {
+               pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
+               cpts->refclk = NULL;
+               return;
+       }
+       clk_enable(cpts->refclk);
+       cpts->freq = cpts->refclk->recalc(cpts->refclk);
+}
+
+static void cpts_clk_release(struct cpts *cpts)
+{
+       clk_disable(cpts->refclk);
+       clk_put(cpts->refclk);
+}
+
+static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
+                     u16 ts_seqid, u8 ts_msgtype)
+{
+       u16 *seqid;
+       unsigned int offset;
+       u8 *msgtype, *data = skb->data;
+
+       switch (ptp_class) {
+       case PTP_CLASS_V1_IPV4:
+       case PTP_CLASS_V2_IPV4:
+               offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+               break;
+       case PTP_CLASS_V1_IPV6:
+       case PTP_CLASS_V2_IPV6:
+               offset = OFF_PTP6;
+               break;
+       case PTP_CLASS_V2_L2:
+               offset = ETH_HLEN;
+               break;
+       case PTP_CLASS_V2_VLAN:
+               offset = ETH_HLEN + VLAN_HLEN;
+               break;
+       default:
+               return 0;
+       }
+
+       if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+               return 0;
+
+       if (unlikely(ptp_class & PTP_CLASS_V1))
+               msgtype = data + offset + OFF_PTP_CONTROL;
+       else
+               msgtype = data + offset;
+
+       seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+
+       return (ts_msgtype == (*msgtype & 0xf) && ts_seqid == ntohs(*seqid));
+}
+
+static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb, int ev_type)
+{
+       u64 ns = 0;
+       struct cpts_event *event;
+       struct list_head *this, *next;
+       unsigned int class = sk_run_filter(skb, ptp_filter);
+       unsigned long flags;
+       u16 seqid;
+       u8 mtype;
+
+       if (class == PTP_CLASS_NONE)
+               return 0;
+
+       spin_lock_irqsave(&cpts->lock, flags);
+       cpts_fifo_read(cpts, CPTS_EV_PUSH);
+       list_for_each_safe(this, next, &cpts->events) {
+               event = list_entry(this, struct cpts_event, list);
+               if (event_expired(event)) {
+                       list_del_init(&event->list);
+                       list_add(&event->list, &cpts->pool);
+                       continue;
+               }
+               mtype = (event->high >> MESSAGE_TYPE_SHIFT) & MESSAGE_TYPE_MASK;
+               seqid = (event->high >> SEQUENCE_ID_SHIFT) & SEQUENCE_ID_MASK;
+               if (ev_type == event_type(event) &&
+                   cpts_match(skb, class, seqid, mtype)) {
+                       ns = timecounter_cyc2time(&cpts->tc, event->low);
+                       list_del_init(&event->list);
+                       list_add(&event->list, &cpts->pool);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&cpts->lock, flags);
+
+       return ns;
+}
+
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+       u64 ns;
+       struct skb_shared_hwtstamps *ssh;
+
+       if (!cpts->rx_enable)
+               return;
+       ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
+       if (!ns)
+               return;
+       ssh = skb_hwtstamps(skb);
+       memset(ssh, 0, sizeof(*ssh));
+       ssh->hwtstamp = ns_to_ktime(ns);
+}
+
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+       u64 ns;
+       struct skb_shared_hwtstamps ssh;
+
+       if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+               return;
+       ns = cpts_find_ts(cpts, skb, CPTS_EV_TX);
+       if (!ns)
+               return;
+       memset(&ssh, 0, sizeof(ssh));
+       ssh.hwtstamp = ns_to_ktime(ns);
+       skb_tstamp_tx(skb, &ssh);
+}
+
+#endif /*CONFIG_TI_CPTS*/
+
+int cpts_register(struct device *dev, struct cpts *cpts,
+                 u32 mult, u32 shift)
+{
+#ifdef CONFIG_TI_CPTS
+       int err, i;
+       unsigned long flags;
+
+       if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
+               pr_err("cpts: bad ptp filter\n");
+               return -EINVAL;
+       }
+       cpts->info = cpts_info;
+       cpts->clock = ptp_clock_register(&cpts->info, dev);
+       if (IS_ERR(cpts->clock)) {
+               err = PTR_ERR(cpts->clock);
+               cpts->clock = NULL;
+               return err;
+       }
+       spin_lock_init(&cpts->lock);
+
+       cpts->cc.read = cpts_systim_read;
+       cpts->cc.mask = CLOCKSOURCE_MASK(32);
+       cpts->cc_mult = mult;
+       cpts->cc.mult = mult;
+       cpts->cc.shift = shift;
+
+       INIT_LIST_HEAD(&cpts->events);
+       INIT_LIST_HEAD(&cpts->pool);
+       for (i = 0; i < CPTS_MAX_EVENTS; i++)
+               list_add(&cpts->pool_data[i].list, &cpts->pool);
+
+       cpts_clk_init(cpts);
+       cpts_write32(cpts, CPTS_EN, control);
+       cpts_write32(cpts, TS_PEND_EN, int_enable);
+
+       spin_lock_irqsave(&cpts->lock, flags);
+       timecounter_init(&cpts->tc, &cpts->cc, ktime_to_ns(ktime_get_real()));
+       spin_unlock_irqrestore(&cpts->lock, flags);
+
+       INIT_DELAYED_WORK(&cpts->overflow_work, cpts_overflow_check);
+       schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
+
+       cpts->phc_index = ptp_clock_index(cpts->clock);
+#endif
+       return 0;
+}
+
+void cpts_unregister(struct cpts *cpts)
+{
+#ifdef CONFIG_TI_CPTS
+       if (cpts->clock) {
+               ptp_clock_unregister(cpts->clock);
+               cancel_delayed_work_sync(&cpts->overflow_work);
+       }
+       if (cpts->refclk)
+               cpts_clk_release(cpts);
+#endif
+}
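cpts_ptp_adjfreq() above scales the nominal cyclecounter multiplier by the requested offset in parts per billion: diff = cc_mult * |ppb| / 1e9, then cc.mult is nudged up or down by diff. A stand-alone sketch of that arithmetic; the nominal mult value below is purely illustrative, not taken from any board configuration:

    #include <stdint.h>
    #include <stdio.h>

    /* Same scaling as cpts_ptp_adjfreq(): diff = mult * |ppb| / 1e9 */
    static uint32_t adjust_mult(uint32_t mult, int32_t ppb)
    {
            int neg = ppb < 0;
            uint64_t adj = (uint64_t)mult * (uint64_t)(neg ? -ppb : ppb);
            uint32_t diff = (uint32_t)(adj / 1000000000ULL);

            return neg ? mult - diff : mult + diff;
    }

    int main(void)
    {
            /* nominal mult 0x80000000, asked to run 100 ppb fast */
            printf("new mult = %#x\n", adjust_mult(0x80000000u, 100));
            return 0;
    }

Here diff works out to 214, so the new multiplier is 0x800000d6. Note that the driver applies the new mult only after timecounter_read(), so cycles already accumulated keep their old conversion.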
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
new file mode 100644 (file)
index 0000000..e1bba3a
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * TI Common Platform Time Sync
+ *
+ * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+#ifndef _TI_CPTS_H_
+#define _TI_CPTS_H_
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clocksource.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/skbuff.h>
+
+struct cpsw_cpts {
+       u32 idver;                /* Identification and version */
+       u32 control;              /* Time sync control */
+       u32 res1;
+       u32 ts_push;              /* Time stamp event push */
+       u32 ts_load_val;          /* Time stamp load value */
+       u32 ts_load_en;           /* Time stamp load enable */
+       u32 res2[2];
+       u32 intstat_raw;          /* Time sync interrupt status raw */
+       u32 intstat_masked;       /* Time sync interrupt status masked */
+       u32 int_enable;           /* Time sync interrupt enable */
+       u32 res3;
+       u32 event_pop;            /* Event interrupt pop */
+       u32 event_low;            /* 32 Bit Event Time Stamp */
+       u32 event_high;           /* Event Type Fields */
+};
+
+/* Bit definitions for the IDVER register */
+#define TX_IDENT_SHIFT       (16)    /* TX Identification Value */
+#define TX_IDENT_MASK        (0xffff)
+#define RTL_VER_SHIFT        (11)    /* RTL Version Value */
+#define RTL_VER_MASK         (0x1f)
+#define MAJOR_VER_SHIFT      (8)     /* Major Version Value */
+#define MAJOR_VER_MASK       (0x7)
+#define MINOR_VER_SHIFT      (0)     /* Minor Version Value */
+#define MINOR_VER_MASK       (0xff)
+
+/* Bit definitions for the CONTROL register */
+#define HW4_TS_PUSH_EN       (1<<11) /* Hardware push 4 enable */
+#define HW3_TS_PUSH_EN       (1<<10) /* Hardware push 3 enable */
+#define HW2_TS_PUSH_EN       (1<<9)  /* Hardware push 2 enable */
+#define HW1_TS_PUSH_EN       (1<<8)  /* Hardware push 1 enable */
+#define INT_TEST             (1<<1)  /* Interrupt Test */
+#define CPTS_EN              (1<<0)  /* Time Sync Enable */
+
+/*
+ * Definitions for the single-bit registers:
+ * TS_PUSH TS_LOAD_EN  INTSTAT_RAW INTSTAT_MASKED INT_ENABLE EVENT_POP
+ */
+#define TS_PUSH             (1<<0)  /* Time stamp event push */
+#define TS_LOAD_EN          (1<<0)  /* Time Stamp Load */
+#define TS_PEND_RAW         (1<<0)  /* int read (before enable) */
+#define TS_PEND             (1<<0)  /* masked interrupt read (after enable) */
+#define TS_PEND_EN          (1<<0)  /* masked interrupt enable */
+#define EVENT_POP           (1<<0)  /* writing discards one event */
+
+/* Bit definitions for the EVENT_HIGH register */
+#define PORT_NUMBER_SHIFT    (24)    /* Indicates Ethernet port or HW pin */
+#define PORT_NUMBER_MASK     (0x1f)
+#define EVENT_TYPE_SHIFT     (20)    /* Time sync event type */
+#define EVENT_TYPE_MASK      (0xf)
+#define MESSAGE_TYPE_SHIFT   (16)    /* PTP message type */
+#define MESSAGE_TYPE_MASK    (0xf)
+#define SEQUENCE_ID_SHIFT    (0)     /* PTP message sequence ID */
+#define SEQUENCE_ID_MASK     (0xffff)
+
+enum {
+       CPTS_EV_PUSH, /* Time Stamp Push Event */
+       CPTS_EV_ROLL, /* Time Stamp Rollover Event */
+       CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
+       CPTS_EV_HW,   /* Hardware Time Stamp Push Event */
+       CPTS_EV_RX,   /* Ethernet Receive Event */
+       CPTS_EV_TX,   /* Ethernet Transmit Event */
+};
+
+/* This covers any input clock up to about 500 MHz. */
+#define CPTS_OVERFLOW_PERIOD (HZ * 8)
+
+#define CPTS_FIFO_DEPTH 16
+#define CPTS_MAX_EVENTS 32
+
+struct cpts_event {
+       struct list_head list;
+       unsigned long tmo;
+       u32 high;
+       u32 low;
+};
+
+struct cpts {
+       struct cpsw_cpts __iomem *reg;
+       int tx_enable;
+       int rx_enable;
+#ifdef CONFIG_TI_CPTS
+       struct ptp_clock_info info;
+       struct ptp_clock *clock;
+       spinlock_t lock; /* protects time registers */
+       u32 cc_mult; /* for the nominal frequency */
+       struct cyclecounter cc;
+       struct timecounter tc;
+       struct delayed_work overflow_work;
+       int phc_index;
+       struct clk *refclk;
+       unsigned long freq;
+       struct list_head events;
+       struct list_head pool;
+       struct cpts_event pool_data[CPTS_MAX_EVENTS];
+#endif
+};
+
+#ifdef CONFIG_TI_CPTS
+extern void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+extern void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+#else
+static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+}
+static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+}
+#endif
+
+extern int cpts_register(struct device *dev, struct cpts *cpts,
+                        u32 mult, u32 shift);
+extern void cpts_unregister(struct cpts *cpts);
+
+#endif
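For a driver embedding this interface, the sequence mirrors what cpsw does earlier in this series: point cpts.reg at the CPTS register block, register with the clock mult/shift, and feed completed skbs through the timestamp hooks. A compressed sketch under those assumptions; the my_* names are placeholders, not part of this patch:

    #include "cpts.h"

    struct my_priv {
            struct cpts cpts;
            void __iomem *ss_base;
    };

    static int my_probe(struct device *dev, struct my_priv *priv,
                        u32 cpts_ofs, u32 mult, u32 shift)
    {
            priv->cpts.reg = priv->ss_base + cpts_ofs;
            return cpts_register(dev, &priv->cpts, mult, shift);
    }

    /* rx/tx completion paths */
    static void my_rx_done(struct my_priv *priv, struct sk_buff *skb)
    {
            cpts_rx_timestamp(&priv->cpts, skb);    /* fills skb_hwtstamps() */
    }

    static void my_tx_done(struct my_priv *priv, struct sk_buff *skb)
    {
            cpts_tx_timestamp(&priv->cpts, skb);    /* calls skb_tstamp_tx() */
    }

When CONFIG_TI_CPTS is disabled, the rx/tx hooks compile away to the empty inlines above and cpts_register()/cpts_unregister() degrade to no-ops, so callers do not need their own #ifdefs.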
index eb3f5cefeba3c6ddcbd53a44cb63a759d9b803ef..0b2706abe3e3b06dbd158eff440d8c2ccc4fb50b 100644 (file)
@@ -1034,7 +1034,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        return err;
 }
 
-struct rtnl_link_stats64*
+static struct rtnl_link_stats64*
 ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
 {
        struct ppp *ppp = netdev_priv(dev);
index 315751965f3546fa7fe5a688f0007ffe5d3c6537..9e287680cd2ecee393c1acad6f79c925d85bdf72 100644 (file)
@@ -109,16 +109,56 @@ struct tap_filter {
        unsigned char   addr[FLT_EXACT_COUNT][ETH_ALEN];
 };
 
+/* 1024 is probably a high enough limit: modern hypervisors seem to support on
+ * the order of 100-200 CPUs so this leaves us some breathing space if we want
+ * to match a queue per guest CPU.
+ */
+#define MAX_TAP_QUEUES 1024
+
+#define TUN_FLOW_EXPIRE (3 * HZ)
+
+/* A tun_file connects an open character device to a tuntap netdevice. It
+ * also contains all socket related structures (except sock_fprog and
+ * tap_filter) to serve as one transmit queue for the tuntap device. The
+ * sock_fprog and tap_filter are kept in tun_struct since they are used for
+ * filtering on the netdevice, not on a specific queue (at least I didn't
+ * see the requirement for this).
+ *
+ * RCU usage:
+ * The tun_file and tun_struct are loosely coupled; the pointer from one to
+ * the other can only be read while rcu_read_lock or rtnl_lock is held.
+ */
 struct tun_file {
-       atomic_t count;
-       struct tun_struct *tun;
+       struct sock sk;
+       struct socket socket;
+       struct socket_wq wq;
+       struct tun_struct __rcu *tun;
        struct net *net;
+       struct fasync_struct *fasync;
+       /* only used for fasync */
+       unsigned int flags;
+       u16 queue_index;
+};
+
+struct tun_flow_entry {
+       struct hlist_node hash_link;
+       struct rcu_head rcu;
+       struct tun_struct *tun;
+
+       u32 rxhash;
+       int queue_index;
+       unsigned long updated;
 };
 
-struct tun_sock;
+#define TUN_NUM_FLOW_ENTRIES 1024
 
+/* Since the socket was moved to tun_file, to preserve the behavior of a
+ * persistent device, the socket filter, sndbuf and vnet header size are
+ * restored when a file is attached to a persistent device.
+ */
 struct tun_struct {
-       struct tun_file         *tfile;
+       struct tun_file __rcu   *tfiles[MAX_TAP_QUEUES];
+       unsigned int            numqueues;
        unsigned int            flags;
        kuid_t                  owner;
        kgid_t                  group;
@@ -127,88 +167,351 @@ struct tun_struct {
        netdev_features_t       set_features;
 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
                          NETIF_F_TSO6|NETIF_F_UFO)
-       struct fasync_struct    *fasync;
-
-       struct tap_filter       txflt;
-       struct socket           socket;
-       struct socket_wq        wq;
 
        int                     vnet_hdr_sz;
-
+       int                     sndbuf;
+       struct tap_filter       txflt;
+       struct sock_fprog       fprog;
+       /* protected by rtnl lock */
+       bool                    filter_attached;
 #ifdef TUN_DEBUG
        int debug;
 #endif
+       spinlock_t lock;
+       struct kmem_cache *flow_cache;
+       struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
+       struct timer_list flow_gc_timer;
+       unsigned long ageing_time;
 };
 
-struct tun_sock {
-       struct sock             sk;
-       struct tun_struct       *tun;
-};
+static inline u32 tun_hashfn(u32 rxhash)
+{
+       return rxhash & 0x3ff;
+}
 
-static inline struct tun_sock *tun_sk(struct sock *sk)
+static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
 {
-       return container_of(sk, struct tun_sock, sk);
+       struct tun_flow_entry *e;
+       struct hlist_node *n;
+
+       hlist_for_each_entry_rcu(e, n, head, hash_link) {
+               if (e->rxhash == rxhash)
+                       return e;
+       }
+       return NULL;
 }
 
-static int tun_attach(struct tun_struct *tun, struct file *file)
+static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
+                                             struct hlist_head *head,
+                                             u32 rxhash, u16 queue_index)
 {
-       struct tun_file *tfile = file->private_data;
-       int err;
+       struct tun_flow_entry *e = kmem_cache_alloc(tun->flow_cache,
+                                                   GFP_ATOMIC);
+       if (e) {
+               tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
+                         rxhash, queue_index);
+               e->updated = jiffies;
+               e->rxhash = rxhash;
+               e->queue_index = queue_index;
+               e->tun = tun;
+               hlist_add_head_rcu(&e->hash_link, head);
+       }
+       return e;
+}
 
-       ASSERT_RTNL();
+static void tun_flow_free(struct rcu_head *head)
+{
+       struct tun_flow_entry *e
+               = container_of(head, struct tun_flow_entry, rcu);
+       kmem_cache_free(e->tun->flow_cache, e);
+}
 
-       netif_tx_lock_bh(tun->dev);
+static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
+{
+       tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
+                 e->rxhash, e->queue_index);
+       hlist_del_rcu(&e->hash_link);
+       call_rcu(&e->rcu, tun_flow_free);
+}
 
-       err = -EINVAL;
-       if (tfile->tun)
-               goto out;
+static void tun_flow_flush(struct tun_struct *tun)
+{
+       int i;
 
-       err = -EBUSY;
-       if (tun->tfile)
-               goto out;
+       spin_lock_bh(&tun->lock);
+       for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
+               struct tun_flow_entry *e;
+               struct hlist_node *h, *n;
 
-       err = 0;
-       tfile->tun = tun;
-       tun->tfile = tfile;
-       tun->socket.file = file;
-       netif_carrier_on(tun->dev);
-       dev_hold(tun->dev);
-       sock_hold(tun->socket.sk);
-       atomic_inc(&tfile->count);
+               hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
+                       tun_flow_delete(tun, e);
+       }
+       spin_unlock_bh(&tun->lock);
+}
 
-out:
-       netif_tx_unlock_bh(tun->dev);
-       return err;
+static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
+{
+       int i;
+
+       spin_lock_bh(&tun->lock);
+       for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
+               struct tun_flow_entry *e;
+               struct hlist_node *h, *n;
+
+               hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+                       if (e->queue_index == queue_index)
+                               tun_flow_delete(tun, e);
+               }
+       }
+       spin_unlock_bh(&tun->lock);
 }
 
-static void __tun_detach(struct tun_struct *tun)
+static void tun_flow_cleanup(unsigned long data)
 {
-       /* Detach from net device */
-       netif_tx_lock_bh(tun->dev);
-       netif_carrier_off(tun->dev);
-       tun->tfile = NULL;
-       netif_tx_unlock_bh(tun->dev);
+       struct tun_struct *tun = (struct tun_struct *)data;
+       unsigned long delay = tun->ageing_time;
+       unsigned long next_timer = jiffies + delay;
+       unsigned long count = 0;
+       int i;
 
-       /* Drop read queue */
-       skb_queue_purge(&tun->socket.sk->sk_receive_queue);
+       tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
+
+       spin_lock_bh(&tun->lock);
+       for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
+               struct tun_flow_entry *e;
+               struct hlist_node *h, *n;
+
+               hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
+                       unsigned long this_timer;
+                       count++;
+                       this_timer = e->updated + delay;
+                       if (time_before_eq(this_timer, jiffies))
+                               tun_flow_delete(tun, e);
+                       else if (time_before(this_timer, next_timer))
+                               next_timer = this_timer;
+               }
+       }
 
-       /* Drop the extra count on the net device */
-       dev_put(tun->dev);
+       if (count)
+               mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
+       spin_unlock_bh(&tun->lock);
 }
 
-static void tun_detach(struct tun_struct *tun)
+static void tun_flow_update(struct tun_struct *tun, struct sk_buff *skb,
+                           u16 queue_index)
+{
+       struct hlist_head *head;
+       struct tun_flow_entry *e;
+       unsigned long delay = tun->ageing_time;
+       u32 rxhash = skb_get_rxhash(skb);
+
+       if (!rxhash)
+               return;
+       else
+               head = &tun->flows[tun_hashfn(rxhash)];
+
+       rcu_read_lock();
+
+       if (tun->numqueues == 1)
+               goto unlock;
+
+       e = tun_flow_find(head, rxhash);
+       if (likely(e)) {
+               /* TODO: keep queueing to old queue until it's empty? */
+               e->queue_index = queue_index;
+               e->updated = jiffies;
+       } else {
+               spin_lock_bh(&tun->lock);
+               if (!tun_flow_find(head, rxhash))
+                       tun_flow_create(tun, head, rxhash, queue_index);
+
+               if (!timer_pending(&tun->flow_gc_timer))
+                       mod_timer(&tun->flow_gc_timer,
+                                 round_jiffies_up(jiffies + delay));
+               spin_unlock_bh(&tun->lock);
+       }
+
+unlock:
+       rcu_read_unlock();
+}
+
+/* We try to identify a flow through its rxhash first. The reason that
+ * we do not check the rxq no. is because some cards (e.g. 82599) choose
+ * the rxq based on the txq where the last packet of the flow was sent.
+ * As the userspace application moves between processors, we may get a
+ * different rxq no. here. If we cannot get an rxhash, then we hope the
+ * rxq no. may help here.
+ */
+static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+       struct tun_struct *tun = netdev_priv(dev);
+       struct tun_flow_entry *e;
+       u32 txq = 0;
+       u32 numqueues = 0;
+
+       rcu_read_lock();
+       numqueues = tun->numqueues;
+
+       txq = skb_get_rxhash(skb);
+       if (txq) {
+               e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
+               if (e)
+                       txq = e->queue_index;
+               else
+                       /* use multiply and shift instead of expensive divide */
+                       txq = ((u64)txq * numqueues) >> 32;
+       } else if (likely(skb_rx_queue_recorded(skb))) {
+               txq = skb_get_rx_queue(skb);
+               while (unlikely(txq >= numqueues))
+                       txq -= numqueues;
+       }
+
+       rcu_read_unlock();
+       return txq;
+}
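tun_select_queue() maps a 32-bit rxhash onto the active queues without a division: because the hash is below 2^32, ((u64)hash * numqueues) >> 32 always falls in [0, numqueues). A small stand-alone illustration of that mapping; the queue count and sample hashes are arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    /* hash in [0, 2^32) scaled into [0, n) with one multiply and a shift */
    static uint32_t map_hash(uint32_t hash, uint32_t n)
    {
            return (uint32_t)(((uint64_t)hash * n) >> 32);
    }

    int main(void)
    {
            uint32_t samples[] = { 0, 0x20000000u, 0x80000000u, 0xffffffffu };
            uint32_t n = 8;
            unsigned int i;

            for (i = 0; i < 4; i++)
                    printf("hash %#010x -> queue %u of %u\n",
                           (unsigned int)samples[i], map_hash(samples[i], n), n);
            return 0;
    }

With eight queues the samples land on queues 0, 1, 4 and 7, and the distribution stays uniform for uniformly distributed hashes.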
+
+static inline bool tun_not_capable(struct tun_struct *tun)
+{
+       const struct cred *cred = current_cred();
+
+       return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
+                 (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
+               !capable(CAP_NET_ADMIN);
+}
+
+static void tun_set_real_num_queues(struct tun_struct *tun)
+{
+       netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
+       netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
+}
+
+static void __tun_detach(struct tun_file *tfile, bool clean)
+{
+       struct tun_file *ntfile;
+       struct tun_struct *tun;
+       struct net_device *dev;
+
+       tun = rcu_dereference_protected(tfile->tun,
+                                       lockdep_rtnl_is_held());
+       if (tun) {
+               u16 index = tfile->queue_index;
+               BUG_ON(index >= tun->numqueues);
+               dev = tun->dev;
+
+               rcu_assign_pointer(tun->tfiles[index],
+                                  tun->tfiles[tun->numqueues - 1]);
+               rcu_assign_pointer(tfile->tun, NULL);
+               ntfile = rcu_dereference_protected(tun->tfiles[index],
+                                                  lockdep_rtnl_is_held());
+               ntfile->queue_index = index;
+
+               --tun->numqueues;
+               sock_put(&tfile->sk);
+
+               synchronize_net();
+               tun_flow_delete_by_queue(tun, tun->numqueues + 1);
+               /* Drop read queue */
+               skb_queue_purge(&tfile->sk.sk_receive_queue);
+               tun_set_real_num_queues(tun);
+
+               if (tun->numqueues == 0 && !(tun->flags & TUN_PERSIST))
+                       if (dev->reg_state == NETREG_REGISTERED)
+                               unregister_netdevice(dev);
+       }
+
+       if (clean) {
+               BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
+                                &tfile->socket.flags));
+               sk_release_kernel(&tfile->sk);
+       }
+}
+
+static void tun_detach(struct tun_file *tfile, bool clean)
 {
        rtnl_lock();
-       __tun_detach(tun);
+       __tun_detach(tfile, clean);
        rtnl_unlock();
 }
 
+static void tun_detach_all(struct net_device *dev)
+{
+       struct tun_struct *tun = netdev_priv(dev);
+       struct tun_file *tfile;
+       int i, n = tun->numqueues;
+
+       for (i = 0; i < n; i++) {
+               tfile = rcu_dereference_protected(tun->tfiles[i],
+                                                 lockdep_rtnl_is_held());
+               BUG_ON(!tfile);
+               wake_up_all(&tfile->wq.wait);
+               rcu_assign_pointer(tfile->tun, NULL);
+               --tun->numqueues;
+       }
+       BUG_ON(tun->numqueues != 0);
+
+       synchronize_net();
+       for (i = 0; i < n; i++) {
+               tfile = rcu_dereference_protected(tun->tfiles[i],
+                                                 lockdep_rtnl_is_held());
+               /* Drop read queue */
+               skb_queue_purge(&tfile->sk.sk_receive_queue);
+               sock_put(&tfile->sk);
+       }
+}
+
+static int tun_attach(struct tun_struct *tun, struct file *file)
+{
+       struct tun_file *tfile = file->private_data;
+       int err;
+
+       err = -EINVAL;
+       if (rcu_dereference_protected(tfile->tun, lockdep_rtnl_is_held()))
+               goto out;
+
+       err = -EBUSY;
+       if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
+               goto out;
+
+       err = -E2BIG;
+       if (tun->numqueues == MAX_TAP_QUEUES)
+               goto out;
+
+       err = 0;
+
+       /* Re-attach the filter to the persistent device */
+       if (tun->filter_attached == true) {
+               err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+               if (!err)
+                       goto out;
+       }
+       tfile->queue_index = tun->numqueues;
+       rcu_assign_pointer(tfile->tun, tun);
+       rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+       sock_hold(&tfile->sk);
+       tun->numqueues++;
+
+       tun_set_real_num_queues(tun);
+
+       if (tun->numqueues == 1)
+               netif_carrier_on(tun->dev);
+
+       /* device is allowed to go away first, so no need to hold extra
+        * refcnt.
+        */
+
+out:
+       return err;
+}
+
 static struct tun_struct *__tun_get(struct tun_file *tfile)
 {
-       struct tun_struct *tun = NULL;
+       struct tun_struct *tun;
 
-       if (atomic_inc_not_zero(&tfile->count))
-               tun = tfile->tun;
+       rcu_read_lock();
+       tun = rcu_dereference(tfile->tun);
+       if (tun)
+               dev_hold(tun->dev);
+       rcu_read_unlock();
 
        return tun;
 }
@@ -220,10 +523,7 @@ static struct tun_struct *tun_get(struct file *file)
 
 static void tun_put(struct tun_struct *tun)
 {
-       struct tun_file *tfile = tun->tfile;
-
-       if (atomic_dec_and_test(&tfile->count))
-               tun_detach(tfile->tun);
+       dev_put(tun->dev);
 }
 
 /* TAP filtering */
@@ -343,38 +643,20 @@ static const struct ethtool_ops tun_ethtool_ops;
 /* Net device detach from fd. */
 static void tun_net_uninit(struct net_device *dev)
 {
-       struct tun_struct *tun = netdev_priv(dev);
-       struct tun_file *tfile = tun->tfile;
-
-       /* Inform the methods they need to stop using the dev.
-        */
-       if (tfile) {
-               wake_up_all(&tun->wq.wait);
-               if (atomic_dec_and_test(&tfile->count))
-                       __tun_detach(tun);
-       }
-}
-
-static void tun_free_netdev(struct net_device *dev)
-{
-       struct tun_struct *tun = netdev_priv(dev);
-
-       BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags));
-
-       sk_release_kernel(tun->socket.sk);
+       tun_detach_all(dev);
 }
 
 /* Net device open. */
 static int tun_net_open(struct net_device *dev)
 {
-       netif_start_queue(dev);
+       netif_tx_start_all_queues(dev);
        return 0;
 }
 
 /* Net device close. */
 static int tun_net_close(struct net_device *dev)
 {
-       netif_stop_queue(dev);
+       netif_tx_stop_all_queues(dev);
        return 0;
 }
 
@@ -382,28 +664,39 @@ static int tun_net_close(struct net_device *dev)
 static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct tun_struct *tun = netdev_priv(dev);
+       int txq = skb->queue_mapping;
+       struct tun_file *tfile;
 
-       tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
+       rcu_read_lock();
+       tfile = rcu_dereference(tun->tfiles[txq]);
 
        /* Drop packet if interface is not attached */
-       if (!tun->tfile)
+       if (txq >= tun->numqueues)
                goto drop;
 
+       tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
+
+       BUG_ON(!tfile);
+
        /* Drop if the filter does not like it.
         * This is a noop if the filter is disabled.
         * Filter can be enabled only for the TAP devices. */
        if (!check_filter(&tun->txflt, skb))
                goto drop;
 
-       if (tun->socket.sk->sk_filter &&
-           sk_filter(tun->socket.sk, skb))
+       if (tfile->socket.sk->sk_filter &&
+           sk_filter(tfile->socket.sk, skb))
                goto drop;
 
-       if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) {
+       /* Limit the number of packets queued by dividing the txq length by
+        * the number of queues.
+        */
+       if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
+                         >= dev->tx_queue_len / tun->numqueues) {
                if (!(tun->flags & TUN_ONE_QUEUE)) {
                        /* Normal queueing mode. */
                        /* Packet scheduler handles dropping of further packets. */
-                       netif_stop_queue(dev);
+                       netif_stop_subqueue(dev, txq);
 
                        /* We won't see all dropped packets individually, so overrun
                         * error is more appropriate. */
@@ -422,18 +715,21 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_orphan(skb);
 
        /* Enqueue packet */
-       skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
+       skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
 
        /* Notify and wake up reader process */
-       if (tun->flags & TUN_FASYNC)
-               kill_fasync(&tun->fasync, SIGIO, POLL_IN);
-       wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
+       if (tfile->flags & TUN_FASYNC)
+               kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
+       wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
                                   POLLRDNORM | POLLRDBAND);
+
+       rcu_read_unlock();
        return NETDEV_TX_OK;
 
 drop:
        dev->stats.tx_dropped++;
        kfree_skb(skb);
+       rcu_read_unlock();
        return NETDEV_TX_OK;
 }
 
@@ -489,6 +785,7 @@ static const struct net_device_ops tun_netdev_ops = {
        .ndo_start_xmit         = tun_net_xmit,
        .ndo_change_mtu         = tun_net_change_mtu,
        .ndo_fix_features       = tun_net_fix_features,
+       .ndo_select_queue       = tun_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tun_poll_controller,
 #endif
@@ -504,11 +801,43 @@ static const struct net_device_ops tap_netdev_ops = {
        .ndo_set_rx_mode        = tun_net_mclist,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
+       .ndo_select_queue       = tun_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tun_poll_controller,
 #endif
 };
 
+static int tun_flow_init(struct tun_struct *tun)
+{
+       int i;
+
+       tun->flow_cache = kmem_cache_create("tun_flow_cache",
+                                           sizeof(struct tun_flow_entry), 0, 0,
+                                           NULL);
+       if (!tun->flow_cache)
+               return -ENOMEM;
+
+       for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
+               INIT_HLIST_HEAD(&tun->flows[i]);
+
+       tun->ageing_time = TUN_FLOW_EXPIRE;
+       setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
+       mod_timer(&tun->flow_gc_timer,
+                 round_jiffies_up(jiffies + tun->ageing_time));
+
+       return 0;
+}
+
+static void tun_flow_uninit(struct tun_struct *tun)
+{
+       del_timer_sync(&tun->flow_gc_timer);
+       tun_flow_flush(tun);
+
+       /* Wait for completion of call_rcu()'s */
+       rcu_barrier();
+       kmem_cache_destroy(tun->flow_cache);
+}
+
 /* Initialize net device. */
 static void tun_net_init(struct net_device *dev)
 {
@@ -545,7 +874,7 @@ static void tun_net_init(struct net_device *dev)
 /* Character device part */
 
 /* Poll */
-static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
+static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
 {
        struct tun_file *tfile = file->private_data;
        struct tun_struct *tun = __tun_get(tfile);
@@ -555,11 +884,11 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
        if (!tun)
                return POLLERR;
 
-       sk = tun->socket.sk;
+       sk = tfile->socket.sk;
 
        tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
 
-       poll_wait(file, &tun->wq.wait, wait);
+       poll_wait(file, &tfile->wq.wait, wait);
 
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
@@ -578,11 +907,11 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
 
 /* prepad is the amount to reserve at front.  len is length after that.
  * linear is a hint as to how much to copy (usually headers). */
-static struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
+static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
                                     size_t prepad, size_t len,
                                     size_t linear, int noblock)
 {
-       struct sock *sk = tun->socket.sk;
+       struct sock *sk = tfile->socket.sk;
        struct sk_buff *skb;
        int err;
 
@@ -682,9 +1011,9 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
 }
 
 /* Get packet from user space buffer */
-static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
-                           const struct iovec *iv, size_t total_len,
-                           size_t count, int noblock)
+static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+                           void *msg_control, const struct iovec *iv,
+                           size_t total_len, size_t count, int noblock)
 {
        struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
        struct sk_buff *skb;
@@ -754,7 +1083,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
        } else
                copylen = len;
 
-       skb = tun_alloc_skb(tun, align, copylen, gso.hdr_len, noblock);
+       skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
        if (IS_ERR(skb)) {
                if (PTR_ERR(skb) != -EAGAIN)
                        tun->dev->stats.rx_dropped++;
@@ -851,6 +1180,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
        tun->dev->stats.rx_packets++;
        tun->dev->stats.rx_bytes += len;
 
+       tun_flow_update(tun, skb, tfile->queue_index);
        return total_len;
 }
 
@@ -859,6 +1189,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
 {
        struct file *file = iocb->ki_filp;
        struct tun_struct *tun = tun_get(file);
+       struct tun_file *tfile = file->private_data;
        ssize_t result;
 
        if (!tun)
@@ -866,8 +1197,8 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
 
        tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
 
-       result = tun_get_user(tun, NULL, iv, iov_length(iv, count), count,
-                             file->f_flags & O_NONBLOCK);
+       result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
+                             count, file->f_flags & O_NONBLOCK);
 
        tun_put(tun);
        return result;
@@ -875,6 +1206,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
 
 /* Put packet to the user space buffer */
 static ssize_t tun_put_user(struct tun_struct *tun,
+                           struct tun_file *tfile,
                            struct sk_buff *skb,
                            const struct iovec *iv, int len)
 {
@@ -954,7 +1286,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
        return total;
 }
 
-static ssize_t tun_do_read(struct tun_struct *tun,
+static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
                           struct kiocb *iocb, const struct iovec *iv,
                           ssize_t len, int noblock)
 {
@@ -965,12 +1297,12 @@ static ssize_t tun_do_read(struct tun_struct *tun,
        tun_debug(KERN_INFO, tun, "tun_chr_read\n");
 
        if (unlikely(!noblock))
-               add_wait_queue(&tun->wq.wait, &wait);
+               add_wait_queue(&tfile->wq.wait, &wait);
        while (len) {
                current->state = TASK_INTERRUPTIBLE;
 
                /* Read frames from the queue */
-               if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
+               if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
                        if (noblock) {
                                ret = -EAGAIN;
                                break;
@@ -988,16 +1320,16 @@ static ssize_t tun_do_read(struct tun_struct *tun,
                        schedule();
                        continue;
                }
-               netif_wake_queue(tun->dev);
+               netif_wake_subqueue(tun->dev, tfile->queue_index);
 
-               ret = tun_put_user(tun, skb, iv, len);
+               ret = tun_put_user(tun, tfile, skb, iv, len);
                kfree_skb(skb);
                break;
        }
 
        current->state = TASK_RUNNING;
        if (unlikely(!noblock))
-               remove_wait_queue(&tun->wq.wait, &wait);
+               remove_wait_queue(&tfile->wq.wait, &wait);
 
        return ret;
 }
@@ -1018,13 +1350,22 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
                goto out;
        }
 
-       ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
+       ret = tun_do_read(tun, tfile, iocb, iv, len,
+                         file->f_flags & O_NONBLOCK);
        ret = min_t(ssize_t, ret, len);
 out:
        tun_put(tun);
        return ret;
 }
 
+static void tun_free_netdev(struct net_device *dev)
+{
+       struct tun_struct *tun = netdev_priv(dev);
+
+       tun_flow_uninit(tun);
+       free_netdev(dev);
+}
+
 static void tun_setup(struct net_device *dev)
 {
        struct tun_struct *tun = netdev_priv(dev);
@@ -1053,7 +1394,7 @@ static struct rtnl_link_ops tun_link_ops __read_mostly = {
 
 static void tun_sock_write_space(struct sock *sk)
 {
-       struct tun_struct *tun;
+       struct tun_file *tfile;
        wait_queue_head_t *wqueue;
 
        if (!sock_writeable(sk))
@@ -1067,37 +1408,46 @@ static void tun_sock_write_space(struct sock *sk)
                wake_up_interruptible_sync_poll(wqueue, POLLOUT |
                                                POLLWRNORM | POLLWRBAND);
 
-       tun = tun_sk(sk)->tun;
-       kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
-}
-
-static void tun_sock_destruct(struct sock *sk)
-{
-       free_netdev(tun_sk(sk)->tun->dev);
+       tfile = container_of(sk, struct tun_file, sk);
+       kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
 }
 
 static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
                       struct msghdr *m, size_t total_len)
 {
-       struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
-       return tun_get_user(tun, m->msg_control, m->msg_iov, total_len,
-                           m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
+       int ret;
+       struct tun_file *tfile = container_of(sock, struct tun_file, socket);
+       struct tun_struct *tun = __tun_get(tfile);
+
+       if (!tun)
+               return -EBADFD;
+       ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
+                          m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
+       tun_put(tun);
+       return ret;
 }
 
+
 static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
                       struct msghdr *m, size_t total_len,
                       int flags)
 {
-       struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
+       struct tun_file *tfile = container_of(sock, struct tun_file, socket);
+       struct tun_struct *tun = __tun_get(tfile);
        int ret;
+
+       if (!tun)
+               return -EBADFD;
+
        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
                return -EINVAL;
-       ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
+       ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
                          flags & MSG_DONTWAIT);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
        }
+       tun_put(tun);
        return ret;
 }
 
@@ -1118,7 +1468,7 @@ static const struct proto_ops tun_socket_ops = {
 static struct proto tun_proto = {
        .name           = "tun",
        .owner          = THIS_MODULE,
-       .obj_size       = sizeof(struct tun_sock),
+       .obj_size       = sizeof(struct tun_file),
 };
 
 static int tun_flags(struct tun_struct *tun)
@@ -1139,6 +1489,9 @@ static int tun_flags(struct tun_struct *tun)
        if (tun->flags & TUN_VNET_HDR)
                flags |= IFF_VNET_HDR;
 
+       if (tun->flags & TUN_TAP_MQ)
+               flags |= IFF_MULTI_QUEUE;
+
        return flags;
 }
 
@@ -1175,15 +1528,13 @@ static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
 
 static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 {
-       struct sock *sk;
        struct tun_struct *tun;
+       struct tun_file *tfile = file->private_data;
        struct net_device *dev;
        int err;
 
        dev = __dev_get_by_name(net, ifr->ifr_name);
        if (dev) {
-               const struct cred *cred = current_cred();
-
                if (ifr->ifr_flags & IFF_TUN_EXCL)
                        return -EBUSY;
                if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
@@ -1193,11 +1544,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                else
                        return -EINVAL;
 
-               if (((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
-                    (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
-                   !capable(CAP_NET_ADMIN))
+               if (tun_not_capable(tun))
                        return -EPERM;
-               err = security_tun_dev_attach(tun->socket.sk);
+               err = security_tun_dev_attach(tfile->socket.sk);
                if (err < 0)
                        return err;
 
@@ -1230,8 +1579,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                if (*ifr->ifr_name)
                        name = ifr->ifr_name;
 
-               dev = alloc_netdev(sizeof(struct tun_struct), name,
-                                  tun_setup);
+               dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
+                                      tun_setup,
+                                      MAX_TAP_QUEUES, MAX_TAP_QUEUES);
                if (!dev)
                        return -ENOMEM;
 
@@ -1243,46 +1593,35 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                tun->flags = flags;
                tun->txflt.count = 0;
                tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
-               set_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags);
-
-               err = -ENOMEM;
-               sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
-               if (!sk)
-                       goto err_free_dev;
 
-               sk_change_net(sk, net);
-               tun->socket.wq = &tun->wq;
-               init_waitqueue_head(&tun->wq.wait);
-               tun->socket.ops = &tun_socket_ops;
-               sock_init_data(&tun->socket, sk);
-               sk->sk_write_space = tun_sock_write_space;
-               sk->sk_sndbuf = INT_MAX;
-               sock_set_flag(sk, SOCK_ZEROCOPY);
+               tun->filter_attached = false;
+               tun->sndbuf = tfile->socket.sk->sk_sndbuf;
 
-               tun_sk(sk)->tun = tun;
+               spin_lock_init(&tun->lock);
 
-               security_tun_dev_post_create(sk);
+               security_tun_dev_post_create(&tfile->sk);
 
                tun_net_init(dev);
 
+               if (tun_flow_init(tun))
+                       goto err_free_dev;
+
                dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
                        TUN_USER_FEATURES;
                dev->features = dev->hw_features;
 
                err = register_netdevice(tun->dev);
                if (err < 0)
-                       goto err_free_sk;
+                       goto err_free_dev;
 
                if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
                    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
                    device_create_file(&tun->dev->dev, &dev_attr_group))
                        pr_err("Failed to create tun sysfs files\n");
 
-               sk->sk_destruct = tun_sock_destruct;
-
                err = tun_attach(tun, file);
                if (err < 0)
-                       goto failed;
+                       goto err_free_dev;
        }
 
        tun_debug(KERN_INFO, tun, "tun_set_iff\n");
@@ -1302,20 +1641,22 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
        else
                tun->flags &= ~TUN_VNET_HDR;
 
+       if (ifr->ifr_flags & IFF_MULTI_QUEUE)
+               tun->flags |= TUN_TAP_MQ;
+       else
+               tun->flags &= ~TUN_TAP_MQ;
+
        /* Make sure persistent devices do not get stuck in
         * xoff state.
         */
        if (netif_running(tun->dev))
-               netif_wake_queue(tun->dev);
+               netif_tx_wake_all_queues(tun->dev);
 
        strcpy(ifr->ifr_name, tun->dev->name);
        return 0;
 
- err_free_sk:
-       tun_free_netdev(dev);
  err_free_dev:
        free_netdev(dev);
- failed:
        return err;
 }
 
@@ -1370,13 +1711,91 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
        return 0;
 }
 
+static void tun_detach_filter(struct tun_struct *tun, int n)
+{
+       int i;
+       struct tun_file *tfile;
+
+       for (i = 0; i < n; i++) {
+               tfile = rcu_dereference_protected(tun->tfiles[i],
+                                                 lockdep_rtnl_is_held());
+               sk_detach_filter(tfile->socket.sk);
+       }
+
+       tun->filter_attached = false;
+}
+
+static int tun_attach_filter(struct tun_struct *tun)
+{
+       int i, ret = 0;
+       struct tun_file *tfile;
+
+       for (i = 0; i < tun->numqueues; i++) {
+               tfile = rcu_dereference_protected(tun->tfiles[i],
+                                                 lockdep_rtnl_is_held());
+               ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+               if (ret) {
+                       tun_detach_filter(tun, i);
+                       return ret;
+               }
+       }
+
+       tun->filter_attached = true;
+       return ret;
+}
+
+static void tun_set_sndbuf(struct tun_struct *tun)
+{
+       struct tun_file *tfile;
+       int i;
+
+       for (i = 0; i < tun->numqueues; i++) {
+               tfile = rcu_dereference_protected(tun->tfiles[i],
+                                               lockdep_rtnl_is_held());
+               tfile->socket.sk->sk_sndbuf = tun->sndbuf;
+       }
+}
+
+static int tun_set_queue(struct file *file, struct ifreq *ifr)
+{
+       struct tun_file *tfile = file->private_data;
+       struct tun_struct *tun;
+       struct net_device *dev;
+       int ret = 0;
+
+       rtnl_lock();
+
+       if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
+               dev = __dev_get_by_name(tfile->net, ifr->ifr_name);
+               if (!dev) {
+                       ret = -EINVAL;
+                       goto unlock;
+               }
+
+               tun = netdev_priv(dev);
+               if (dev->netdev_ops != &tap_netdev_ops &&
+                       dev->netdev_ops != &tun_netdev_ops)
+                       ret = -EINVAL;
+               else if (tun_not_capable(tun))
+                       ret = -EPERM;
+               else
+                       ret = tun_attach(tun, file);
+       } else if (ifr->ifr_flags & IFF_DETACH_QUEUE)
+               __tun_detach(tfile, false);
+       else
+               ret = -EINVAL;
+
+unlock:
+       rtnl_unlock();
+       return ret;
+}
+
 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                            unsigned long arg, int ifreq_len)
 {
        struct tun_file *tfile = file->private_data;
        struct tun_struct *tun;
        void __user* argp = (void __user*)arg;
-       struct sock_fprog fprog;
        struct ifreq ifr;
        kuid_t owner;
        kgid_t group;
@@ -1384,7 +1803,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        int vnet_hdr_sz;
        int ret;
 
-       if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
+       if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
                if (copy_from_user(&ifr, argp, ifreq_len))
                        return -EFAULT;
        } else {
@@ -1395,10 +1814,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                 * This is needed because we never checked for invalid flags on
                 * TUNSETIFF. */
                return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
-                               IFF_VNET_HDR,
+                               IFF_VNET_HDR | IFF_MULTI_QUEUE,
                                (unsigned int __user*)argp);
-       }
+       } else if (cmd == TUNSETQUEUE)
+               return tun_set_queue(file, &ifr);
 
+       ret = 0;
        rtnl_lock();
 
        tun = __tun_get(tfile);
@@ -1419,7 +1840,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        if (!tun)
                goto unlock;
 
-       tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);
+       tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
 
        ret = 0;
        switch (cmd) {
@@ -1441,11 +1862,16 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                break;
 
        case TUNSETPERSIST:
-               /* Disable/Enable persist mode */
-               if (arg)
+               /* Disable/Enable persist mode. Keep an extra reference to the
+                * module to prevent it from being unloaded.
+                */
+               if (arg) {
                        tun->flags |= TUN_PERSIST;
-               else
+                       __module_get(THIS_MODULE);
+               } else {
                        tun->flags &= ~TUN_PERSIST;
+                       module_put(THIS_MODULE);
+               }
 
                tun_debug(KERN_INFO, tun, "persist %s\n",
                          arg ? "enabled" : "disabled");
@@ -1459,7 +1885,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                        break;
                }
                tun->owner = owner;
-               tun_debug(KERN_INFO, tun, "owner set to %d\n",
+               tun_debug(KERN_INFO, tun, "owner set to %u\n",
                          from_kuid(&init_user_ns, tun->owner));
                break;
 
@@ -1471,7 +1897,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                        break;
                }
                tun->group = group;
-               tun_debug(KERN_INFO, tun, "group set to %d\n",
+               tun_debug(KERN_INFO, tun, "group set to %u\n",
                          from_kgid(&init_user_ns, tun->group));
                break;
 
@@ -1523,7 +1949,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                break;
 
        case TUNGETSNDBUF:
-               sndbuf = tun->socket.sk->sk_sndbuf;
+               sndbuf = tfile->socket.sk->sk_sndbuf;
                if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
                        ret = -EFAULT;
                break;
@@ -1534,7 +1960,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                        break;
                }
 
-               tun->socket.sk->sk_sndbuf = sndbuf;
+               tun->sndbuf = sndbuf;
+               tun_set_sndbuf(tun);
                break;
 
        case TUNGETVNETHDRSZ:
@@ -1562,10 +1989,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
                        break;
                ret = -EFAULT;
-               if (copy_from_user(&fprog, argp, sizeof(fprog)))
+               if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
                        break;
 
-               ret = sk_attach_filter(&fprog, tun->socket.sk);
+               ret = tun_attach_filter(tun);
                break;
 
        case TUNDETACHFILTER:
@@ -1573,7 +2000,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                ret = -EINVAL;
                if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
                        break;
-               ret = sk_detach_filter(tun->socket.sk);
+               ret = 0;
+               tun_detach_filter(tun, tun->numqueues);
                break;
 
        default:
@@ -1625,27 +2053,21 @@ static long tun_chr_compat_ioctl(struct file *file,
 
 static int tun_chr_fasync(int fd, struct file *file, int on)
 {
-       struct tun_struct *tun = tun_get(file);
+       struct tun_file *tfile = file->private_data;
        int ret;
 
-       if (!tun)
-               return -EBADFD;
-
-       tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);
-
-       if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
+       if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
                goto out;
 
        if (on) {
                ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
                if (ret)
                        goto out;
-               tun->flags |= TUN_FASYNC;
+               tfile->flags |= TUN_FASYNC;
        } else
-               tun->flags &= ~TUN_FASYNC;
+               tfile->flags &= ~TUN_FASYNC;
        ret = 0;
 out:
-       tun_put(tun);
        return ret;
 }
 
@@ -1655,44 +2077,39 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 
        DBG1(KERN_INFO, "tunX: tun_chr_open\n");
 
-       tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
+       tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
+                                           &tun_proto);
        if (!tfile)
                return -ENOMEM;
-       atomic_set(&tfile->count, 0);
-       tfile->tun = NULL;
+       rcu_assign_pointer(tfile->tun, NULL);
        tfile->net = get_net(current->nsproxy->net_ns);
+       tfile->flags = 0;
+
+       rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
+       init_waitqueue_head(&tfile->wq.wait);
+
+       tfile->socket.file = file;
+       tfile->socket.ops = &tun_socket_ops;
+
+       sock_init_data(&tfile->socket, &tfile->sk);
+       sk_change_net(&tfile->sk, tfile->net);
+
+       tfile->sk.sk_write_space = tun_sock_write_space;
+       tfile->sk.sk_sndbuf = INT_MAX;
+
        file->private_data = tfile;
+       set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
+
        return 0;
 }
 
 static int tun_chr_close(struct inode *inode, struct file *file)
 {
        struct tun_file *tfile = file->private_data;
-       struct tun_struct *tun;
+       struct net *net = tfile->net;
 
-       tun = __tun_get(tfile);
-       if (tun) {
-               struct net_device *dev = tun->dev;
-
-               tun_debug(KERN_INFO, tun, "tun_chr_close\n");
-
-               __tun_detach(tun);
-
-               /* If desirable, unregister the netdevice. */
-               if (!(tun->flags & TUN_PERSIST)) {
-                       rtnl_lock();
-                       if (dev->reg_state == NETREG_REGISTERED)
-                               unregister_netdevice(dev);
-                       rtnl_unlock();
-               }
-       }
-
-       tun = tfile->tun;
-       if (tun)
-               sock_put(tun->socket.sk);
-
-       put_net(tfile->net);
-       kfree(tfile);
+       tun_detach(tfile, true);
+       put_net(net);
 
        return 0;
 }
@@ -1819,14 +2236,13 @@ static void tun_cleanup(void)
  * holding a reference to the file for as long as the socket is in use. */
 struct socket *tun_get_socket(struct file *file)
 {
-       struct tun_struct *tun;
+       struct tun_file *tfile;
        if (file->f_op != &tun_fops)
                return ERR_PTR(-EINVAL);
-       tun = tun_get(file);
-       if (!tun)
+       tfile = file->private_data;
+       if (!tfile)
                return ERR_PTR(-EBADFD);
-       tun_put(tun);
-       return &tun->socket;
+       return &tfile->socket;
 }
 EXPORT_SYMBOL_GPL(tun_get_socket);
 
index e522ff70444cd0d7e8f1ce34132055e438ded7ce..24f6b27e169fd18ab14002544de4e71fe6897f7d 100644 (file)
@@ -264,6 +264,7 @@ static void veth_setup(struct net_device *dev)
        ether_setup(dev);
 
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 
        dev->netdev_ops = &veth_netdev_ops;
        dev->ethtool_ops = &veth_ethtool_ops;
index 258ca596e1bcce7fc0af5229fddd7b0bfef4174a..982d16b5a846f0e1575fca4d101c91f32fa93ba4 100644 (file)
@@ -6,7 +6,6 @@ menu "PPS support"
 
 config PPS
        tristate "PPS support"
-       depends on EXPERIMENTAL
        ---help---
          PPS (Pulse Per Second) is a special pulse provided by some GPS
          antennae. Userland can use it to get a high-precision time
index ffdf712f9a67c7e725b790ff3fed54fc43985901..70c5836ebfc9a04d1da431cb522070acd8b13cee 100644 (file)
@@ -4,13 +4,9 @@
 
 menu "PTP clock support"
 
-comment "Enable Device Drivers -> PPS to see the PTP clock options."
-       depends on PPS=n
-
 config PTP_1588_CLOCK
        tristate "PTP clock support"
-       depends on EXPERIMENTAL
-       depends on PPS
+       select PPS
        help
          The IEEE 1588 standard defines a method to precisely
          synchronize distributed clocks over Ethernet networks. The
@@ -29,8 +25,9 @@ config PTP_1588_CLOCK
 
 config PTP_1588_CLOCK_GIANFAR
        tristate "Freescale eTSEC as PTP clock"
-       depends on PTP_1588_CLOCK
        depends on GIANFAR
+       select PTP_1588_CLOCK
+       default y
        help
          This driver adds support for using the eTSEC as a PTP
          clock. This clock is only useful if your PTP programs are
@@ -42,8 +39,9 @@ config PTP_1588_CLOCK_GIANFAR
 
 config PTP_1588_CLOCK_IXP46X
        tristate "Intel IXP46x as PTP clock"
-       depends on PTP_1588_CLOCK
        depends on IXP4XX_ETH
+       select PTP_1588_CLOCK
+       default y
        help
          This driver adds support for using the IXP46X as a PTP
          clock. This clock is only useful if your PTP programs are
@@ -54,13 +52,13 @@ config PTP_1588_CLOCK_IXP46X
          will be called ptp_ixp46x.
 
 comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks."
-       depends on PTP_1588_CLOCK && (PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n)
+       depends on PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n
 
 config DP83640_PHY
        tristate "Driver for the National Semiconductor DP83640 PHYTER"
-       depends on PTP_1588_CLOCK
        depends on NETWORK_PHY_TIMESTAMPING
        depends on PHYLIB
+       select PTP_1588_CLOCK
        ---help---
          Supports the DP83640 PHYTER with IEEE 1588 features.
 
@@ -74,8 +72,9 @@ config DP83640_PHY
 
 config PTP_1588_CLOCK_PCH
        tristate "Intel PCH EG20T as PTP clock"
-       depends on PTP_1588_CLOCK
        depends on PCH_GBE
+       select PTP_1588_CLOCK
+       default y
        help
          This driver adds support for using the PCH EG20T as a PTP
          clock. The hardware supports time stamping of PTP packets
index e7f301da290286153180fc5f1a9e02cdd9bb5f01..4f8ae8057a7e2a6e1e72cc9b607e4f21e8f5c195 100644 (file)
@@ -33,9 +33,13 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
 {
        struct ptp_clock_caps caps;
        struct ptp_clock_request req;
+       struct ptp_sys_offset sysoff;
        struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
        struct ptp_clock_info *ops = ptp->info;
+       struct ptp_clock_time *pct;
+       struct timespec ts;
        int enable, err = 0;
+       unsigned int i;
 
        switch (cmd) {
 
@@ -88,6 +92,34 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                err = ops->enable(ops, &req, enable);
                break;
 
+       case PTP_SYS_OFFSET:
+               if (copy_from_user(&sysoff, (void __user *)arg,
+                                  sizeof(sysoff))) {
+                       err = -EFAULT;
+                       break;
+               }
+               if (sysoff.n_samples > PTP_MAX_SAMPLES) {
+                       err = -EINVAL;
+                       break;
+               }
+               pct = &sysoff.ts[0];
+               for (i = 0; i < sysoff.n_samples; i++) {
+                       getnstimeofday(&ts);
+                       pct->sec = ts.tv_sec;
+                       pct->nsec = ts.tv_nsec;
+                       pct++;
+                       ptp->info->gettime(ptp->info, &ts);
+                       pct->sec = ts.tv_sec;
+                       pct->nsec = ts.tv_nsec;
+                       pct++;
+               }
+               getnstimeofday(&ts);
+               pct->sec = ts.tv_sec;
+               pct->nsec = ts.tv_nsec;
+               if (copy_to_user((void __user *)arg, &sysoff, sizeof(sysoff)))
+                       err = -EFAULT;
+               break;
+
        default:
                err = -ENOTTY;
                break;
index c9f0005c35e22d6b0e1df0a2b8f5f327ee1daa78..c45eabc135e1f00b7825d017b37df1666acb203e 100644 (file)
@@ -45,6 +45,7 @@ extern void sk_unattached_filter_destroy(struct sk_filter *fp);
 extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 extern int sk_detach_filter(struct sock *sk);
 extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
+extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len);
 
 #ifdef CONFIG_BPF_JIT
 extern void bpf_jit_compile(struct sk_filter *fp);
index c4e23d0294987a64a3fbc6202df4bb3dbe6f8ff5..b5c16c3df4581909ee35ad7d1a5f5c5ff10ef50b 100644 (file)
@@ -33,6 +33,9 @@ struct cpsw_platform_data {
 
        u32     slaves;         /* number of slave cpgmac ports */
        struct cpsw_slave_data  *slave_data;
+       u32     cpts_active_slave; /* time stamping slave */
+       u32     cpts_clock_mult;  /* convert input clock ticks to nanoseconds */
+       u32     cpts_clock_shift; /* convert input clock ticks to nanoseconds */
 
        u32     ale_reg_ofs;    /* address lookup engine reg offset */
        u32     ale_entries;    /* ale table size */
@@ -41,6 +44,7 @@ struct cpsw_platform_data {
        u32     host_port_num; /* The port number for the host port */
 
        u32     hw_stats_reg_ofs;  /* cpsw hardware statistics counters */
+       u32     cpts_reg_ofs;      /* cpts registers */
 
        u32     bd_ram_ofs;   /* embedded buffer descriptor RAM offset*/
        u32     bd_ram_size;  /*buffer descriptor ram size */
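Editor's note: the new cpts_clock_mult and cpts_clock_shift fields carry the same kind of scaling factors the kernel's cyclecounter code uses to turn hardware ticks into nanoseconds. A hedged sketch of the conversion they describe (the 250 MHz figure and the helper name are illustrative assumptions, not taken from this patch):

    #include <stdint.h>

    /* Illustrative only -- assumes cpts follows the usual cyclecounter
     * convention ns = (ticks * mult) >> shift.  With a 250 MHz reference
     * clock one tick is 4 ns, so shift = 29 and mult = 4u << 29
     * (0x80000000) reproduce that, provided ticks * mult fits in 64 bits.
     */
    static uint64_t cpts_ticks_to_ns(uint64_t ticks, uint32_t mult, uint32_t shift)
    {
            return (ticks * mult) >> shift;
    }
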
diff --git a/include/linux/timecompare.h b/include/linux/timecompare.h
deleted file mode 100644 (file)
index 546e223..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * Utility code which helps transforming between two different time
- * bases, called "source" and "target" time in this code.
- *
- * Source time has to be provided via the timecounter API while target
- * time is accessed via a function callback whose prototype
- * intentionally matches ktime_get() and ktime_get_real(). These
- * interfaces where chosen like this so that the code serves its
- * initial purpose without additional glue code.
- *
- * This purpose is synchronizing a hardware clock in a NIC with system
- * time, in order to implement the Precision Time Protocol (PTP,
- * IEEE1588) with more accurate hardware assisted time stamping.  In
- * that context only synchronization against system time (=
- * ktime_get_real()) is currently needed. But this utility code might
- * become useful in other situations, which is why it was written as
- * general purpose utility code.
- *
- * The source timecounter is assumed to return monotonically
- * increasing time (but this code does its best to compensate if that
- * is not the case) whereas target time may jump.
- *
- * The target time corresponding to a source time is determined by
- * reading target time, reading source time, reading target time
- * again, then assuming that average target time corresponds to source
- * time. In other words, the assumption is that reading the source
- * time is slow and involves equal time for sending the request and
- * receiving the reply, whereas reading target time is assumed to be
- * fast.
- *
- * Copyright (C) 2009 Intel Corporation.
- * Author: Patrick Ohly <patrick.ohly@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- */
-#ifndef _LINUX_TIMECOMPARE_H
-#define _LINUX_TIMECOMPARE_H
-
-#include <linux/clocksource.h>
-#include <linux/ktime.h>
-
-/**
- * struct timecompare - stores state and configuration for the two clocks
- *
- * Initialize to zero, then set source/target/num_samples.
- *
- * Transformation between source time and target time is done with:
- * target_time = source_time + offset +
- *               (source_time - last_update) * skew /
- *               TIMECOMPARE_SKEW_RESOLUTION
- *
- * @source:          used to get source time stamps via timecounter_read()
- * @target:          function returning target time (for example, ktime_get
- *                   for monotonic time, or ktime_get_real for wall clock)
- * @num_samples:     number of times that source time and target time are to
- *                   be compared when determining their offset
- * @offset:          (target time - source time) at the time of the last update
- * @skew:            average (target time - source time) / delta source time *
- *                   TIMECOMPARE_SKEW_RESOLUTION
- * @last_update:     last source time stamp when time offset was measured
- */
-struct timecompare {
-       struct timecounter *source;
-       ktime_t (*target)(void);
-       int num_samples;
-
-       s64 offset;
-       s64 skew;
-       u64 last_update;
-};
-
-/**
- * timecompare_transform - transform source time stamp into target time base
- * @sync:            context for time sync
- * @source_tstamp:   the result of timecounter_read() or
- *                   timecounter_cyc2time()
- */
-extern ktime_t timecompare_transform(struct timecompare *sync,
-                                    u64 source_tstamp);
-
-/**
- * timecompare_offset - measure current (target time - source time) offset
- * @sync:            context for time sync
- * @offset:          average offset during sample period returned here
- * @source_tstamp:   average source time during sample period returned here
- *
- * Returns number of samples used. Might be zero (= no result) in the
- * unlikely case that target time was monotonically decreasing for all
- * samples (= broken).
- */
-extern int timecompare_offset(struct timecompare *sync,
-                             s64 *offset,
-                             u64 *source_tstamp);
-
-extern void __timecompare_update(struct timecompare *sync,
-                                u64 source_tstamp);
-
-/**
- * timecompare_update - update offset and skew by measuring current offset
- * @sync:            context for time sync
- * @source_tstamp:   the result of timecounter_read() or
- *                   timecounter_cyc2time(), pass zero to force update
- *
- * Updates are only done at most once per second.
- */
-static inline void timecompare_update(struct timecompare *sync,
-                                     u64 source_tstamp)
-{
-       if (!source_tstamp ||
-           (s64)(source_tstamp - sync->last_update) >= NSEC_PER_SEC)
-               __timecompare_update(sync, source_tstamp);
-}
-
-#endif /* _LINUX_TIMECOMPARE_H */
index b1bea03274d5dd0ca36d31e90b361d0dc63225ec..2d32d073a6f9e4275d47c27cbd63b169a3267b9a 100644 (file)
@@ -43,6 +43,7 @@
 /* Socket filtering */
 #define SO_ATTACH_FILTER       26
 #define SO_DETACH_FILTER       27
+#define SO_GET_FILTER          SO_ATTACH_FILTER
 
 #define SO_PEERNAME            28
 #define SO_TIMESTAMP           29
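Editor's note: SO_GET_FILTER deliberately reuses the SO_ATTACH_FILTER option number, so a filter installed with setsockopt() can be read back with getsockopt() on the same option. A hedged userspace sketch follows; the zero-length probe for the instruction count is an assumption based on the sk_get_filter() helper declared earlier in this series, whose getsockopt() plumbing sits in a hunk outside this excerpt:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/socket.h>
    #include <linux/filter.h>

    /* Read back the classic BPF filter attached to a socket, if any.
     * The length is assumed to be counted in instructions, not bytes,
     * and a zero-length first call to report the instruction count.
     */
    static int dump_filter(int fd)
    {
            struct sock_filter *insns;
            socklen_t len = 0;

            if (getsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, NULL, &len) < 0)
                    return -1;
            if (len == 0)
                    return 0;       /* no filter attached */

            insns = calloc(len, sizeof(*insns));
            if (!insns)
                    return -1;
            if (getsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, insns, &len) < 0) {
                    free(insns);
                    return -1;
            }
            printf("%u BPF instructions attached\n", (unsigned)len);
            free(insns);
            return 0;
    }
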
index 25a585ce23e68243e1f0a1610f240624aaef6bde..958497ad5bb563f40c6a46cf7e8aac951ef98d36 100644 (file)
@@ -34,6 +34,7 @@
 #define TUN_ONE_QUEUE  0x0080
 #define TUN_PERSIST    0x0100  
 #define TUN_VNET_HDR   0x0200
+#define TUN_TAP_MQ      0x0400
 
 /* Ioctl defines */
 #define TUNSETNOCSUM  _IOW('T', 200, int) 
@@ -53,6 +54,7 @@
 #define TUNDETACHFILTER _IOW('T', 214, struct sock_fprog)
 #define TUNGETVNETHDRSZ _IOR('T', 215, int)
 #define TUNSETVNETHDRSZ _IOW('T', 216, int)
+#define TUNSETQUEUE  _IOW('T', 217, int)
 
 /* TUNSETIFF ifr flags */
 #define IFF_TUN                0x0001
@@ -61,6 +63,9 @@
 #define IFF_ONE_QUEUE  0x2000
 #define IFF_VNET_HDR   0x4000
 #define IFF_TUN_EXCL   0x8000
+#define IFF_MULTI_QUEUE 0x0100
+#define IFF_ATTACH_QUEUE 0x0200
+#define IFF_DETACH_QUEUE 0x0400
 
 /* Features for GSO (TUNSETOFFLOAD). */
 #define TUN_F_CSUM     0x01    /* You can hand me unchecksummed packets. */
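Editor's note: putting the new flags together, IFF_MULTI_QUEUE on TUNSETIFF creates (or adds a queue to) a multiqueue tap device, and TUNSETQUEUE with IFF_ATTACH_QUEUE/IFF_DETACH_QUEUE toggles an individual queue. A minimal userspace sketch of that flow (error handling trimmed, device name arbitrary):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/if.h>
    #include <linux/if_tun.h>

    /* Open (or add) one queue of a multiqueue tap device. */
    static int open_tap_queue(const char *name)
    {
            struct ifreq ifr;
            int fd = open("/dev/net/tun", O_RDWR);

            if (fd < 0)
                    return -1;
            memset(&ifr, 0, sizeof(ifr));
            ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
            strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
            if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;
    }

    /* Disable or re-enable delivery to this queue.  In this version of
     * the patch, re-attaching looks the device up by name, so pass it.
     */
    static int set_queue_state(int fd, const char *name, int attach)
    {
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            ifr.ifr_flags = attach ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
            strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
            return ioctl(fd, TUNSETQUEUE, &ifr);
    }

Calling open_tap_queue() again with the same name attaches a further queue to the existing device, up to MAX_TAP_QUEUES.
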
index d0513726711f5ac7f596633183843622525b9cc9..75dcbc587fb5db5a64103ca5e65f1d93a4914289 100644 (file)
@@ -12,6 +12,7 @@ enum {
        NETCONFA_UNSPEC,
        NETCONFA_IFINDEX,
        NETCONFA_FORWARDING,
+       NETCONFA_RP_FILTER,
        __NETCONFA_MAX
 };
 #define NETCONFA_MAX   (__NETCONFA_MAX - 1)
index 94e981f810a21d938e863797f9fad25652ef5068..b65c834f83e903c6dd00c062362a93bfd9a51aea 100644 (file)
@@ -67,12 +67,26 @@ struct ptp_perout_request {
        unsigned int rsv[4];          /* Reserved for future use. */
 };
 
+#define PTP_MAX_SAMPLES 25 /* Maximum allowed offset measurement samples. */
+
+struct ptp_sys_offset {
+       unsigned int n_samples; /* Desired number of measurements. */
+       unsigned int rsv[3];    /* Reserved for future use. */
+       /*
+        * Array of interleaved system/phc time stamps. The kernel
+        * will provide 2*n_samples + 1 time stamps, with the last
+        * one as a system time stamp.
+        */
+       struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
+};
+
 #define PTP_CLK_MAGIC '='
 
 #define PTP_CLOCK_GETCAPS  _IOR(PTP_CLK_MAGIC, 1, struct ptp_clock_caps)
 #define PTP_EXTTS_REQUEST  _IOW(PTP_CLK_MAGIC, 2, struct ptp_extts_request)
 #define PTP_PEROUT_REQUEST _IOW(PTP_CLK_MAGIC, 3, struct ptp_perout_request)
 #define PTP_ENABLE_PPS     _IOW(PTP_CLK_MAGIC, 4, int)
+#define PTP_SYS_OFFSET     _IOW(PTP_CLK_MAGIC, 5, struct ptp_sys_offset)
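Editor's note on consuming the new ioctl from userspace: each sample is bracketed by two system time stamps, so averaging them and subtracting the result from the PHC reading in between gives an offset estimate, which is exactly what the chardev hunk above collects. A minimal sketch (the /dev/ptp0 path is only an example; real tools take several samples and keep the one with the shortest system-time bracket):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/ptp_clock.h>

    /* Estimate PHC minus system clock offset, in ns, from one sample.
     * Returns 0 on error for brevity; a real tool would report it.
     */
    static long long phc_offset(const char *dev)
    {
            struct ptp_sys_offset sysoff;
            long long t1, tp, t2;
            int fd = open(dev, O_RDWR);

            if (fd < 0)
                    return 0;
            memset(&sysoff, 0, sizeof(sysoff));
            sysoff.n_samples = 1;
            if (ioctl(fd, PTP_SYS_OFFSET, &sysoff) < 0) {
                    close(fd);
                    return 0;
            }
            close(fd);

            /* ts[0] and ts[2] are system time, ts[1] is the PHC reading. */
            t1 = sysoff.ts[0].sec * 1000000000LL + sysoff.ts[0].nsec;
            tp = sysoff.ts[1].sec * 1000000000LL + sysoff.ts[1].nsec;
            t2 = sysoff.ts[2].sec * 1000000000LL + sysoff.ts[2].nsec;

            return tp - (t1 + t2) / 2;
    }
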
 
 struct ptp_extts_event {
        struct ptp_clock_time t; /* Time event occured. */
index e2fd74b8e8c250cb9de5ec5650eb32f5d4dcd93d..ff7d9d2ab504ec69da98156f3df39a9b5a868478 100644 (file)
@@ -1,4 +1,4 @@
-obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
+obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
 obj-y += timeconv.o posix-clock.o alarmtimer.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD)                += clockevents.o
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
deleted file mode 100644 (file)
index a9ae369..0000000
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- * Copyright (C) 2009 Intel Corporation.
- * Author: Patrick Ohly <patrick.ohly@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/timecompare.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/math64.h>
-#include <linux/kernel.h>
-
-/*
- * fixed point arithmetic scale factor for skew
- *
- * Usually one would measure skew in ppb (parts per billion, 1e9), but
- * using a factor of 2 simplifies the math.
- */
-#define TIMECOMPARE_SKEW_RESOLUTION (((s64)1)<<30)
-
-ktime_t timecompare_transform(struct timecompare *sync,
-                             u64 source_tstamp)
-{
-       u64 nsec;
-
-       nsec = source_tstamp + sync->offset;
-       nsec += (s64)(source_tstamp - sync->last_update) * sync->skew /
-               TIMECOMPARE_SKEW_RESOLUTION;
-
-       return ns_to_ktime(nsec);
-}
-EXPORT_SYMBOL_GPL(timecompare_transform);
-
-int timecompare_offset(struct timecompare *sync,
-                      s64 *offset,
-                      u64 *source_tstamp)
-{
-       u64 start_source = 0, end_source = 0;
-       struct {
-               s64 offset;
-               s64 duration_target;
-       } buffer[10], sample, *samples;
-       int counter = 0, i;
-       int used;
-       int index;
-       int num_samples = sync->num_samples;
-
-       if (num_samples > ARRAY_SIZE(buffer)) {
-               samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
-               if (!samples) {
-                       samples = buffer;
-                       num_samples = ARRAY_SIZE(buffer);
-               }
-       } else {
-               samples = buffer;
-       }
-
-       /* run until we have enough valid samples, but do not try forever */
-       i = 0;
-       counter = 0;
-       while (1) {
-               u64 ts;
-               ktime_t start, end;
-
-               start = sync->target();
-               ts = timecounter_read(sync->source);
-               end = sync->target();
-
-               if (!i)
-                       start_source = ts;
-
-               /* ignore negative durations */
-               sample.duration_target = ktime_to_ns(ktime_sub(end, start));
-               if (sample.duration_target >= 0) {
-                       /*
-                        * assume symetric delay to and from source:
-                        * average target time corresponds to measured
-                        * source time
-                        */
-                       sample.offset =
-                               (ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
-                               ts;
-
-                       /* simple insertion sort based on duration */
-                       index = counter - 1;
-                       while (index >= 0) {
-                               if (samples[index].duration_target <
-                                   sample.duration_target)
-                                       break;
-                               samples[index + 1] = samples[index];
-                               index--;
-                       }
-                       samples[index + 1] = sample;
-                       counter++;
-               }
-
-               i++;
-               if (counter >= num_samples || i >= 100000) {
-                       end_source = ts;
-                       break;
-               }
-       }
-
-       *source_tstamp = (end_source + start_source) / 2;
-
-       /* remove outliers by only using 75% of the samples */
-       used = counter * 3 / 4;
-       if (!used)
-               used = counter;
-       if (used) {
-               /* calculate average */
-               s64 off = 0;
-               for (index = 0; index < used; index++)
-                       off += samples[index].offset;
-               *offset = div_s64(off, used);
-       }
-
-       if (samples && samples != buffer)
-               kfree(samples);
-
-       return used;
-}
-EXPORT_SYMBOL_GPL(timecompare_offset);
-
-void __timecompare_update(struct timecompare *sync,
-                         u64 source_tstamp)
-{
-       s64 offset;
-       u64 average_time;
-
-       if (!timecompare_offset(sync, &offset, &average_time))
-               return;
-
-       if (!sync->last_update) {
-               sync->last_update = average_time;
-               sync->offset = offset;
-               sync->skew = 0;
-       } else {
-               s64 delta_nsec = average_time - sync->last_update;
-
-               /* avoid division by negative or small deltas */
-               if (delta_nsec >= 10000) {
-                       s64 delta_offset_nsec = offset - sync->offset;
-                       s64 skew; /* delta_offset_nsec *
-                                    TIMECOMPARE_SKEW_RESOLUTION /
-                                    delta_nsec */
-                       u64 divisor;
-
-                       /* div_s64() is limited to 32 bit divisor */
-                       skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
-                       divisor = delta_nsec;
-                       while (unlikely(divisor >= ((s64)1) << 32)) {
-                               /* divide both by 2; beware, right shift
-                                  of negative value has undefined
-                                  behavior and can only be used for
-                                  the positive divisor */
-                               skew = div_s64(skew, 2);
-                               divisor >>= 1;
-                       }
-                       skew = div_s64(skew, divisor);
-
-                       /*
-                        * Calculate new overall skew as 4/16 the
-                        * old value and 12/16 the new one. This is
-                        * a rather arbitrary tradeoff between
-                        * only using the latest measurement (0/16 and
-                        * 16/16) and even more weight on past measurements.
-                        */
-#define TIMECOMPARE_NEW_SKEW_PER_16 12
-                       sync->skew =
-                               div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
-                                       sync->skew +
-                                       TIMECOMPARE_NEW_SKEW_PER_16 * skew,
-                                       16);
-                       sync->last_update = average_time;
-                       sync->offset = offset;
-               }
-       }
-}
-EXPORT_SYMBOL_GPL(__timecompare_update);
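__timecompare_update(), removed with it, folds each new offset measurement into a running skew estimate weighted 12/16 toward the latest sample, and works around div_s64() accepting only a 32-bit divisor by halving dividend and divisor together until the divisor fits; only the positive divisor is right-shifted, since shifting a negative value is not portable. A stand-alone sketch of that division trick (plain C stand-in, not the kernel helpers):

#include <stdint.h>

/* Divide a signed 64-bit dividend by an unsigned 64-bit divisor when only
 * a 32-bit-divisor primitive (like the old div_s64()) is available.  Both
 * operands are halved until the divisor fits in 32 bits; the possibly
 * negative dividend is halved by division, never by a right shift. */
static int64_t div_s64_wide(int64_t dividend, uint64_t divisor)
{
        while (divisor >= ((uint64_t)1 << 32)) {
                dividend /= 2;          /* defined for negative values */
                divisor >>= 1;          /* divisor is known positive */
        }
        /* In the kernel this final step would be div_s64(). */
        return dividend / (int64_t)divisor;
}

Each halving loses at most one low bit of precision, which is acceptable here because the result only feeds the smoothed skew estimate.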
index ee070722a3a36f1916b385e8872ea505105d06bb..30ee4bc0f7ccbda12a7fa577be4edbebd1710a15 100644 (file)
@@ -294,7 +294,7 @@ static void vlan_transfer_features(struct net_device *dev,
        else
                vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
 
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
        vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
 #endif
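This hunk, and most of the IPv6, netfilter and xfrm hunks below, replace the hand-written "#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)" guards with IS_ENABLED(CONFIG_FOO) from <linux/kconfig.h>, which is true whether the option is built in (=y) or built as a module (=m). A stand-alone approximation of the macro machinery of that era, with a made-up CONFIG_DEMO option so it compiles outside the kernel (GCC is assumed, as the kernel itself assumes):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define config_enabled(cfg)             _config_enabled(cfg)
#define _config_enabled(value)          __config_enabled(__ARG_PLACEHOLDER_##value)
#define __config_enabled(arg1_or_junk)  ___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val

#define IS_ENABLED(option) \
        (config_enabled(option) || config_enabled(option##_MODULE))

#define CONFIG_DEMO_MODULE 1            /* pretend the option is =m */

int main(void)
{
        /* Prints 1: the single IS_ENABLED() test covers both the =y and
         * the =m case that the old guards spelled out by hand. */
        printf("%d\n", IS_ENABLED(CONFIG_DEMO));
        return 0;
}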
 
index 402442402af710d5acedf500347cca48f89a6be4..6da96d43317511a188dfd8d1518bfb0f11f6151d 100644 (file)
@@ -409,7 +409,7 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
        return err;
 }
 
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
 static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
                                   struct scatterlist *sgl, unsigned int sgc)
 {
@@ -564,7 +564,7 @@ static int vlan_dev_init(struct net_device *dev)
        if (is_zero_ether_addr(dev->broadcast))
                memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
 
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
        dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
 #endif
 
@@ -741,7 +741,7 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_do_ioctl           = vlan_dev_ioctl,
        .ndo_neigh_setup        = vlan_dev_neigh_setup,
        .ndo_get_stats64        = vlan_dev_get_stats64,
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#if IS_ENABLED(CONFIG_FCOE)
        .ndo_fcoe_ddp_setup     = vlan_dev_fcoe_ddp_setup,
        .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
        .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
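The FCoE ndo entries above are thin pass-throughs from the VLAN device to its lower device: each hook looks up the real netdev and invokes the same op there when the driver provides it. A hedged, kernel-side sketch of that shape for the ddp_setup hook; vlan_dev_priv() and the field names follow the 3.7-era code, but the body is illustrative rather than quoted:

static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
                                   struct scatterlist *sgl, unsigned int sgc)
{
        struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
        const struct net_device_ops *ops = real_dev->netdev_ops;
        int rc = 0;

        /* Forward to the underlying device only if it implements the op. */
        if (ops->ndo_fcoe_ddp_setup)
                rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);

        return rc;
}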
index 5a114d41bf1130fa9424f5b9c2125f4ae5e515b0..c23543cba132bc19a213ed242e47386e8e41ca65 100644 (file)
@@ -760,3 +760,133 @@ int sk_detach_filter(struct sock *sk)
        return ret;
 }
 EXPORT_SYMBOL_GPL(sk_detach_filter);
+
+static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
+{
+       static const u16 decodes[] = {
+               [BPF_S_ALU_ADD_K]       = BPF_ALU|BPF_ADD|BPF_K,
+               [BPF_S_ALU_ADD_X]       = BPF_ALU|BPF_ADD|BPF_X,
+               [BPF_S_ALU_SUB_K]       = BPF_ALU|BPF_SUB|BPF_K,
+               [BPF_S_ALU_SUB_X]       = BPF_ALU|BPF_SUB|BPF_X,
+               [BPF_S_ALU_MUL_K]       = BPF_ALU|BPF_MUL|BPF_K,
+               [BPF_S_ALU_MUL_X]       = BPF_ALU|BPF_MUL|BPF_X,
+               [BPF_S_ALU_DIV_X]       = BPF_ALU|BPF_DIV|BPF_X,
+               [BPF_S_ALU_MOD_K]       = BPF_ALU|BPF_MOD|BPF_K,
+               [BPF_S_ALU_MOD_X]       = BPF_ALU|BPF_MOD|BPF_X,
+               [BPF_S_ALU_AND_K]       = BPF_ALU|BPF_AND|BPF_K,
+               [BPF_S_ALU_AND_X]       = BPF_ALU|BPF_AND|BPF_X,
+               [BPF_S_ALU_OR_K]        = BPF_ALU|BPF_OR|BPF_K,
+               [BPF_S_ALU_OR_X]        = BPF_ALU|BPF_OR|BPF_X,
+               [BPF_S_ALU_XOR_K]       = BPF_ALU|BPF_XOR|BPF_K,
+               [BPF_S_ALU_XOR_X]       = BPF_ALU|BPF_XOR|BPF_X,
+               [BPF_S_ALU_LSH_K]       = BPF_ALU|BPF_LSH|BPF_K,
+               [BPF_S_ALU_LSH_X]       = BPF_ALU|BPF_LSH|BPF_X,
+               [BPF_S_ALU_RSH_K]       = BPF_ALU|BPF_RSH|BPF_K,
+               [BPF_S_ALU_RSH_X]       = BPF_ALU|BPF_RSH|BPF_X,
+               [BPF_S_ALU_NEG]         = BPF_ALU|BPF_NEG,
+               [BPF_S_LD_W_ABS]        = BPF_LD|BPF_W|BPF_ABS,
+               [BPF_S_LD_H_ABS]        = BPF_LD|BPF_H|BPF_ABS,
+               [BPF_S_LD_B_ABS]        = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_PROTOCOL]    = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_PKTTYPE]     = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_IFINDEX]     = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_NLATTR]      = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_MARK]        = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_QUEUE]       = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_HATYPE]      = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_RXHASH]      = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_CPU]         = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_ALU_XOR_X]   = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_VLAN_TAG]    = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
+               [BPF_S_LD_W_LEN]        = BPF_LD|BPF_W|BPF_LEN,
+               [BPF_S_LD_W_IND]        = BPF_LD|BPF_W|BPF_IND,
+               [BPF_S_LD_H_IND]        = BPF_LD|BPF_H|BPF_IND,
+               [BPF_S_LD_B_IND]        = BPF_LD|BPF_B|BPF_IND,
+               [BPF_S_LD_IMM]          = BPF_LD|BPF_IMM,
+               [BPF_S_LDX_W_LEN]       = BPF_LDX|BPF_W|BPF_LEN,
+               [BPF_S_LDX_B_MSH]       = BPF_LDX|BPF_B|BPF_MSH,
+               [BPF_S_LDX_IMM]         = BPF_LDX|BPF_IMM,
+               [BPF_S_MISC_TAX]        = BPF_MISC|BPF_TAX,
+               [BPF_S_MISC_TXA]        = BPF_MISC|BPF_TXA,
+               [BPF_S_RET_K]           = BPF_RET|BPF_K,
+               [BPF_S_RET_A]           = BPF_RET|BPF_A,
+               [BPF_S_ALU_DIV_K]       = BPF_ALU|BPF_DIV|BPF_K,
+               [BPF_S_LD_MEM]          = BPF_LD|BPF_MEM,
+               [BPF_S_LDX_MEM]         = BPF_LDX|BPF_MEM,
+               [BPF_S_ST]              = BPF_ST,
+               [BPF_S_STX]             = BPF_STX,
+               [BPF_S_JMP_JA]          = BPF_JMP|BPF_JA,
+               [BPF_S_JMP_JEQ_K]       = BPF_JMP|BPF_JEQ|BPF_K,
+               [BPF_S_JMP_JEQ_X]       = BPF_JMP|BPF_JEQ|BPF_X,
+               [BPF_S_JMP_JGE_K]       = BPF_JMP|BPF_JGE|BPF_K,
+               [BPF_S_JMP_JGE_X]       = BPF_JMP|BPF_JGE|BPF_X,
+               [BPF_S_JMP_JGT_K]       = BPF_JMP|BPF_JGT|BPF_K,
+               [BPF_S_JMP_JGT_X]       = BPF_JMP|BPF_JGT|BPF_X,
+               [BPF_S_JMP_JSET_K]      = BPF_JMP|BPF_JSET|BPF_K,
+               [BPF_S_JMP_JSET_X]      = BPF_JMP|BPF_JSET|BPF_X,
+       };
+       u16 code;
+
+       code = filt->code;
+
+       to->code = decodes[code];
+       to->jt = filt->jt;
+       to->jf = filt->jf;
+
+       if (code == BPF_S_ALU_DIV_K) {
+               /*
+                * When loaded this rule user gave us X, which was
+                * translated into R = r(X). Now we calculate the
+                * RR = r(R) and report it back. If next time this
+                * value is loaded and RRR = r(RR) is calculated
+                * then the R == RRR will be true.
+                *
+                * One exception. X == 1 translates into R == 0 and
+                * we can't calculate RR out of it with r().
+                */
+
+               if (filt->k == 0)
+                       to->k = 1;
+               else
+                       to->k = reciprocal_value(filt->k);
+
+               BUG_ON(reciprocal_value(to->k) != filt->k);
+       } else
+               to->k = filt->k;
+}
+
+int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
+{
+       struct sk_filter *filter;
+       int i, ret;
+
+       lock_sock(sk);
+       filter = rcu_dereference_protected(sk->sk_filter,
+                       sock_owned_by_user(sk));
+       ret = 0;
+       if (!filter)
+               goto out;
+       ret = filter->len;
+       if (!len)
+               goto out;
+       ret = -EINVAL;
+       if (len < filter->len)
+               goto out;
+
+       ret = -EFAULT;
+       for (i = 0; i < filter->len; i++) {
+               struct sock_filter fb;
+
+               sk_decode_filter(&filter->insns[i], &fb);
+               if (copy_to_user(&ubuf[i], &fb, sizeof(fb)))
+                       goto out;
+       }
+
+       ret = filter->len;
+out:
+       release_sock(sk);
+       return ret;
+}
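sk_decode_filter() above has to cope with the BPF_ALU|BPF_DIV|BPF_K optimisation: when the program was attached, the kernel replaced the user's divisor X with its multiplicative reciprocal R = r(X), so the dump reports RR = r(R), and the BUG_ON checks that attaching the reported value would recreate the same stored reciprocal, i.e. r(RR) == R (X == 1, which maps to R == 0, is special-cased to 1). A small userspace check of that round-trip, taking the 3.7-era reciprocal_value() formula as an assumption:

#include <assert.h>
#include <stdint.h>

/* Assumed to match lib/reciprocal_div.c of that era:
 * r(k) = ceil(2^32 / k) for k >= 2. */
static uint32_t reciprocal_value(uint32_t k)
{
        uint64_t val = (1ULL << 32) + (k - 1);

        return (uint32_t)(val / k);
}

int main(void)
{
        uint32_t x;

        for (x = 2; x < 1000000; x++) {
                uint32_t r  = reciprocal_value(x);  /* stored at attach time */
                uint32_t rr = reciprocal_value(r);  /* reported by the dump */

                /* Re-attaching the dumped value must recreate the stored
                 * one, even though the user's original X may be lost. */
                assert(reciprocal_value(rr) == r);
        }
        return 0;
}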
index 0a023b8daa554867fb8b2a76335d997aa61f5520..06286006a2cc2be32a472a4453df7d088e1e17b6 100644 (file)
@@ -1077,6 +1077,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
        case SO_BINDTODEVICE:
                v.val = sk->sk_bound_dev_if;
                break;
+       case SO_GET_FILTER:
+               len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
+               if (len < 0)
+                       return len;
+
+               goto lenout;
        default:
                return -ENOPROTOOPT;
        }
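Together with sk_get_filter() above, this sock_getsockopt() case lets a process read back its attached classic BPF program: a first call with *optlen set to zero only reports the instruction count, and a second call with a large enough count copies the decoded instructions out; note that the length is measured in instructions, not bytes. A hedged userspace sketch, assuming SO_GET_FILTER is exposed by the uapi headers as an alias of the SO_ATTACH_FILTER option value (as the asm/socket.h changes in this merge suggest):

#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <linux/filter.h>

#ifndef SO_GET_FILTER
#define SO_GET_FILTER SO_ATTACH_FILTER  /* assumption: same option value */
#endif

/* Print the classic BPF filter attached to fd, if any. */
static int dump_filter(int fd)
{
        struct sock_filter *insns;
        socklen_t cnt = 0;
        unsigned int i;

        /* Pass 1: a zero length only asks for the instruction count. */
        if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &cnt) < 0)
                return -1;
        if (!cnt)
                return 0;                       /* no filter attached */

        insns = calloc(cnt, sizeof(*insns));
        if (!insns)
                return -1;

        /* Pass 2: the kernel copies out cnt decoded instructions. */
        if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &cnt) < 0) {
                free(insns);
                return -1;
        }

        for (i = 0; i < cnt; i++)
                printf("{ 0x%04x, %u, %u, 0x%08x }\n",
                       insns[i].code, insns[i].jt, insns[i].jf, insns[i].k);

        free(insns);
        return 0;
}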
index f8b1e0494d7539fc864c373a6eefb41108aa2a88..f6db227c1fd9282c63d12848539b4c350945534d 100644 (file)
@@ -1451,6 +1451,8 @@ static int inet_netconf_msgsize_devconf(int type)
        /* type -1 is used for ALL */
        if (type == -1 || type == NETCONFA_FORWARDING)
                size += nla_total_size(4);
+       if (type == -1 || type == NETCONFA_RP_FILTER)
+               size += nla_total_size(4);
 
        return size;
 }
@@ -1479,6 +1481,10 @@ static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
            nla_put_s32(skb, NETCONFA_FORWARDING,
                        IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
                goto nla_put_failure;
+       if ((type == -1 || type == NETCONFA_RP_FILTER) &&
+           nla_put_s32(skb, NETCONFA_RP_FILTER,
+                       IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
+               goto nla_put_failure;
 
        return nlmsg_end(skb, nlh);
 
@@ -1515,6 +1521,7 @@ errout:
 static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
        [NETCONFA_IFINDEX]      = { .len = sizeof(int) },
        [NETCONFA_FORWARDING]   = { .len = sizeof(int) },
+       [NETCONFA_RP_FILTER]    = { .len = sizeof(int) },
 };
 
 static int inet_netconf_get_devconf(struct sk_buff *in_skb,
@@ -1647,6 +1654,23 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
                    i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
                        if ((new_value == 0) && (old_value != 0))
                                rt_cache_flush(net);
+               if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
+                   new_value != old_value) {
+                       int ifindex;
+
+                       if (cnf == net->ipv4.devconf_dflt)
+                               ifindex = NETCONFA_IFINDEX_DEFAULT;
+                       else if (cnf == net->ipv4.devconf_all)
+                               ifindex = NETCONFA_IFINDEX_ALL;
+                       else {
+                               struct in_device *idev =
+                                       container_of(cnf, struct in_device,
+                                                    cnf);
+                               ifindex = idev->dev->ifindex;
+                       }
+                       inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
+                                                   ifindex, cnf);
+               }
        }
 
        return ret;
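The devinet.c hunks add rp_filter to the IPv4 netconf netlink interface: the dump path gains an NETCONFA_RP_FILTER attribute, and the sysctl handler sends an RTM_NEWNETCONF notification whenever the value changes, mapping the written devconf table back to an ifindex (the all/default tables use reserved negative indexes). A rough userspace sketch of pulling the new attribute out of such a notification; RTM_NEWNETCONF, struct netconfmsg and the NETCONFA_* names are taken on trust from the uapi headers touched by this merge, and receiving the message (joining the IPv4 netconf notification group) is left to the caller:

#include <stdio.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/netconf.h>

/* Parse one RTM_NEWNETCONF message and print its rp_filter value. */
static void print_netconf(struct nlmsghdr *nlh)
{
        struct netconfmsg *ncm = NLMSG_DATA(nlh);
        int len = nlh->nlmsg_len - NLMSG_SPACE(sizeof(*ncm));
        struct rtattr *rta = (struct rtattr *)
                ((char *)nlh + NLMSG_SPACE(sizeof(*ncm)));
        int ifindex = 0, rp_filter = -1;

        for (; RTA_OK(rta, len); rta = RTA_NEXT(rta, len)) {
                switch (rta->rta_type) {
                case NETCONFA_IFINDEX:
                        ifindex = *(int *)RTA_DATA(rta);
                        break;
                case NETCONFA_RP_FILTER:
                        rp_filter = *(int *)RTA_DATA(rta);
                        break;
                }
        }

        /* Negative ifindexes denote the "all" and "default" pseudo entries. */
        printf("family %u ifindex %d rp_filter %d\n",
               ncm->ncm_family, ifindex, rp_filter);
}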
index 8f0b12a67131a54c5665fdb1e1a5cb7aaea90bf3..387b813f227d576b8532b60ab2f564658770c10d 100644 (file)
@@ -402,7 +402,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
        if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
                ndev->cnf.accept_dad = -1;
 
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
        if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
                pr_info("%s: Disabled Multicast RS\n", dev->name);
                ndev->cnf.rtr_solicits = 0;
@@ -1838,7 +1838,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
           This thing is done here expecting that the whole
           class of non-broadcast devices need not cloning.
         */
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
        if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
                cfg.fc_flags |= RTF_NONEXTHOP;
 #endif
@@ -1898,7 +1898,7 @@ static void addrconf_add_mroute(struct net_device *dev)
        ip6_route_add(&cfg);
 }
 
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
 static void sit_route_add(struct net_device *dev)
 {
        struct fib6_config cfg = {
@@ -2250,7 +2250,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
        if (dev == NULL)
                goto err_exit;
 
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
        if (dev->type == ARPHRD_SIT) {
                const struct net_device_ops *ops = dev->netdev_ops;
                struct ifreq ifr;
@@ -2461,7 +2461,7 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
        }
 }
 
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
 static void sit_add_v4_addrs(struct inet6_dev *idev)
 {
        struct in6_addr addr;
@@ -2580,7 +2580,7 @@ static void addrconf_dev_config(struct net_device *dev)
                addrconf_add_linklocal(idev, &addr);
 }
 
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
 static void addrconf_sit_config(struct net_device *dev)
 {
        struct inet6_dev *idev;
@@ -2617,7 +2617,7 @@ static void addrconf_sit_config(struct net_device *dev)
 }
 #endif
 
-#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+#if IS_ENABLED(CONFIG_NET_IPGRE)
 static void addrconf_gre_config(struct net_device *dev)
 {
        struct inet6_dev *idev;
@@ -2747,12 +2747,12 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                }
 
                switch (dev->type) {
-#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_SIT)
                case ARPHRD_SIT:
                        addrconf_sit_config(dev);
                        break;
 #endif
-#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+#if IS_ENABLED(CONFIG_NET_IPGRE)
                case ARPHRD_IPGRE:
                        addrconf_gre_config(dev);
                        break;
@@ -3340,7 +3340,7 @@ void if6_proc_exit(void)
 }
 #endif /* CONFIG_PROC_FS */
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 /* Check if address is a home address configured on any interface. */
 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
 {
index 7e6139508ee7616f10996f33d75ae88e56bc5ca8..ecc35b93314bb1b73c70df219c9cf8f125c77908 100644 (file)
@@ -44,7 +44,7 @@
 #define IPV6HDR_BASELEN 8
 
 struct tmp_ext {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
                struct in6_addr saddr;
 #endif
                struct in6_addr daddr;
@@ -152,7 +152,7 @@ bad:
        return false;
 }
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 /**
  *     ipv6_rearrange_destopt - rearrange IPv6 destination options header
  *     @iph: IPv6 header
@@ -320,7 +320,7 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
        memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
 
        if (extlen) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
                memcpy(&top_iph->saddr, iph_ext, extlen);
 #else
                memcpy(&top_iph->daddr, iph_ext, extlen);
@@ -385,7 +385,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
        memcpy(iph_base, top_iph, IPV6HDR_BASELEN);
 
        if (extlen) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
                memcpy(iph_ext, &top_iph->saddr, extlen);
 #else
                memcpy(iph_ext, &top_iph->daddr, extlen);
@@ -434,7 +434,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
        memcpy(top_iph, iph_base, IPV6HDR_BASELEN);
 
        if (extlen) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
                memcpy(&top_iph->saddr, iph_ext, extlen);
 #else
                memcpy(&top_iph->daddr, iph_ext, extlen);
index be2b67d631e5195fcc7df5b48a4b3d172d5eccdc..93cbad2c0aa724b47c1049e851d55755d7859285 100644 (file)
@@ -769,7 +769,7 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
                        rthdr = (struct ipv6_rt_hdr *)CMSG_DATA(cmsg);
 
                        switch (rthdr->type) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
                        case IPV6_SRCRT_TYPE_2:
                                if (rthdr->hdrlen != 2 ||
                                    rthdr->segments_left != 1) {
index fa3d9c3280927934bd7b082077674c3c31d4ad4c..f005acc58b2aad4db9119e307b45d4029f2bc159 100644 (file)
@@ -43,7 +43,7 @@
 #include <net/ndisc.h>
 #include <net/ip6_route.h>
 #include <net/addrconf.h>
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 #include <net/xfrm.h>
 #endif
 
@@ -224,7 +224,7 @@ bad:
   Destination options header.
  *****************************/
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
 {
        struct ipv6_destopt_hao *hao;
@@ -288,7 +288,7 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
 #endif
 
 static const struct tlvtype_proc tlvprocdestopt_lst[] = {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
        {
                .type   = IPV6_TLV_HAO,
                .func   = ipv6_dest_hao,
@@ -300,7 +300,7 @@ static const struct tlvtype_proc tlvprocdestopt_lst[] = {
 static int ipv6_destopt_rcv(struct sk_buff *skb)
 {
        struct inet6_skb_parm *opt = IP6CB(skb);
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
        __u16 dstbuf;
 #endif
        struct dst_entry *dst = skb_dst(skb);
@@ -315,14 +315,14 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
        }
 
        opt->lastopt = opt->dst1 = skb_network_header_len(skb);
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
        dstbuf = opt->dst1;
 #endif
 
        if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
                skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
                opt = IP6CB(skb);
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
                opt->nhoff = dstbuf;
 #else
                opt->nhoff = opt->dst1;
@@ -378,7 +378,7 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
 looped_back:
        if (hdr->segments_left == 0) {
                switch (hdr->type) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
                case IPV6_SRCRT_TYPE_2:
                        /* Silently discard type 2 header unless it was
                         * processed by own
@@ -404,7 +404,7 @@ looped_back:
        }
 
        switch (hdr->type) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
        case IPV6_SRCRT_TYPE_2:
                if (accept_source_route < 0)
                        goto unknown_rh;
@@ -461,7 +461,7 @@ looped_back:
        addr += i - 1;
 
        switch (hdr->type) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
        case IPV6_SRCRT_TYPE_2:
                if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
                                     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
index 24d69dbca4d68e2f1d1444796cb3d5c33b0a1fe2..b4a9fd51dae74bd8143b2d32e791e089f1d8562f 100644 (file)
@@ -280,7 +280,7 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st
        return 0;
 }
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 static void mip6_addr_swap(struct sk_buff *skb)
 {
        struct ipv6hdr *iph = ipv6_hdr(skb);
index aece3e792f84ad4cfcc88ada01817c70242b396d..e10c77b4fbecf8e1ea5bc96361eced7c19af5ef9 100644 (file)
@@ -538,8 +538,7 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
        to->tc_index = from->tc_index;
 #endif
        nf_copy(to, from);
-#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
-    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
        to->nf_trace = from->nf_trace;
 #endif
        skb_copy_secmark(to, from);
@@ -564,7 +563,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
                        found_rhdr = 1;
                        break;
                case NEXTHDR_DEST:
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
                        if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
                                break;
 #endif
index ba6d13d1f1e162254fa4e4421e82e4a204a9785c..a7bee6a9133505b1ab095b86ab0b72ac7f7418e0 100644 (file)
@@ -397,7 +397,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                if (optname == IPV6_RTHDR && opt && opt->srcrt) {
                        struct ipv6_rt_hdr *rthdr = opt->srcrt;
                        switch (rthdr->type) {
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
                        case IPV6_SRCRT_TYPE_2:
                                if (rthdr->hdrlen != 2 ||
                                    rthdr->segments_left != 1)
index d7cb04506c3dac8266780c30a74e1a3fa1320ade..10ce76a2cb94b8431583b7a59b730028eb59d6be 100644 (file)
@@ -207,8 +207,7 @@ ip6t_get_target_c(const struct ip6t_entry *e)
        return ip6t_get_target((struct ip6t_entry *)e);
 }
 
-#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
-    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
 /* This cries for unification! */
 static const char *const hooknames[] = {
        [NF_INET_PRE_ROUTING]           = "PREROUTING",
@@ -381,8 +380,7 @@ ip6t_do_table(struct sk_buff *skb,
                t = ip6t_get_target_c(e);
                IP_NF_ASSERT(t->u.kernel.target);
 
-#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
-    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
                /* The packet is traced: log it */
                if (unlikely(skb->nf_trace))
                        trace_packet(skb, hook, in, out,
index 8860d23e61cfff646e3e034da7fe4c985fbb5575..ccb5cbe9354945583c8000e9019e3a8750c05025 100644 (file)
@@ -295,7 +295,7 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
        },
 };
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -346,7 +346,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 __read_mostly = {
        .invert_tuple           = ipv6_invert_tuple,
        .print_tuple            = ipv6_print_tuple,
        .get_l4proto            = ipv6_get_l4proto,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = ipv6_tuple_to_nlattr,
        .nlattr_tuple_size      = ipv6_nlattr_tuple_size,
        .nlattr_to_tuple        = ipv6_nlattr_to_tuple,
index 2d54b2061d68ef3e947168b82a5b041f84ffea50..24df3dde0076bc58d6081f306832c73d568d72a9 100644 (file)
@@ -232,7 +232,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
        return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum);
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -375,7 +375,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
        .get_timeouts           = icmpv6_get_timeouts,
        .new                    = icmpv6_new,
        .error                  = icmpv6_error,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = icmpv6_tuple_to_nlattr,
        .nlattr_tuple_size      = icmpv6_nlattr_tuple_size,
        .nlattr_to_tuple        = icmpv6_nlattr_to_tuple,
index cdd6d045e42e7ff2098adebe149640811ad109e0..aacd121fe8c54365607f49e40679d7fd08dcd8e8 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <linux/netfilter_ipv6.h>
 #include <linux/netfilter_bridge.h>
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_helper.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
@@ -35,7 +35,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
 {
        u16 zone = NF_CT_DEFAULT_ZONE;
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        if (skb->nfct)
                zone = nf_ct_zone((struct nf_conn *)skb->nfct);
 #endif
@@ -60,7 +60,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
 {
        struct sk_buff *reasm;
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        /* Previously seen (loopback)?  */
        if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
                return NF_ACCEPT;
index 5d6da784305bb26bee73f21f4cc6f2b2f8d4ea17..61aaf70f376e9eafee55cae3cb3b909916d0554e 100644 (file)
@@ -84,7 +84,7 @@ const struct nf_nat_l4proto nf_nat_l4proto_icmpv6 = {
        .manip_pkt              = icmpv6_manip_pkt,
        .in_range               = icmpv6_in_range,
        .unique_tuple           = icmpv6_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index d8e95c77db99e5bac5b7cab803c82a731e14a697..6cd29b1e8b926e26a7bc0df5b5c5263fc2e23d84 100644 (file)
@@ -50,7 +50,7 @@
 #include <net/udp.h>
 #include <net/inet_common.h>
 #include <net/tcp_states.h>
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 #include <net/mip6.h>
 #endif
 #include <linux/mroute6.h>
@@ -123,7 +123,7 @@ static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
        return 1;
 }
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
 
 static mh_filter_t __rcu *mh_filter __read_mostly;
@@ -184,7 +184,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
                        filtered = icmpv6_filter(sk, skb);
                        break;
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
                case IPPROTO_MH:
                {
                        /* XXX: To validate MH only once for each packet,
index f8c4c08ffb609d2840adb721e395fcbe99a636d7..f3ed8ca59b942497ba0193268fed95801c364d7a 100644 (file)
@@ -20,7 +20,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
 #include <net/mip6.h>
 #endif
 
@@ -182,7 +182,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
                        fl6->flowi6_proto = nexthdr;
                        return;
 
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
                case IPPROTO_MH:
                        if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
                                struct ip6_mh *mh;
index 3f2f7c4ab7210d5948c22a6e417bc0046b1631b0..d8c70b8efc24231358ab50a32f354b69064f7860 100644 (file)
@@ -101,7 +101,7 @@ static int __xfrm6_state_sort_cmp(void *p)
                        return 1;
                else
                        return 3;
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
        case XFRM_MODE_ROUTEOPTIMIZATION:
        case XFRM_MODE_IN_TRIGGER:
                return 2;
@@ -134,7 +134,7 @@ static int __xfrm6_tmpl_sort_cmp(void *p)
        switch (v->mode) {
        case XFRM_MODE_TRANSPORT:
                return 1;
-#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
        case XFRM_MODE_ROUTEOPTIMIZATION:
        case XFRM_MODE_IN_TRIGGER:
                return 2;