Merge branch 'vhost-net-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mst...
author David S. Miller <davem@davemloft.net>
Wed, 6 Oct 2010 20:07:22 +0000 (13:07 -0700)
committer David S. Miller <davem@davemloft.net>
Wed, 6 Oct 2010 20:07:22 +0000 (13:07 -0700)
50 files changed:
Documentation/networking/bonding.txt
drivers/net/bna/bfa_ioc.c
drivers/net/bna/bfa_ioc.h
drivers/net/bna/bfa_ioc_ct.c
drivers/net/bna/bfa_sm.h
drivers/net/bna/bna.h
drivers/net/bna/bna_ctrl.c
drivers/net/bna/bna_hw.h
drivers/net/bna/bna_txrx.c
drivers/net/bna/bnad.c
drivers/net/bna/bnad.h
drivers/net/bna/cna_fwimg.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bonding.h
drivers/net/ehea/ehea.h
drivers/net/ehea/ehea_main.c
drivers/net/ixgbevf/mbx.c
drivers/net/ixgbevf/mbx.h
drivers/net/ixgbevf/vf.c
drivers/net/loopback.c
include/linux/if_bonding.h
include/linux/netdevice.h
include/net/fib_rules.h
include/net/ip_fib.h
include/net/neighbour.h
net/8021q/vlan.h
net/8021q/vlan_core.c
net/8021q/vlan_dev.c
net/atm/clip.c
net/core/dev.c
net/core/fib_rules.c
net/core/neighbour.c
net/decnet/dn_neigh.c
net/ipv4/arp.c
net/ipv4/fib_frontend.c
net/ipv4/fib_hash.c
net/ipv4/fib_lookup.h
net/ipv4/fib_rules.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/igmp.c
net/ipv4/ip_gre.c
net/ipv4/ipip.c
net/ipv4/route.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/ndisc.c
net/ipv6/sit.c
net/unix/af_unix.c

Documentation/networking/bonding.txt
index d2b62b71b61753f0ffb272d7394d2532b160090e..5dc638791d975116bf1a1e590fdfc44a6ae5c33c 100644 (file)
@@ -765,6 +765,14 @@ xmit_hash_policy
        does not exist, and the layer2 policy is the only policy.  The
        layer2+3 value was added for bonding version 3.2.2.
 
+resend_igmp
+
+       Specifies the number of IGMP membership reports to be issued after
+       a failover event. One membership report is issued immediately after
+       the failover; subsequent packets are sent at 200ms intervals.
+
+       The valid range is 0 - 255; the default value is 1. This option
+       was added for bonding version 3.7.0.
 
 3. Configuring Bonding Devices
 ==============================
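Usage sketch (not part of the patch): the resend_igmp module parameter added
above can be set at module load time and, assuming the bond_sysfs.c change in
this merge also exposes a matching sysfs attribute, adjusted at runtime:

        modprobe bonding mode=active-backup miimon=100 resend_igmp=3

        echo 3 > /sys/class/net/bond0/bonding/resend_igmp
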
drivers/net/bna/bfa_ioc.c
index 73493de98de5dcee80b5df4b9104d3da06bbf689..e94e5aa975150ba742909f70ef39129f1c48799a 100644 (file)
@@ -65,7 +65,7 @@
                        (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
                        readl((__ioc)->ioc_regs.hfn_mbox_cmd))
 
-bool bfa_nw_auto_recover = true;
+static bool bfa_nw_auto_recover = true;
 
 /*
  * forward declarations
@@ -1276,12 +1276,6 @@ bfa_nw_ioc_auto_recover(bool auto_recover)
        bfa_nw_auto_recover = auto_recover;
 }
 
-bool
-bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
-{
-       return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
-}
-
 static void
 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
 {
drivers/net/bna/bfa_ioc.h
index 7f0719e17efc4526133fd5034e3a3e83c4e062ad..a73d84ec808c76391c3d37067079af54a68bbaa4 100644 (file)
@@ -271,7 +271,6 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
 void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
 
 void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
-bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
 
 void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
 void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
drivers/net/bna/bfa_ioc_ct.c
index 462857cbab9b23fd61138276cb77730972597d5c..121cfd6d48b1eb7fe8a5f15223e4f4b3d40cea76 100644 (file)
@@ -34,7 +34,7 @@ static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc);
 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
 
-struct bfa_ioc_hwif nw_hwif_ct;
+static struct bfa_ioc_hwif nw_hwif_ct;
 
 /**
  * Called from bfa_ioc_attach() to map asic specific calls.
drivers/net/bna/bfa_sm.h
index 1d3d975d6f681f880c34d55faad191917e9cda63..46462c49b6f9b1105ba21a6e2f1ec1e7438f318d 100644 (file)
@@ -77,7 +77,7 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
        ((_fsm)->fsm == (bfa_fsm_t)(_state))
 
 static inline int
-bfa_sm_to_state(struct bfa_sm_table *smt, bfa_sm_t sm)
+bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
 {
        int     i = 0;
 
drivers/net/bna/bna.h
index 6a2b3291c190f23ce505a920a6f6a817bbe4bc78..df6676bbc84ed91fbdde77dd2bc3f25cc18eefb0 100644 (file)
@@ -19,8 +19,7 @@
 #include "bfi_ll.h"
 #include "bna_types.h"
 
-extern u32 bna_dim_vector[][BNA_BIAS_T_MAX];
-extern u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
+extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
 
 /**
  *
@@ -344,9 +343,6 @@ do {                                                                        \
  * BNA
  */
 
-/* Internal APIs */
-void bna_adv_res_req(struct bna_res_info *res_info);
-
 /* APIs for BNAD */
 void bna_res_req(struct bna_res_info *res_info);
 void bna_init(struct bna *bna, struct bnad *bnad,
@@ -354,7 +350,6 @@ void bna_init(struct bna *bna, struct bnad *bnad,
                        struct bna_res_info *res_info);
 void bna_uninit(struct bna *bna);
 void bna_stats_get(struct bna *bna);
-void bna_stats_clr(struct bna *bna);
 void bna_get_perm_mac(struct bna *bna, u8 *mac);
 
 /* APIs for Rx */
@@ -376,18 +371,6 @@ void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
  * DEVICE
  */
 
-/* Interanl APIs */
-void bna_adv_device_init(struct bna_device *device, struct bna *bna,
-                       struct bna_res_info *res_info);
-
-/* APIs for BNA */
-void bna_device_init(struct bna_device *device, struct bna *bna,
-                    struct bna_res_info *res_info);
-void bna_device_uninit(struct bna_device *device);
-void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
-int bna_device_status_get(struct bna_device *device);
-int bna_device_state_get(struct bna_device *device);
-
 /* APIs for BNAD */
 void bna_device_enable(struct bna_device *device);
 void bna_device_disable(struct bna_device *device,
@@ -397,12 +380,6 @@ void bna_device_disable(struct bna_device *device,
  * MBOX
  */
 
-/* APIs for DEVICE */
-void bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna);
-void bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod);
-void bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod);
-void bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod);
-
 /* APIs for PORT, TX, RX */
 void bna_mbox_handler(struct bna *bna, u32 intr_status);
 void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
@@ -411,17 +388,6 @@ void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
  * PORT
  */
 
-/* APIs for BNA */
-void bna_port_init(struct bna_port *port, struct bna *bna);
-void bna_port_uninit(struct bna_port *port);
-int bna_port_state_get(struct bna_port *port);
-int bna_llport_state_get(struct bna_llport *llport);
-
-/* APIs for DEVICE */
-void bna_port_start(struct bna_port *port);
-void bna_port_stop(struct bna_port *port);
-void bna_port_fail(struct bna_port *port);
-
 /* API for RX */
 int bna_port_mtu_get(struct bna_port *port);
 void bna_llport_admin_up(struct bna_llport *llport);
@@ -437,12 +403,6 @@ void bna_port_pause_config(struct bna_port *port,
 void bna_port_mtu_set(struct bna_port *port, int mtu,
                      void (*cbfn)(struct bnad *, enum bna_cb_status));
 void bna_port_mac_get(struct bna_port *port, mac_t *mac);
-void bna_port_type_set(struct bna_port *port, enum bna_port_type type);
-void bna_port_linkcbfn_set(struct bna_port *port,
-                          void (*linkcbfn)(struct bnad *,
-                                           enum bna_link_status));
-void bna_port_admin_up(struct bna_port *port);
-void bna_port_admin_down(struct bna_port *port);
 
 /* Callbacks for TX, RX */
 void bna_port_cb_tx_stopped(struct bna_port *port,
@@ -450,11 +410,6 @@ void bna_port_cb_tx_stopped(struct bna_port *port,
 void bna_port_cb_rx_stopped(struct bna_port *port,
                            enum bna_cb_status status);
 
-/* Callbacks for MBOX */
-void bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
-                        int status);
-void bna_port_cb_link_down(struct bna_port *port, int status);
-
 /**
  * IB
  */
@@ -464,25 +419,10 @@ void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
                     struct bna_res_info *res_info);
 void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);
 
-/* APIs for TX, RX */
-struct bna_ib *bna_ib_get(struct bna_ib_mod *ib_mod,
-                           enum bna_intr_type intr_type, int vector);
-void bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib);
-int bna_ib_reserve_idx(struct bna_ib *ib);
-void bna_ib_release_idx(struct bna_ib *ib, int idx);
-int bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config);
-void bna_ib_start(struct bna_ib *ib);
-void bna_ib_stop(struct bna_ib *ib);
-void bna_ib_fail(struct bna_ib *ib);
-void bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo);
-
 /**
  * TX MODULE AND TX
  */
 
-/* Internal APIs */
-void bna_tx_prio_changed(struct bna_tx *tx, int prio);
-
 /* APIs for BNA */
 void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
                     struct bna_res_info *res_info);
@@ -508,10 +448,6 @@ void bna_tx_enable(struct bna_tx *tx);
 void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
                    void (*cbfn)(void *, struct bna_tx *,
                                 enum bna_cb_status));
-enum bna_cb_status
-bna_tx_prio_set(struct bna_tx *tx, int prio,
-               void (*cbfn)(struct bnad *, struct bna_tx *,
-                            enum bna_cb_status));
 void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
 
 /**
@@ -564,35 +500,20 @@ void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
                    void (*cbfn)(void *, struct bna_rx *,
                                 enum bna_cb_status));
 void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
-void bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX]);
+void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
 void bna_rx_dim_update(struct bna_ccb *ccb);
 enum bna_cb_status
 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *,
                              enum bna_cb_status));
 enum bna_cb_status
-bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac,
-                void (*cbfn)(struct bnad *, struct bna_rx *,
-                             enum bna_cb_status));
-enum bna_cb_status
-bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac,
-                void (*cbfn)(struct bnad *, struct bna_rx *,
-                             enum bna_cb_status));
-enum bna_cb_status
 bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
                 void (*cbfn)(struct bnad *, struct bna_rx *,
                              enum bna_cb_status));
 enum bna_cb_status
-bna_rx_mcast_del(struct bna_rx *rx, u8 *mcmac,
-                void (*cbfn)(struct bnad *, struct bna_rx *,
-                             enum bna_cb_status));
-enum bna_cb_status
 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
                     void (*cbfn)(struct bnad *, struct bna_rx *,
                                  enum bna_cb_status));
-void bna_rx_mcast_delall(struct bna_rx *rx,
-                        void (*cbfn)(struct bnad *, struct bna_rx *,
-                                     enum bna_cb_status));
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
                enum bna_rxmode bitmask,
@@ -601,36 +522,12 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
 void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
 void bna_rx_vlanfilter_enable(struct bna_rx *rx);
-void bna_rx_vlanfilter_disable(struct bna_rx *rx);
-void bna_rx_rss_enable(struct bna_rx *rx);
-void bna_rx_rss_disable(struct bna_rx *rx);
-void bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config);
-void bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors,
-                       int nvectors);
 void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
                       void (*cbfn)(struct bnad *, struct bna_rx *,
                                    enum bna_cb_status));
 void bna_rx_hds_disable(struct bna_rx *rx,
                        void (*cbfn)(struct bnad *, struct bna_rx *,
                                     enum bna_cb_status));
-void bna_rx_receive_pause(struct bna_rx *rx,
-                         void (*cbfn)(struct bnad *, struct bna_rx *,
-                                      enum bna_cb_status));
-void bna_rx_receive_resume(struct bna_rx *rx,
-                          void (*cbfn)(struct bnad *, struct bna_rx *,
-                                       enum bna_cb_status));
-
-/* RxF APIs for RX */
-void bna_rxf_start(struct bna_rxf *rxf);
-void bna_rxf_stop(struct bna_rxf *rxf);
-void bna_rxf_fail(struct bna_rxf *rxf);
-void bna_rxf_init(struct bna_rxf *rxf, struct bna_rx *rx,
-                 struct bna_rx_config *q_config);
-void bna_rxf_uninit(struct bna_rxf *rxf);
-
-/* Callback from RXF to RX */
-void bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status);
-void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status);
 
 /**
  * BNAD
@@ -639,7 +536,6 @@ void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status);
 /* Callbacks for BNA */
 void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
                       struct bna_stats *stats);
-void bnad_cb_stats_clr(struct bnad *bnad);
 
 /* Callbacks for DEVICE */
 void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
drivers/net/bna/bna_ctrl.c
index ddd922f210c7ae2cb10582f5ef39013da79139ce..07b26598546e36c9bc46c0fe100ced62c7fb47c0 100644 (file)
 #include "bfa_sm.h"
 #include "bfa_wc.h"
 
+static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
+
+static void
+bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
+                       int status)
+{
+       int i;
+       u8 prio_map;
+
+       port->llport.link_status = BNA_LINK_UP;
+       if (aen->cee_linkup)
+               port->llport.link_status = BNA_CEE_UP;
+
+       /* Compute the priority */
+       prio_map = aen->prio_map;
+       if (prio_map) {
+               for (i = 0; i < 8; i++) {
+                       if ((prio_map >> i) & 0x1)
+                               break;
+               }
+               port->priority = i;
+       } else
+               port->priority = 0;
+
+       /* Dispatch events */
+       bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
+       bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
+       port->link_cbfn(port->bna->bnad, port->llport.link_status);
+}
+
+static void
+bna_port_cb_link_down(struct bna_port *port, int status)
+{
+       port->llport.link_status = BNA_LINK_DOWN;
+
+       /* Dispatch events */
+       bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
+       port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
+}
+
 /**
  * MBOX
  */
@@ -96,7 +136,7 @@ bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
                bna_mbox_aen_callback(bna, msg);
 }
 
-void
+static void
 bna_err_handler(struct bna *bna, u32 intr_status)
 {
        u32 init_halt;
@@ -140,7 +180,7 @@ bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
        }
 }
 
-void
+static void
 bna_mbox_flush_q(struct bna *bna, struct list_head *q)
 {
        struct bna_mbox_qe *mb_qe = NULL;
@@ -166,18 +206,18 @@ bna_mbox_flush_q(struct bna *bna, struct list_head *q)
        bna->mbox_mod.state = BNA_MBOX_FREE;
 }
 
-void
+static void
 bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
 {
 }
 
-void
+static void
 bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
 {
        bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
 }
 
-void
+static void
 bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
 {
        bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
@@ -187,7 +227,7 @@ bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
        mbox_mod->bna = bna;
 }
 
-void
+static void
 bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
 {
        mbox_mod->bna = NULL;
@@ -538,7 +578,7 @@ bna_fw_cb_llport_down(void *arg, int status)
        bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
 }
 
-void
+static void
 bna_port_cb_llport_stopped(struct bna_port *port,
                                enum bna_cb_status status)
 {
@@ -591,7 +631,7 @@ bna_llport_fail(struct bna_llport *llport)
        bfa_fsm_send_event(llport, LLPORT_E_FAIL);
 }
 
-int
+static int
 bna_llport_state_get(struct bna_llport *llport)
 {
        return bfa_sm_to_state(llport_sm_table, llport->fsm);
@@ -1109,7 +1149,7 @@ bna_port_cb_chld_stopped(void *arg)
        bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
 }
 
-void
+static void
 bna_port_init(struct bna_port *port, struct bna *bna)
 {
        port->bna = bna;
@@ -1137,7 +1177,7 @@ bna_port_init(struct bna_port *port, struct bna *bna)
        bna_llport_init(&port->llport, bna);
 }
 
-void
+static void
 bna_port_uninit(struct bna_port *port)
 {
        bna_llport_uninit(&port->llport);
@@ -1147,13 +1187,13 @@ bna_port_uninit(struct bna_port *port)
        port->bna = NULL;
 }
 
-int
+static int
 bna_port_state_get(struct bna_port *port)
 {
        return bfa_sm_to_state(port_sm_table, port->fsm);
 }
 
-void
+static void
 bna_port_start(struct bna_port *port)
 {
        port->flags |= BNA_PORT_F_DEVICE_READY;
@@ -1161,7 +1201,7 @@ bna_port_start(struct bna_port *port)
                bfa_fsm_send_event(port, PORT_E_START);
 }
 
-void
+static void
 bna_port_stop(struct bna_port *port)
 {
        port->stop_cbfn = bna_device_cb_port_stopped;
@@ -1171,7 +1211,7 @@ bna_port_stop(struct bna_port *port)
        bfa_fsm_send_event(port, PORT_E_STOP);
 }
 
-void
+static void
 bna_port_fail(struct bna_port *port)
 {
        port->flags &= ~BNA_PORT_F_DEVICE_READY;
@@ -1190,44 +1230,6 @@ bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
        bfa_wc_down(&port->chld_stop_wc);
 }
 
-void
-bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
-                       int status)
-{
-       int i;
-       u8 prio_map;
-
-       port->llport.link_status = BNA_LINK_UP;
-       if (aen->cee_linkup)
-               port->llport.link_status = BNA_CEE_UP;
-
-       /* Compute the priority */
-       prio_map = aen->prio_map;
-       if (prio_map) {
-               for (i = 0; i < 8; i++) {
-                       if ((prio_map >> i) & 0x1)
-                               break;
-               }
-               port->priority = i;
-       } else
-               port->priority = 0;
-
-       /* Dispatch events */
-       bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
-       bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
-       port->link_cbfn(port->bna->bnad, port->llport.link_status);
-}
-
-void
-bna_port_cb_link_down(struct bna_port *port, int status)
-{
-       port->llport.link_status = BNA_LINK_DOWN;
-
-       /* Dispatch events */
-       bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
-       port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
-}
-
 int
 bna_port_mtu_get(struct bna_port *port)
 {
@@ -1292,54 +1294,6 @@ bna_port_mac_get(struct bna_port *port, mac_t *mac)
        *mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
 }
 
-/**
- * Should be called only when port is disabled
- */
-void
-bna_port_type_set(struct bna_port *port, enum bna_port_type type)
-{
-       port->type = type;
-       port->llport.type = type;
-}
-
-/**
- * Should be called only when port is disabled
- */
-void
-bna_port_linkcbfn_set(struct bna_port *port,
-                     void (*linkcbfn)(struct bnad *, enum bna_link_status))
-{
-       port->link_cbfn = linkcbfn;
-}
-
-void
-bna_port_admin_up(struct bna_port *port)
-{
-       struct bna_llport *llport = &port->llport;
-
-       if (llport->flags & BNA_LLPORT_F_ENABLED)
-               return;
-
-       llport->flags |= BNA_LLPORT_F_ENABLED;
-
-       if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
-               bfa_fsm_send_event(llport, LLPORT_E_UP);
-}
-
-void
-bna_port_admin_down(struct bna_port *port)
-{
-       struct bna_llport *llport = &port->llport;
-
-       if (!(llport->flags & BNA_LLPORT_F_ENABLED))
-               return;
-
-       llport->flags &= ~BNA_LLPORT_F_ENABLED;
-
-       if (llport->flags & BNA_LLPORT_F_RX_ENABLED)
-               bfa_fsm_send_event(llport, LLPORT_E_DOWN);
-}
-
 /**
  * DEVICE
  */
@@ -1357,7 +1311,7 @@ do {\
        bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
 } while (0)
 
-const struct bna_chip_regs_offset reg_offset[] =
+static const struct bna_chip_regs_offset reg_offset[] =
 {{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
        HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
 {HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
@@ -1642,7 +1596,34 @@ static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
        bna_device_cb_iocll_reset
 };
 
-void
+/* device */
+static void
+bna_adv_device_init(struct bna_device *device, struct bna *bna,
+               struct bna_res_info *res_info)
+{
+       u8 *kva;
+       u64 dma;
+
+       device->bna = bna;
+
+       kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+
+       /**
+        * Attach common modules (Diag, SFP, CEE, Port) and claim respective
+        * DMA memory.
+        */
+       BNA_GET_DMA_ADDR(
+               &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
+       kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
+
+       bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
+       bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
+       kva += bfa_nw_cee_meminfo();
+       dma += bfa_nw_cee_meminfo();
+
+}
+
+static void
 bna_device_init(struct bna_device *device, struct bna *bna,
                struct bna_res_info *res_info)
 {
@@ -1681,7 +1662,7 @@ bna_device_init(struct bna_device *device, struct bna *bna,
        bfa_fsm_set_state(device, bna_device_sm_stopped);
 }
 
-void
+static void
 bna_device_uninit(struct bna_device *device)
 {
        bna_mbox_mod_uninit(&device->bna->mbox_mod);
@@ -1691,7 +1672,7 @@ bna_device_uninit(struct bna_device *device)
        device->bna = NULL;
 }
 
-void
+static void
 bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
 {
        struct bna_device *device = (struct bna_device *)arg;
@@ -1699,7 +1680,7 @@ bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
        bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
 }
 
-int
+static int
 bna_device_status_get(struct bna_device *device)
 {
        return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
@@ -1733,24 +1714,13 @@ bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
        bfa_fsm_send_event(device, DEVICE_E_DISABLE);
 }
 
-int
+static int
 bna_device_state_get(struct bna_device *device)
 {
        return bfa_sm_to_state(device_sm_table, device->fsm);
 }
 
-u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
-       {12, 20},
-       {10, 18},
-       {8, 16},
-       {6, 12},
-       {4, 8},
-       {3, 6},
-       {2, 4},
-       {1, 2},
-};
-
-u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
+const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
        {12, 12},
        {6, 10},
        {5, 10},
@@ -1761,36 +1731,9 @@ u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
        {1, 2},
 };
 
-/* device */
-void
-bna_adv_device_init(struct bna_device *device, struct bna *bna,
-               struct bna_res_info *res_info)
-{
-       u8 *kva;
-       u64 dma;
-
-       device->bna = bna;
-
-       kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
-
-       /**
-        * Attach common modules (Diag, SFP, CEE, Port) and claim respective
-        * DMA memory.
-        */
-       BNA_GET_DMA_ADDR(
-               &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
-       kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
-
-       bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
-       bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
-       kva += bfa_nw_cee_meminfo();
-       dma += bfa_nw_cee_meminfo();
-
-}
-
 /* utils */
 
-void
+static void
 bna_adv_res_req(struct bna_res_info *res_info)
 {
        /* DMA memory for COMMON_MODULE */
@@ -2044,36 +1987,6 @@ bna_fw_stats_get(struct bna *bna)
        bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
 }
 
-static void
-bna_fw_cb_stats_clr(void *arg, int status)
-{
-       struct bna *bna = (struct bna *)arg;
-
-       bfa_q_qe_init(&bna->mbox_qe.qe);
-
-       memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats));
-       memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats));
-
-       bnad_cb_stats_clr(bna->bnad);
-}
-
-static void
-bna_fw_stats_clr(struct bna *bna)
-{
-       struct bfi_ll_stats_req ll_req;
-
-       bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
-       ll_req.stats_mask = htons(BFI_LL_STATS_ALL);
-       ll_req.rxf_id_mask[0] = htonl(0xffffffff);
-       ll_req.rxf_id_mask[1] = htonl(0xffffffff);
-       ll_req.txf_id_mask[0] = htonl(0xffffffff);
-       ll_req.txf_id_mask[1] = htonl(0xffffffff);
-
-       bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
-                               bna_fw_cb_stats_clr, bna);
-       bna_mbox_send(bna, &bna->mbox_qe);
-}
-
 void
 bna_stats_get(struct bna *bna)
 {
@@ -2083,22 +1996,8 @@ bna_stats_get(struct bna *bna)
                bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
 }
 
-void
-bna_stats_clr(struct bna *bna)
-{
-       if (bna_device_status_get(&bna->device))
-               bna_fw_stats_clr(bna);
-       else {
-               memset(&bna->stats.sw_stats, 0,
-                               sizeof(struct bna_sw_stats));
-               memset(bna->stats.hw_stats, 0,
-                               sizeof(struct bfi_ll_stats));
-               bnad_cb_stats_clr(bna->bnad);
-       }
-}
-
 /* IB */
-void
+static void
 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
 {
        ib->ib_config.coalescing_timeo = coalescing_timeo;
@@ -2157,7 +2056,7 @@ rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
        bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
 }
 
-void
+static void
 __rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
 {
        struct bna_rx_fndb_ram *rx_fndb_ram;
@@ -2553,7 +2452,7 @@ rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
  *     0 = no h/w change
  *     1 = need h/w change
  */
-int
+static int
 rxf_promisc_enable(struct bna_rxf *rxf)
 {
        struct bna *bna = rxf->rx->bna;
@@ -2584,7 +2483,7 @@ rxf_promisc_enable(struct bna_rxf *rxf)
  *     0 = no h/w change
  *     1 = need h/w change
  */
-int
+static int
 rxf_promisc_disable(struct bna_rxf *rxf)
 {
        struct bna *bna = rxf->rx->bna;
@@ -2623,7 +2522,7 @@ rxf_promisc_disable(struct bna_rxf *rxf)
  *     0 = no h/w change
  *     1 = need h/w change
  */
-int
+static int
 rxf_default_enable(struct bna_rxf *rxf)
 {
        struct bna *bna = rxf->rx->bna;
@@ -2654,7 +2553,7 @@ rxf_default_enable(struct bna_rxf *rxf)
  *     0 = no h/w change
  *     1 = need h/w change
  */
-int
+static int
 rxf_default_disable(struct bna_rxf *rxf)
 {
        struct bna *bna = rxf->rx->bna;
@@ -2693,7 +2592,7 @@ rxf_default_disable(struct bna_rxf *rxf)
  *     0 = no h/w change
  *     1 = need h/w change
  */
-int
+static int
 rxf_allmulti_enable(struct bna_rxf *rxf)
 {
        int ret = 0;
@@ -2721,7 +2620,7 @@ rxf_allmulti_enable(struct bna_rxf *rxf)
  *     0 = no h/w change
  *     1 = need h/w change
  */
-int
+static int
 rxf_allmulti_disable(struct bna_rxf *rxf)
 {
        int ret = 0;
@@ -2745,159 +2644,6 @@ rxf_allmulti_disable(struct bna_rxf *rxf)
        return ret;
 }
 
-/* RxF <- bnad */
-void
-bna_rx_mcast_delall(struct bna_rx *rx,
-                   void (*cbfn)(struct bnad *, struct bna_rx *,
-                                enum bna_cb_status))
-{
-       struct bna_rxf *rxf = &rx->rxf;
-       struct list_head *qe;
-       struct bna_mac *mac;
-       int need_hw_config = 0;
-
-       /* Purge all entries from pending_add_q */
-       while (!list_empty(&rxf->mcast_pending_add_q)) {
-               bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
-               mac = (struct bna_mac *)qe;
-               bfa_q_qe_init(&mac->qe);
-               bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
-       }
-
-       /* Schedule all entries in active_q for deletion */
-       while (!list_empty(&rxf->mcast_active_q)) {
-               bfa_q_deq(&rxf->mcast_active_q, &qe);
-               mac = (struct bna_mac *)qe;
-               bfa_q_qe_init(&mac->qe);
-               list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
-               need_hw_config = 1;
-       }
-
-       if (need_hw_config) {
-               rxf->cam_fltr_cbfn = cbfn;
-               rxf->cam_fltr_cbarg = rx->bna->bnad;
-               bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-               return;
-       }
-
-       if (cbfn)
-               (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-}
-
-/* RxF <- Rx */
-void
-bna_rx_receive_resume(struct bna_rx *rx,
-                     void (*cbfn)(struct bnad *, struct bna_rx *,
-                                  enum bna_cb_status))
-{
-       struct bna_rxf *rxf = &rx->rxf;
-
-       if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) {
-               rxf->oper_state_cbfn = cbfn;
-               rxf->oper_state_cbarg = rx->bna->bnad;
-               bfa_fsm_send_event(rxf, RXF_E_RESUME);
-       } else if (cbfn)
-               (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-}
-
-void
-bna_rx_receive_pause(struct bna_rx *rx,
-                    void (*cbfn)(struct bnad *, struct bna_rx *,
-                                 enum bna_cb_status))
-{
-       struct bna_rxf *rxf = &rx->rxf;
-
-       if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) {
-               rxf->oper_state_cbfn = cbfn;
-               rxf->oper_state_cbarg = rx->bna->bnad;
-               bfa_fsm_send_event(rxf, RXF_E_PAUSE);
-       } else if (cbfn)
-               (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-}
-
-/* RxF <- bnad */
-enum bna_cb_status
-bna_rx_ucast_add(struct bna_rx *rx, u8 *addr,
-                void (*cbfn)(struct bnad *, struct bna_rx *,
-                             enum bna_cb_status))
-{
-       struct bna_rxf *rxf = &rx->rxf;
-       struct list_head *qe;
-       struct bna_mac *mac;
-
-       /* Check if already added */
-       list_for_each(qe, &rxf->ucast_active_q) {
-               mac = (struct bna_mac *)qe;
-               if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-                       if (cbfn)
-                               (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-                       return BNA_CB_SUCCESS;
-               }
-       }
-
-       /* Check if pending addition */
-       list_for_each(qe, &rxf->ucast_pending_add_q) {
-               mac = (struct bna_mac *)qe;
-               if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-                       if (cbfn)
-                               (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-                       return BNA_CB_SUCCESS;
-               }
-       }
-
-       mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
-       if (mac == NULL)
-               return BNA_CB_UCAST_CAM_FULL;
-       bfa_q_qe_init(&mac->qe);
-       memcpy(mac->addr, addr, ETH_ALEN);
-       list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
-
-       rxf->cam_fltr_cbfn = cbfn;
-       rxf->cam_fltr_cbarg = rx->bna->bnad;
-
-       bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-
-       return BNA_CB_SUCCESS;
-}
-
-/* RxF <- bnad */
-enum bna_cb_status
-bna_rx_ucast_del(struct bna_rx *rx, u8 *addr,
-                void (*cbfn)(struct bnad *, struct bna_rx *,
-                             enum bna_cb_status))
-{
-       struct bna_rxf *rxf = &rx->rxf;
-       struct list_head *qe;
-       struct bna_mac *mac;
-
-       list_for_each(qe, &rxf->ucast_pending_add_q) {
-               mac = (struct bna_mac *)qe;
-               if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-                       list_del(qe);
-                       bfa_q_qe_init(qe);
-                       bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
-                       if (cbfn)
-                               (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-                       return BNA_CB_SUCCESS;
-               }
-       }
-
-       list_for_each(qe, &rxf->ucast_active_q) {
-               mac = (struct bna_mac *)qe;
-               if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-                       list_del(qe);
-                       bfa_q_qe_init(qe);
-                       list_add_tail(qe, &rxf->ucast_pending_del_q);
-                       rxf->cam_fltr_cbfn = cbfn;
-                       rxf->cam_fltr_cbarg = rx->bna->bnad;
-                       bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-                       return BNA_CB_SUCCESS;
-               }
-       }
-
-       return BNA_CB_INVALID_MAC;
-}
-
 /* RxF <- bnad */
 enum bna_cb_status
 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
@@ -2978,39 +2724,6 @@ err_return:
        return BNA_CB_FAIL;
 }
 
-/* RxF <- bnad */
-void
-bna_rx_rss_enable(struct bna_rx *rx)
-{
-       struct bna_rxf *rxf = &rx->rxf;
-
-       rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-       rxf->rss_status = BNA_STATUS_T_ENABLED;
-       bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-}
-
-/* RxF <- bnad */
-void
-bna_rx_rss_disable(struct bna_rx *rx)
-{
-       struct bna_rxf *rxf = &rx->rxf;
-
-       rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-       rxf->rss_status = BNA_STATUS_T_DISABLED;
-       bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-}
-
-/* RxF <- bnad */
-void
-bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config)
-{
-       struct bna_rxf *rxf = &rx->rxf;
-       rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
-       rxf->rss_status = BNA_STATUS_T_ENABLED;
-       rxf->rss_cfg = *rss_config;
-       bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-}
-
 void
 /* RxF <- bnad */
 bna_rx_vlanfilter_enable(struct bna_rx *rx)
@@ -3024,68 +2737,8 @@ bna_rx_vlanfilter_enable(struct bna_rx *rx)
        }
 }
 
-/* RxF <- bnad */
-void
-bna_rx_vlanfilter_disable(struct bna_rx *rx)
-{
-       struct bna_rxf *rxf = &rx->rxf;
-
-       if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
-               rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
-               rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
-               bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-       }
-}
-
 /* Rx */
 
-struct bna_rxp *
-bna_rx_get_rxp(struct bna_rx *rx, int vector)
-{
-       struct bna_rxp *rxp;
-       struct list_head *qe;
-
-       list_for_each(qe, &rx->rxp_q) {
-               rxp = (struct bna_rxp *)qe;
-               if (rxp->vector == vector)
-                       return rxp;
-       }
-       return NULL;
-}
-
-/*
- * bna_rx_rss_rit_set()
- * Sets the Q ids for the specified msi-x vectors in the RIT.
- * Maximum rit size supported is 64, which should be the max size of the
- * vectors array.
- */
-
-void
-bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors)
-{
-       int i;
-       struct bna_rxp *rxp;
-       struct bna_rxq *q0 = NULL, *q1 = NULL;
-       struct bna *bna;
-       struct bna_rxf *rxf;
-
-       /* Build the RIT contents for this RX */
-       bna = rx->bna;
-
-       rxf = &rx->rxf;
-       for (i = 0; i < nvectors; i++) {
-               rxp = bna_rx_get_rxp(rx, vectors[i]);
-
-               GET_RXQS(rxp, q0, q1);
-               rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id;
-               rxf->rit_segment->rit[i].small_rxq_id = (q1 ? q1->rxq_id : 0);
-       }
-
-       rxf->rit_segment->rit_size = nvectors;
-
-       /* Subsequent call to enable/reconfig RSS will update the RIT in h/w */
-}
-
 /* Rx <- bnad */
 void
 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
@@ -3102,7 +2755,7 @@ bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
 
 /* Rx <- bnad */
 void
-bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX])
+bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
 {
        int i, j;
 
@@ -3164,22 +2817,6 @@ bna_rx_dim_update(struct bna_ccb *ccb)
 }
 
 /* Tx */
-/* TX <- bnad */
-enum bna_cb_status
-bna_tx_prio_set(struct bna_tx *tx, int prio,
-               void (*cbfn)(struct bnad *, struct bna_tx *,
-                            enum bna_cb_status))
-{
-       if (tx->flags & BNA_TX_F_PRIO_LOCK)
-               return BNA_CB_FAIL;
-       else {
-               tx->prio_change_cbfn = cbfn;
-               bna_tx_prio_changed(tx, prio);
-       }
-
-       return BNA_CB_SUCCESS;
-}
-
 /* TX <- bnad */
 void
 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
drivers/net/bna/bna_hw.h
index 67eb376c5c7e69639ba7d8f66c8603af8c512cfc..806b224a4c63fcfaa9c5660581664f4317c11809 100644 (file)
@@ -1282,7 +1282,6 @@ struct bna_chip_regs_offset {
        u32 fn_int_mask;
        u32 msix_idx;
 };
-extern const struct bna_chip_regs_offset reg_offset[];
 
 struct bna_chip_regs {
        void __iomem *page_addr;
drivers/net/bna/bna_txrx.c
index 890846d55502c8f14eac4aebbf67e2011b1dca80..ad93fdb0f427a0b294b6c5575a3a8e2cc56ebd78 100644 (file)
@@ -195,7 +195,7 @@ bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
        ib_mod->bna = NULL;
 }
 
-struct bna_ib *
+static struct bna_ib *
 bna_ib_get(struct bna_ib_mod *ib_mod,
                enum bna_intr_type intr_type,
                int vector)
@@ -240,7 +240,7 @@ bna_ib_get(struct bna_ib_mod *ib_mod,
        return ib;
 }
 
-void
+static void
 bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
 {
        bna_intr_put(ib_mod, ib->intr);
@@ -255,7 +255,7 @@ bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
 }
 
 /* Returns index offset - starting from 0 */
-int
+static int
 bna_ib_reserve_idx(struct bna_ib *ib)
 {
        struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
@@ -309,7 +309,7 @@ bna_ib_reserve_idx(struct bna_ib *ib)
        return idx;
 }
 
-void
+static void
 bna_ib_release_idx(struct bna_ib *ib, int idx)
 {
        struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
@@ -356,7 +356,7 @@ bna_ib_release_idx(struct bna_ib *ib, int idx)
        }
 }
 
-int
+static int
 bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
 {
        if (ib->start_count)
@@ -374,7 +374,7 @@ bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
        return 0;
 }
 
-void
+static void
 bna_ib_start(struct bna_ib *ib)
 {
        struct bna_ib_blk_mem ib_cfg;
@@ -450,7 +450,7 @@ bna_ib_start(struct bna_ib *ib)
        }
 }
 
-void
+static void
 bna_ib_stop(struct bna_ib *ib)
 {
        u32 intx_mask;
@@ -468,7 +468,7 @@ bna_ib_stop(struct bna_ib *ib)
        }
 }
 
-void
+static void
 bna_ib_fail(struct bna_ib *ib)
 {
        ib->start_count = 0;
@@ -1394,7 +1394,7 @@ rxf_reset_packet_filter(struct bna_rxf *rxf)
        rxf_reset_packet_filter_allmulti(rxf);
 }
 
-void
+static void
 bna_rxf_init(struct bna_rxf *rxf,
                struct bna_rx *rx,
                struct bna_rx_config *q_config)
@@ -1444,7 +1444,7 @@ bna_rxf_init(struct bna_rxf *rxf,
        bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
 }
 
-void
+static void
 bna_rxf_uninit(struct bna_rxf *rxf)
 {
        struct bna_mac *mac;
@@ -1476,7 +1476,18 @@ bna_rxf_uninit(struct bna_rxf *rxf)
        rxf->rx = NULL;
 }
 
-void
+static void
+bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
+{
+       bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
+       if (rx->rxf.rxf_id < 32)
+               rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
+       else
+               rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
+                               1 << (rx->rxf.rxf_id - 32));
+}
+
+static void
 bna_rxf_start(struct bna_rxf *rxf)
 {
        rxf->start_cbfn = bna_rx_cb_rxf_started;
@@ -1485,7 +1496,18 @@ bna_rxf_start(struct bna_rxf *rxf)
        bfa_fsm_send_event(rxf, RXF_E_START);
 }
 
-void
+static void
+bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
+{
+       bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
+       if (rx->rxf.rxf_id < 32)
+               rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
+       else
+               rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
+                               1 << (rx->rxf.rxf_id - 32);
+}
+
+static void
 bna_rxf_stop(struct bna_rxf *rxf)
 {
        rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
@@ -1493,7 +1515,7 @@ bna_rxf_stop(struct bna_rxf *rxf)
        bfa_fsm_send_event(rxf, RXF_E_STOP);
 }
 
-void
+static void
 bna_rxf_fail(struct bna_rxf *rxf)
 {
        rxf->rxf_flags |= BNA_RXF_FL_FAILED;
@@ -1575,43 +1597,6 @@ bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
        return BNA_CB_SUCCESS;
 }
 
-enum bna_cb_status
-bna_rx_mcast_del(struct bna_rx *rx, u8 *addr,
-                void (*cbfn)(struct bnad *, struct bna_rx *,
-                             enum bna_cb_status))
-{
-       struct bna_rxf *rxf = &rx->rxf;
-       struct list_head *qe;
-       struct bna_mac *mac;
-
-       list_for_each(qe, &rxf->mcast_pending_add_q) {
-               mac = (struct bna_mac *)qe;
-               if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-                       list_del(qe);
-                       bfa_q_qe_init(qe);
-                       bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
-                       if (cbfn)
-                               (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
-                       return BNA_CB_SUCCESS;
-               }
-       }
-
-       list_for_each(qe, &rxf->mcast_active_q) {
-               mac = (struct bna_mac *)qe;
-               if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
-                       list_del(qe);
-                       bfa_q_qe_init(qe);
-                       list_add_tail(qe, &rxf->mcast_pending_del_q);
-                       rxf->cam_fltr_cbfn = cbfn;
-                       rxf->cam_fltr_cbarg = rx->bna->bnad;
-                       bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
-                       return BNA_CB_SUCCESS;
-               }
-       }
-
-       return BNA_CB_INVALID_MAC;
-}
-
 enum bna_cb_status
 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
                     void (*cbfn)(struct bnad *, struct bna_rx *,
@@ -1862,7 +1847,7 @@ bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
 bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
        struct bna_rx, enum bna_rx_event);
 
-static struct bfa_sm_table rx_sm_table[] = {
+static const struct bfa_sm_table rx_sm_table[] = {
        {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
        {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
        {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
@@ -2247,7 +2232,7 @@ bna_rit_create(struct bna_rx *rx)
        }
 }
 
-int
+static int
 _rx_can_satisfy(struct bna_rx_mod *rx_mod,
                struct bna_rx_config *rx_cfg)
 {
@@ -2272,7 +2257,7 @@ _rx_can_satisfy(struct bna_rx_mod *rx_mod,
        return 1;
 }
 
-struct bna_rxq *
+static struct bna_rxq *
 _get_free_rxq(struct bna_rx_mod *rx_mod)
 {
        struct bna_rxq *rxq = NULL;
@@ -2286,7 +2271,7 @@ _get_free_rxq(struct bna_rx_mod *rx_mod)
        return rxq;
 }
 
-void
+static void
 _put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
 {
        bfa_q_qe_init(&rxq->qe);
@@ -2294,7 +2279,7 @@ _put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
        rx_mod->rxq_free_count++;
 }
 
-struct bna_rxp *
+static struct bna_rxp *
 _get_free_rxp(struct bna_rx_mod *rx_mod)
 {
        struct list_head        *qe = NULL;
@@ -2310,7 +2295,7 @@ _get_free_rxp(struct bna_rx_mod *rx_mod)
        return rxp;
 }
 
-void
+static void
 _put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
 {
        bfa_q_qe_init(&rxp->qe);
@@ -2318,7 +2303,7 @@ _put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
        rx_mod->rxp_free_count++;
 }
 
-struct bna_rx *
+static struct bna_rx *
 _get_free_rx(struct bna_rx_mod *rx_mod)
 {
        struct list_head        *qe = NULL;
@@ -2336,7 +2321,7 @@ _get_free_rx(struct bna_rx_mod *rx_mod)
        return rx;
 }
 
-void
+static void
 _put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
 {
        bfa_q_qe_init(&rx->qe);
@@ -2344,7 +2329,7 @@ _put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
        rx_mod->rx_free_count++;
 }
 
-void
+static void
 _rx_init(struct bna_rx *rx, struct bna *bna)
 {
        rx->bna = bna;
@@ -2360,7 +2345,7 @@ _rx_init(struct bna_rx *rx, struct bna *bna)
        rx->stop_cbarg = NULL;
 }
 
-void
+static void
 _rxp_add_rxqs(struct bna_rxp *rxp,
                struct bna_rxq *q0,
                struct bna_rxq *q1)
@@ -2383,7 +2368,7 @@ _rxp_add_rxqs(struct bna_rxp *rxp,
        }
 }
 
-void
+static void
 _rxq_qpt_init(struct bna_rxq *rxq,
                struct bna_rxp *rxp,
                u32 page_count,
@@ -2412,7 +2397,7 @@ _rxq_qpt_init(struct bna_rxq *rxq,
        }
 }
 
-void
+static void
 _rxp_cqpt_setup(struct bna_rxp *rxp,
                u32 page_count,
                u32 page_size,
@@ -2441,13 +2426,13 @@ _rxp_cqpt_setup(struct bna_rxp *rxp,
        }
 }
 
-void
+static void
 _rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
 {
        list_add_tail(&rxp->qe, &rx->rxp_q);
 }
 
-void
+static void
 _init_rxmod_queues(struct bna_rx_mod *rx_mod)
 {
        INIT_LIST_HEAD(&rx_mod->rx_free_q);
@@ -2460,7 +2445,7 @@ _init_rxmod_queues(struct bna_rx_mod *rx_mod)
        rx_mod->rxp_free_count = 0;
 }
 
-void
+static void
 _rx_ctor(struct bna_rx *rx, int id)
 {
        bfa_q_qe_init(&rx->qe);
@@ -2492,7 +2477,7 @@ bna_rx_cb_rxq_stopped_all(void *arg)
        bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
 }
 
-void
+static void
 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
                         enum bna_cb_status status)
 {
@@ -2501,7 +2486,7 @@ bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
        bfa_wc_down(&rx_mod->rx_stop_wc);
 }
 
-void
+static void
 bna_rx_mod_cb_rx_stopped_all(void *arg)
 {
        struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
@@ -2511,7 +2496,7 @@ bna_rx_mod_cb_rx_stopped_all(void *arg)
        rx_mod->stop_cbfn = NULL;
 }
 
-void
+static void
 bna_rx_start(struct bna_rx *rx)
 {
        rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
@@ -2519,7 +2504,7 @@ bna_rx_start(struct bna_rx *rx)
                bfa_fsm_send_event(rx, RX_E_START);
 }
 
-void
+static void
 bna_rx_stop(struct bna_rx *rx)
 {
        rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
@@ -2532,7 +2517,7 @@ bna_rx_stop(struct bna_rx *rx)
        }
 }
 
-void
+static void
 bna_rx_fail(struct bna_rx *rx)
 {
        /* Indicate port is not enabled, and failed */
@@ -2541,28 +2526,6 @@ bna_rx_fail(struct bna_rx *rx)
        bfa_fsm_send_event(rx, RX_E_FAIL);
 }
 
-void
-bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
-{
-       bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
-       if (rx->rxf.rxf_id < 32)
-               rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
-       else
-               rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
-                               1 << (rx->rxf.rxf_id - 32));
-}
-
-void
-bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
-{
-       bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
-       if (rx->rxf.rxf_id < 32)
-               rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
-       else
-               rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
-                               1 << (rx->rxf.rxf_id - 32);
-}
-
 void
 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
 {
@@ -3731,7 +3694,7 @@ bna_tx_fail(struct bna_tx *tx)
        bfa_fsm_send_event(tx, TX_E_FAIL);
 }
 
-void
+static void
 bna_tx_prio_changed(struct bna_tx *tx, int prio)
 {
        struct bna_txq *txq;
drivers/net/bna/bnad.c
index e380c0e88f4fb4c629eb60fa150528f5a8b16036..74c64d6c88019e0c5ee6641be309e0216a51dc9a 100644 (file)
@@ -28,7 +28,7 @@
 #include "bna.h"
 #include "cna.h"
 
-DEFINE_MUTEX(bnad_fwimg_mutex);
+static DEFINE_MUTEX(bnad_fwimg_mutex);
 
 /*
  * Module params
@@ -46,7 +46,7 @@ MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
  */
 u32 bnad_rxqs_per_cq = 2;
 
-const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+static const u8 bnad_bcast_addr[] =  {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
 /*
  * Local MACROS
@@ -564,9 +564,11 @@ bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
 static void
 bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
 {
-       spin_lock_irq(&bnad->bna_lock); /* Because of polling context */
+       unsigned long flags;
+
+       spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */
        bnad_enable_rx_irq_unsafe(ccb);
-       spin_unlock_irq(&bnad->bna_lock);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
 static void
@@ -599,7 +601,7 @@ static irqreturn_t
 bnad_msix_mbox_handler(int irq, void *data)
 {
        u32 intr_status;
-       unsigned long  flags;
+       unsigned long flags;
        struct net_device *netdev = data;
        struct bnad *bnad;
 
@@ -630,13 +632,15 @@ bnad_isr(int irq, void *data)
        struct bnad_rx_info *rx_info;
        struct bnad_rx_ctrl *rx_ctrl;
 
-       spin_lock_irqsave(&bnad->bna_lock, flags);
+       if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+               return IRQ_NONE;
 
        bna_intr_status_get(&bnad->bna, intr_status);
-       if (!intr_status) {
-               spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+       if (unlikely(!intr_status))
                return IRQ_NONE;
-       }
+
+       spin_lock_irqsave(&bnad->bna_lock, flags);
 
        if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
                bna_mbox_handler(&bnad->bna, intr_status);
@@ -672,11 +676,10 @@ bnad_enable_mbox_irq(struct bnad *bnad)
 {
        int irq = BNAD_GET_MBOX_IRQ(bnad);
 
-       if (!(bnad->cfg_flags & BNAD_CF_MSIX))
-               return;
-
        if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
-               enable_irq(irq);
+               if (bnad->cfg_flags & BNAD_CF_MSIX)
+                       enable_irq(irq);
+
        BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
 }
 
@@ -684,16 +687,16 @@ bnad_enable_mbox_irq(struct bnad *bnad)
  * Called with bnad->bna_lock held b'cos of
  * bnad->cfg_flags access.
  */
-void
+static void
 bnad_disable_mbox_irq(struct bnad *bnad)
 {
        int irq = BNAD_GET_MBOX_IRQ(bnad);
 
-       if (!(bnad->cfg_flags & BNAD_CF_MSIX))
-               return;
 
        if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
-               disable_irq_nosync(irq);
+               if (bnad->cfg_flags & BNAD_CF_MSIX)
+                       disable_irq_nosync(irq);
+
        BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
 }
 
@@ -953,11 +956,6 @@ bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
                  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
 }
 
-void
-bnad_cb_stats_clr(struct bnad *bnad)
-{
-}
-
 /* Resource allocation, free functions */
 
 static void
@@ -1045,14 +1043,12 @@ bnad_mbox_irq_free(struct bnad *bnad,
                return;
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
-
        bnad_disable_mbox_irq(bnad);
+       spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        irq = BNAD_GET_MBOX_IRQ(bnad);
        free_irq(irq, bnad->netdev);
 
-       spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
        kfree(intr_info->idl);
 }
 
@@ -1094,8 +1090,15 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
 
        sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
 
+       /*
+        * Set the Mbox IRQ disable flag, so that the IRQ handler
+        * called from request_irq() for SHARED IRQs does not execute
+        */
+       set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
+
        err = request_irq(irq, irq_handler, flags,
                          bnad->mbox_irq_name, bnad->netdev);
+
        if (err) {
                kfree(intr_info->idl);
                intr_info->idl = NULL;
@@ -1103,7 +1106,10 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
        }
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
-       bnad_disable_mbox_irq(bnad);
+
+       if (bnad->cfg_flags & BNAD_CF_MSIX)
+               disable_irq_nosync(irq);
+
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
        return 0;
 }
@@ -1485,7 +1491,6 @@ bnad_stats_timer_start(struct bnad *bnad)
                          jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
        }
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
 }
 
 /*
@@ -2170,7 +2175,6 @@ bnad_device_disable(struct bnad *bnad)
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        wait_for_completion(&bnad->bnad_completions.ioc_comp);
-
 }
 
 static int
@@ -2236,7 +2240,6 @@ static void
 bnad_enable_msix(struct bnad *bnad)
 {
        int i, ret;
-       u32 tot_msix_num;
        unsigned long flags;
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
@@ -2249,18 +2252,16 @@ bnad_enable_msix(struct bnad *bnad)
        if (bnad->msix_table)
                return;
 
-       tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
-
        bnad->msix_table =
-               kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL);
+               kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
 
        if (!bnad->msix_table)
                goto intx_mode;
 
-       for (i = 0; i < tot_msix_num; i++)
+       for (i = 0; i < bnad->msix_num; i++)
                bnad->msix_table[i].entry = i;
 
-       ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num);
+       ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
        if (ret > 0) {
                /* Not enough MSI-X vectors. */
 
@@ -2273,12 +2274,11 @@ bnad_enable_msix(struct bnad *bnad)
                        + (bnad->num_rx
                        * bnad->num_rxp_per_rx) +
                         BNAD_MAILBOX_MSIX_VECTORS;
-               tot_msix_num = bnad->msix_num + bnad->msix_diag_num;
 
                /* Try once more with adjusted numbers */
                /* If this fails, fall back to INTx */
                ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
-                                     tot_msix_num);
+                                     bnad->msix_num);
                if (ret)
                        goto intx_mode;
 
@@ -2291,7 +2291,6 @@ intx_mode:
        kfree(bnad->msix_table);
        bnad->msix_table = NULL;
        bnad->msix_num = 0;
-       bnad->msix_diag_num = 0;
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bnad->cfg_flags &= ~BNAD_CF_MSIX;
        bnad_q_num_init(bnad);
@@ -2939,7 +2938,6 @@ bnad_init(struct bnad *bnad,
        bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
                (bnad->num_rx * bnad->num_rxp_per_rx) +
                         BNAD_MAILBOX_MSIX_VECTORS;
-       bnad->msix_diag_num = 2;        /* 1 for Tx, 1 for Rx */
 
        bnad->txq_depth = BNAD_TXQ_DEPTH;
        bnad->rxq_depth = BNAD_RXQ_DEPTH;
@@ -3108,7 +3106,6 @@ bnad_pci_probe(struct pci_dev *pdev,
 
        spin_lock_irqsave(&bnad->bna_lock, flags);
        bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
-
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
        bnad->stats.bna_stats = &bna->stats;
@@ -3211,7 +3208,7 @@ bnad_pci_remove(struct pci_dev *pdev)
        free_netdev(netdev);
 }
 
-const struct pci_device_id bnad_pci_id_table[] = {
+static const struct pci_device_id bnad_pci_id_table[] = {
        {
                PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
                        PCI_DEVICE_ID_BROCADE_CT),
drivers/net/bna/bnad.h
index ee377888b905739e19ac84fd4480df9cb17d7bb0..ebc3a9078642d6b3fd5447c17f824fcdc29715f3 100644 (file)
@@ -248,7 +248,6 @@ struct bnad {
        u64             mmio_len;
 
        u32             msix_num;
-       u32             msix_diag_num;
        struct msix_entry       *msix_table;
 
        struct mutex            conf_mutex;
drivers/net/bna/cna_fwimg.c
index 0bd1d3790a27e7c118e251af7f0ee2470ab7ea21..e8f4ecd9ebb5cd8c70235d891899673059bb76a3 100644 (file)
@@ -22,7 +22,7 @@ const struct firmware *bfi_fw;
 static u32 *bfi_image_ct_cna;
 static u32 bfi_image_ct_cna_size;
 
-u32 *
+static u32 *
 cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
                        u32 *bfi_image_size, char *fw_name)
 {
drivers/net/bonding/bond_main.c
index fb70c3e12927194ec0f16ac37955471cfd5333e5..6f5e6b453da69b0add1f1a51bc0a936d7b0a2500 100644 (file)
@@ -109,6 +109,7 @@ static char *arp_validate;
 static char *fail_over_mac;
 static int all_slaves_active = 0;
 static struct bond_params bonding_defaults;
+static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
 
 module_param(max_bonds, int, 0);
 MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
@@ -163,6 +164,8 @@ module_param(all_slaves_active, int, 0);
 MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
                                     "by setting active flag for all slaves.  "
                                     "0 for never (default), 1 for always.");
+module_param(resend_igmp, int, 0);
+MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link failure");
 
 /*----------------------------- Global variables ----------------------------*/
 
@@ -865,18 +868,13 @@ static void bond_mc_del(struct bonding *bond, void *addr)
 }
 
 
-/*
- * Retrieve the list of registered multicast addresses for the bonding
- * device and retransmit an IGMP JOIN request to the current active
- * slave.
- */
-static void bond_resend_igmp_join_requests(struct bonding *bond)
+static void __bond_resend_igmp_join_requests(struct net_device *dev)
 {
        struct in_device *in_dev;
        struct ip_mc_list *im;
 
        rcu_read_lock();
-       in_dev = __in_dev_get_rcu(bond->dev);
+       in_dev = __in_dev_get_rcu(dev);
        if (in_dev) {
                for (im = in_dev->mc_list; im; im = im->next)
                        ip_mc_rejoin_group(im);
@@ -885,6 +883,44 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
        rcu_read_unlock();
 }
 
+/*
+ * Retrieve the list of registered multicast addresses for the bonding
+ * device and retransmit an IGMP JOIN request to the current active
+ * slave.
+ */
+static void bond_resend_igmp_join_requests(struct bonding *bond)
+{
+       struct net_device *vlan_dev;
+       struct vlan_entry *vlan;
+
+       read_lock(&bond->lock);
+
+       /* rejoin all groups on bond device */
+       __bond_resend_igmp_join_requests(bond->dev);
+
+       /* rejoin all groups on vlan devices */
+       if (bond->vlgrp) {
+               list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+                       vlan_dev = vlan_group_get_device(bond->vlgrp,
+                                                        vlan->vlan_id);
+                       if (vlan_dev)
+                               __bond_resend_igmp_join_requests(vlan_dev);
+               }
+       }
+
+       if (--bond->igmp_retrans > 0)
+               queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
+
+       read_unlock(&bond->lock);
+}
+
+void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
+{
+       struct bonding *bond = container_of(work, struct bonding,
+                                                       mcast_work.work);
+       bond_resend_igmp_join_requests(bond);
+}
+
 /*
  * flush all members of flush->mc_list from device dev->mc_list
  */
@@ -944,7 +980,6 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
 
                netdev_for_each_mc_addr(ha, bond->dev)
                        dev_mc_add(new_active->dev, ha->addr);
-               bond_resend_igmp_join_requests(bond);
        }
 }
 
@@ -1180,9 +1215,12 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                }
        }
 
-       /* resend IGMP joins since all were sent on curr_active_slave */
-       if (bond->params.mode == BOND_MODE_ROUNDROBIN) {
-               bond_resend_igmp_join_requests(bond);
+       /* resend IGMP joins since active slave has changed or
+        * all were sent on curr_active_slave */
+       if ((USES_PRIMARY(bond->params.mode) && new_active) ||
+           bond->params.mode == BOND_MODE_ROUNDROBIN) {
+               bond->igmp_retrans = bond->params.resend_igmp;
+               queue_delayed_work(bond->wq, &bond->mcast_work, 0);
        }
 }
 
@@ -3744,6 +3782,8 @@ static int bond_open(struct net_device *bond_dev)
 
        bond->kill_timers = 0;
 
+       INIT_DELAYED_WORK(&bond->mcast_work, bond_resend_igmp_join_requests_delayed);
+
        if (bond_is_lb(bond)) {
                /* bond_alb_initialize must be called before the timer
                 * is started.
@@ -3828,6 +3868,8 @@ static int bond_close(struct net_device *bond_dev)
                break;
        }
 
+       if (delayed_work_pending(&bond->mcast_work))
+               cancel_delayed_work(&bond->mcast_work);
 
        if (bond_is_lb(bond)) {
                /* Must be called only after all
@@ -4703,6 +4745,9 @@ static void bond_work_cancel_all(struct bonding *bond)
        if (bond->params.mode == BOND_MODE_8023AD &&
            delayed_work_pending(&bond->ad_work))
                cancel_delayed_work(&bond->ad_work);
+
+       if (delayed_work_pending(&bond->mcast_work))
+               cancel_delayed_work(&bond->mcast_work);
 }
 
 /*
@@ -4895,6 +4940,13 @@ static int bond_check_params(struct bond_params *params)
                all_slaves_active = 0;
        }
 
+       if (resend_igmp < 0 || resend_igmp > 255) {
+               pr_warning("Warning: resend_igmp (%d) should be between "
+                          "0 and 255, resetting to %d\n",
+                          resend_igmp, BOND_DEFAULT_RESEND_IGMP);
+               resend_igmp = BOND_DEFAULT_RESEND_IGMP;
+       }
+
        /* reset values for TLB/ALB */
        if ((bond_mode == BOND_MODE_TLB) ||
            (bond_mode == BOND_MODE_ALB)) {
@@ -5067,6 +5119,7 @@ static int bond_check_params(struct bond_params *params)
        params->fail_over_mac = fail_over_mac_value;
        params->tx_queues = tx_queues;
        params->all_slaves_active = all_slaves_active;
+       params->resend_igmp = resend_igmp;
 
        if (primary) {
                strncpy(params->primary, primary, IFNAMSIZ);
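
The rejoin path above becomes a self-rescheduling delayed work: bond_change_active_slave() loads igmp_retrans from params.resend_igmp and queues mcast_work immediately, and each run of bond_resend_igmp_join_requests() re-queues itself after HZ/5 jiffies (about 200ms) until the counter is exhausted, so resend_igmp=N spreads N reports over roughly (N-1)*200ms. A minimal sketch of that pattern as a standalone module; the demo_* names are hypothetical and the system workqueue stands in for bond->wq.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work demo_work;
static int demo_retrans = 3;			/* like bond->params.resend_igmp */

static void demo_resend(struct work_struct *work)
{
	pr_info("demo: would resend IGMP membership reports here\n");
	if (--demo_retrans > 0)
		schedule_delayed_work(&demo_work, HZ / 5);	/* ~200ms apart */
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_work, demo_resend);
	schedule_delayed_work(&demo_work, 0);	/* first report right away */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
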
index c311aed9bd022c2870b33fdc8c88a953d6cceb93..01b4c3f5d9e743d29101fca7668853ec23a3aecf 100644 (file)
@@ -1592,6 +1592,49 @@ out:
 static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR,
                   bonding_show_slaves_active, bonding_store_slaves_active);
 
+/*
+ * Show and set the number of IGMP membership reports to send on link failure
+ */
+static ssize_t bonding_show_resend_igmp(struct device *d,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct bonding *bond = to_bond(d);
+
+       return sprintf(buf, "%d\n", bond->params.resend_igmp);
+}
+
+static ssize_t bonding_store_resend_igmp(struct device *d,
+                                         struct device_attribute *attr,
+                                         const char *buf, size_t count)
+{
+       int new_value, ret = count;
+       struct bonding *bond = to_bond(d);
+
+       if (sscanf(buf, "%d", &new_value) != 1) {
+               pr_err("%s: no resend_igmp value specified.\n",
+                      bond->dev->name);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (new_value < 0 || new_value > 255) {
+               pr_err("%s: Invalid resend_igmp value %d not in range 0-255; rejected.\n",
+                      bond->dev->name, new_value);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       pr_info("%s: Setting resend_igmp to %d.\n",
+               bond->dev->name, new_value);
+       bond->params.resend_igmp = new_value;
+out:
+       return ret;
+}
+
+static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR,
+                  bonding_show_resend_igmp, bonding_store_resend_igmp);
+
 static struct attribute *per_bond_attrs[] = {
        &dev_attr_slaves.attr,
        &dev_attr_mode.attr,
@@ -1619,6 +1662,7 @@ static struct attribute *per_bond_attrs[] = {
        &dev_attr_ad_partner_mac.attr,
        &dev_attr_queue_id.attr,
        &dev_attr_all_slaves_active.attr,
+       &dev_attr_resend_igmp.attr,
        NULL,
 };
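
With the attribute registered above, the option is tunable at runtime through the bonding sysfs directory (typically /sys/class/net/<bond>/bonding/resend_igmp) as well as at load time via the resend_igmp module parameter; values the store handler rejects as invalid return -EINVAL, while the module-parameter path falls back to the default of 1 when handed an out-of-range value.
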
 
index c6fdd851579a77c953d259de7611c4298fec824f..c15f21347486586322c95c36316a88d2669d76ac 100644 (file)
@@ -136,6 +136,7 @@ struct bond_params {
        __be32 arp_targets[BOND_MAX_ARP_TARGETS];
        int tx_queues;
        int all_slaves_active;
+       int resend_igmp;
 };
 
 struct bond_parm_tbl {
@@ -202,6 +203,7 @@ struct bonding {
        s8       send_grat_arp;
        s8       send_unsol_na;
        s8       setup_by_slave;
+       s8       igmp_retrans;
 #ifdef CONFIG_PROC_FS
        struct   proc_dir_entry *proc_entry;
        char     proc_file_name[IFNAMSIZ];
@@ -223,6 +225,7 @@ struct bonding {
        struct   delayed_work arp_work;
        struct   delayed_work alb_work;
        struct   delayed_work ad_work;
+       struct   delayed_work mcast_work;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        struct   in6_addr master_ipv6;
 #endif
index 1846623c6ae65d18b6223ada9be4531809f64a99..1321cb6401cfc9fd2a68ed0ba524a6fc83c17a32 100644 (file)
@@ -491,6 +491,8 @@ struct ehea_port {
        u8 full_duplex;
        u8 autoneg;
        u8 num_def_qps;
+       wait_queue_head_t swqe_avail_wq;
+       wait_queue_head_t restart_wq;
 };
 
 struct port_res_cfg {
index 190fb691d20b107619e1fde16ebb1b204393426f..15401af30108bfc26ef4b307f2d6b85acb917055 100644 (file)
@@ -786,6 +786,7 @@ static void reset_sq_restart_flag(struct ehea_port *port)
                struct ehea_port_res *pr = &port->port_res[i];
                pr->sq_restart_flag = 0;
        }
+       wake_up(&port->restart_wq);
 }
 
 static void check_sqs(struct ehea_port *port)
@@ -796,6 +797,7 @@ static void check_sqs(struct ehea_port *port)
 
        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
+               int ret;
                k = 0;
                swqe = ehea_get_swqe(pr->qp, &swqe_index);
                memset(swqe, 0, SWQE_HEADER_SIZE);
@@ -809,13 +811,14 @@ static void check_sqs(struct ehea_port *port)
 
                ehea_post_swqe(pr->qp, swqe);
 
-               while (pr->sq_restart_flag == 0) {
-                       msleep(5);
-                       if (++k == 100) {
-                               ehea_error("HW/SW queues out of sync");
-                               ehea_schedule_port_reset(pr->port);
-                               return;
-                       }
+               ret = wait_event_timeout(port->restart_wq,
+                                        pr->sq_restart_flag == 0,
+                                        msecs_to_jiffies(100));
+
+               if (!ret) {
+                       ehea_error("HW/SW queues out of sync");
+                       ehea_schedule_port_reset(pr->port);
+                       return;
                }
        }
 }
@@ -888,6 +891,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
                pr->queue_stopped = 0;
        }
        spin_unlock_irqrestore(&pr->netif_queue, flags);
+       wake_up(&pr->port->swqe_avail_wq);
 
        return cqe;
 }
@@ -2652,6 +2656,9 @@ static int ehea_open(struct net_device *dev)
                netif_start_queue(dev);
        }
 
+       init_waitqueue_head(&port->swqe_avail_wq);
+       init_waitqueue_head(&port->restart_wq);
+
        mutex_unlock(&port->port_lock);
 
        return ret;
@@ -2724,13 +2731,15 @@ static void ehea_flush_sq(struct ehea_port *port)
        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                struct ehea_port_res *pr = &port->port_res[i];
                int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
-               int k = 0;
-               while (atomic_read(&pr->swqe_avail) < swqe_max) {
-                       msleep(5);
-                       if (++k == 20) {
-                               ehea_error("WARNING: sq not flushed completely");
-                               break;
-                       }
+               int ret;
+
+               ret = wait_event_timeout(port->swqe_avail_wq,
+                        atomic_read(&pr->swqe_avail) >= swqe_max,
+                        msecs_to_jiffies(100));
+
+               if (!ret) {
+                       ehea_error("WARNING: sq not flushed completely");
+                       break;
                }
        }
 }
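
Both ehea hunks above swap a 5ms msleep() polling loop for a waitqueue: check_sqs() and ehea_flush_sq() now sleep in wait_event_timeout() for up to 100ms, and the completion side (reset_sq_restart_flag(), ehea_proc_cqes()) calls wake_up() on the new wait queue heads after changing the state being tested. The handshake, reduced to a sketch with illustrative demo_* names:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_flag = 1;

static int demo_wait_for_flag_clear(void)
{
	long ret = wait_event_timeout(demo_wq, demo_flag == 0,
				      msecs_to_jiffies(100));

	return ret ? 0 : -ETIMEDOUT;	/* wait_event_timeout() returns 0 on timeout */
}

static void demo_clear_flag(void)
{
	demo_flag = 0;		/* change the condition first... */
	wake_up(&demo_wq);	/* ...then wake the waiter so it re-checks it */
}
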
index b8143501e6fc04eca2a06f285c3d0c42c53f97e8..84ac486f4a65f9f4abb65b5d94236c646b56bec0 100644 (file)
@@ -308,7 +308,7 @@ out_no_read:
  *
  *  Initializes the hw->mbx struct to correct values for vf mailbox
  */
-s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
+static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
 
index 1b0e0bf4c0f5c05934ba1027e9c124c85cabdc39..8c063bebee7f13a5f1b6d28054c4d1fe5658fc3f 100644 (file)
@@ -95,6 +95,4 @@
 /* forward declaration of the HW struct */
 struct ixgbe_hw;
 
-s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *);
-
 #endif /* _IXGBE_MBX_H_ */
index f6f929958ba0beb3cb3ec60970042976f1062247..bfe42c1fcfafa681c660d10e2b399e20d12f22ca 100644 (file)
@@ -368,7 +368,7 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
        return 0;
 }
 
-struct ixgbe_mac_operations ixgbevf_mac_ops = {
+static struct ixgbe_mac_operations ixgbevf_mac_ops = {
        .init_hw             = ixgbevf_init_hw_vf,
        .reset_hw            = ixgbevf_reset_hw_vf,
        .start_hw            = ixgbevf_start_hw_vf,
index 4b0e30b564e55fc0809a542b861747e1f354c19b..2d9663a1c54d91b3e25246f671ab35c51b606921 100644 (file)
@@ -64,7 +64,6 @@ struct pcpu_lstats {
        u64                     packets;
        u64                     bytes;
        struct u64_stats_sync   syncp;
-       unsigned long           drops;
 };
 
 /*
@@ -90,8 +89,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
                lb_stats->bytes += len;
                lb_stats->packets++;
                u64_stats_update_end(&lb_stats->syncp);
-       } else
-               lb_stats->drops++;
+       }
 
        return NETDEV_TX_OK;
 }
@@ -101,7 +99,6 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
 {
        u64 bytes = 0;
        u64 packets = 0;
-       u64 drops = 0;
        int i;
 
        for_each_possible_cpu(i) {
@@ -115,14 +112,11 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
                        tbytes = lb_stats->bytes;
                        tpackets = lb_stats->packets;
                } while (u64_stats_fetch_retry(&lb_stats->syncp, start));
-               drops   += lb_stats->drops;
                bytes   += tbytes;
                packets += tpackets;
        }
        stats->rx_packets = packets;
        stats->tx_packets = packets;
-       stats->rx_dropped = drops;
-       stats->rx_errors  = drops;
        stats->rx_bytes   = bytes;
        stats->tx_bytes   = bytes;
        return stats;
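
With the drops counter gone, loopback_xmit() and loopback_get_stats64() above are left with the plain u64_stats_sync discipline: the writer brackets its 64-bit updates with u64_stats_update_begin()/u64_stats_update_end(), and the reader retries through u64_stats_fetch_begin()/u64_stats_fetch_retry() so it never sees a torn value on 32-bit SMP. A minimal sketch with a hypothetical stats struct:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

static void demo_count(struct demo_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

static void demo_read(struct demo_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes	 = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}
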
index 2c7994372bde965fe5b37bd6335963ef3d490c3d..a17edda8a7816c2ba92e7c1b7aee3cce23b93f8f 100644 (file)
@@ -84,6 +84,9 @@
 #define BOND_DEFAULT_MAX_BONDS  1   /* Default maximum number of devices to support */
 
 #define BOND_DEFAULT_TX_QUEUES 16   /* Default number of tx queues per device */
+
+#define BOND_DEFAULT_RESEND_IGMP       1 /* Default number of IGMP membership reports */
+
 /* hashing types */
 #define BOND_XMIT_POLICY_LAYER2                0 /* layer 2 (MAC only), default */
 #define BOND_XMIT_POLICY_LAYER34       1 /* layer 3+4 (IP ^ (TCP || UDP)) */
index 92d81edd58087e4ac3ef835338c1bec1b7ab627a..6abcef67b1784e2309394976ac41133c5bbd337f 100644 (file)
@@ -884,6 +884,9 @@ struct net_device {
        int                     iflink;
 
        struct net_device_stats stats;
+       atomic_long_t           rx_dropped; /* dropped packets by core network
+                                            * Do not use this in drivers.
+                                            */
 
 #ifdef CONFIG_WIRELESS_EXT
        /* List of functions to handle Wireless Extensions (instead of ioctl).
index ac2fd002812ef03ef52fde39c347d91faf956e1e..106f3097d38452e9764e60115f88ea768d6cb15c 100644 (file)
@@ -31,6 +31,8 @@ struct fib_lookup_arg {
        void                    *lookup_ptr;
        void                    *result;
        struct fib_rule         *rule;
+       int                     flags;
+#define FIB_LOOKUP_NOREF       1
 };
 
 struct fib_rules_ops {
index c93f94edc610fc440f38ca5f191b7b6c7e7dd72c..ba3666d31766864b354687cc543d9ba3a365193d 100644 (file)
@@ -86,6 +86,7 @@ struct fib_info {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        int                     fib_power;
 #endif
+       struct rcu_head         rcu;
        struct fib_nh           fib_nh[0];
 #define fib_dev                fib_nh[0].nh_dev
 };
@@ -148,7 +149,7 @@ struct fib_table {
 };
 
 extern int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
-                           struct fib_result *res);
+                           struct fib_result *res, int fib_flags);
 extern int fib_table_insert(struct fib_table *, struct fib_config *);
 extern int fib_table_delete(struct fib_table *, struct fib_config *);
 extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
@@ -185,11 +186,11 @@ static inline int fib_lookup(struct net *net, const struct flowi *flp,
        struct fib_table *table;
 
        table = fib_get_table(net, RT_TABLE_LOCAL);
-       if (!fib_table_lookup(table, flp, res))
+       if (!fib_table_lookup(table, flp, res, FIB_LOOKUP_NOREF))
                return 0;
 
        table = fib_get_table(net, RT_TABLE_MAIN);
-       if (!fib_table_lookup(table, flp, res))
+       if (!fib_table_lookup(table, flp, res, FIB_LOOKUP_NOREF))
                return 0;
        return -ENETUNREACH;
 }
@@ -254,16 +255,6 @@ static inline void fib_info_put(struct fib_info *fi)
                free_fib_info(fi);
 }
 
-static inline void fib_res_put(struct fib_result *res)
-{
-       if (res->fi)
-               fib_info_put(res->fi);
-#ifdef CONFIG_IP_MULTIPLE_TABLES
-       if (res->r)
-               fib_rule_put(res->r);
-#endif
-}
-
 #ifdef CONFIG_PROC_FS
 extern int __net_init  fib_proc_init(struct net *net);
 extern void __net_exit fib_proc_exit(struct net *net);
index 7d08fd1062f092634daa1dc1d659ac9d34a8c4e8..37845dae6488b6df58e2e200ac0d7a7732098323 100644 (file)
@@ -138,13 +138,22 @@ struct pneigh_entry {
  *     neighbour table manipulation
  */
 
+struct neigh_hash_table {
+       struct neighbour        **hash_buckets;
+       unsigned int            hash_mask;
+       __u32                   hash_rnd;
+       struct rcu_head         rcu;
+};
+
 
 struct neigh_table {
        struct neigh_table      *next;
        int                     family;
        int                     entry_size;
        int                     key_len;
-       __u32                   (*hash)(const void *pkey, const struct net_device *);
+       __u32                   (*hash)(const void *pkey,
+                                       const struct net_device *dev,
+                                       __u32 hash_rnd);
        int                     (*constructor)(struct neighbour *);
        int                     (*pconstructor)(struct pneigh_entry *);
        void                    (*pdestructor)(struct pneigh_entry *);
@@ -165,9 +174,7 @@ struct neigh_table {
        unsigned long           last_rand;
        struct kmem_cache       *kmem_cachep;
        struct neigh_statistics __percpu *stats;
-       struct neighbour        **hash_buckets;
-       unsigned int            hash_mask;
-       __u32                   hash_rnd;
+       struct neigh_hash_table __rcu *nht;
        struct pneigh_entry     **phash_buckets;
 };
 
@@ -237,6 +244,7 @@ extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_en
 struct neigh_seq_state {
        struct seq_net_private p;
        struct neigh_table *tbl;
+       struct neigh_hash_table *nht;
        void *(*neigh_sub_iter)(struct neigh_seq_state *state,
                                struct neighbour *n, loff_t *pos);
        unsigned int bucket;
index b26ce343072c65a6a1776e1b42d35a0373f40fd6..8d9503ad01daa65729212de33f579b32aeffc1dc 100644 (file)
@@ -25,7 +25,6 @@ struct vlan_priority_tci_mapping {
  *     @rx_multicast: number of received multicast packets
  *     @syncp: synchronization point for 64bit counters
  *     @rx_errors: number of errors
- *     @rx_dropped: number of dropped packets
  */
 struct vlan_rx_stats {
        u64                     rx_packets;
@@ -33,7 +32,6 @@ struct vlan_rx_stats {
        u64                     rx_multicast;
        struct u64_stats_sync   syncp;
        unsigned long           rx_errors;
-       unsigned long           rx_dropped;
 };
 
 /**
index b6d55a9304f2bcb39a3bf9c4ee4f586412532f8e..dee727ce0291ea3cf8887a894fb08185414ba48c 100644 (file)
@@ -33,6 +33,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
        return polling ? netif_receive_skb(skb) : netif_rx(skb);
 
 drop:
+       atomic_long_inc(&skb->dev->rx_dropped);
        dev_kfree_skb_any(skb);
        return NET_RX_DROP;
 }
@@ -123,6 +124,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
        return dev_gro_receive(napi, skb);
 
 drop:
+       atomic_long_inc(&skb->dev->rx_dropped);
        return GRO_DROP;
 }
 
index f6fbcc0f1af9e8694f8c55a5f277bc79d2f86539..f54251edd40dfc09373c1d21e9eb434e19a5a064 100644 (file)
@@ -225,16 +225,15 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
                }
        }
 
-       if (unlikely(netif_rx(skb) == NET_RX_DROP)) {
-               if (rx_stats)
-                       rx_stats->rx_dropped++;
-       }
+       netif_rx(skb);
+
        rcu_read_unlock();
        return NET_RX_SUCCESS;
 
 err_unlock:
        rcu_read_unlock();
 err_free:
+       atomic_long_inc(&dev->rx_dropped);
        kfree_skb(skb);
        return NET_RX_DROP;
 }
@@ -846,15 +845,13 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
                        accum.rx_packets += rxpackets;
                        accum.rx_bytes   += rxbytes;
                        accum.rx_multicast += rxmulticast;
-                       /* rx_errors, rx_dropped are ulong, not protected by syncp */
+                       /* rx_errors is ulong, not protected by syncp */
                        accum.rx_errors  += p->rx_errors;
-                       accum.rx_dropped += p->rx_dropped;
                }
                stats->rx_packets = accum.rx_packets;
                stats->rx_bytes   = accum.rx_bytes;
                stats->rx_errors  = accum.rx_errors;
                stats->multicast  = accum.rx_multicast;
-               stats->rx_dropped = accum.rx_dropped;
        }
        return stats;
 }
index 95fdd1185067a859d49d0f5f5ea246e4fcad0893..ff956d1115bcee4636a68011a38457d87bea3cdd 100644 (file)
@@ -310,9 +310,9 @@ static int clip_constructor(struct neighbour *neigh)
        return 0;
 }
 
-static u32 clip_hash(const void *pkey, const struct net_device *dev)
+static u32 clip_hash(const void *pkey, const struct net_device *dev, __u32 rnd)
 {
-       return jhash_2words(*(u32 *) pkey, dev->ifindex, clip_tbl.hash_rnd);
+       return jhash_2words(*(u32 *) pkey, dev->ifindex, rnd);
 }
 
 static struct neigh_table clip_tbl = {
index ce6ad88c980b8105531916fa3aa00ced83f437bd..7d149550e8d623ee6bee8d961d91d9d4800d90dd 100644 (file)
@@ -1483,8 +1483,9 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
        skb_orphan(skb);
        nf_reset(skb);
 
-       if (!(dev->flags & IFF_UP) ||
-           (skb->len > (dev->mtu + dev->hard_header_len))) {
+       if (unlikely(!(dev->flags & IFF_UP) ||
+                    (skb->len > (dev->mtu + dev->hard_header_len)))) {
+               atomic_long_inc(&dev->rx_dropped);
                kfree_skb(skb);
                return NET_RX_DROP;
        }
@@ -2548,6 +2549,7 @@ enqueue:
 
        local_irq_restore(flags);
 
+       atomic_long_inc(&skb->dev->rx_dropped);
        kfree_skb(skb);
        return NET_RX_DROP;
 }
@@ -2995,6 +2997,7 @@ ncls:
        if (pt_prev) {
                ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
        } else {
+               atomic_long_inc(&skb->dev->rx_dropped);
                kfree_skb(skb);
                /* Jamal, now you will not be able to escape explaining
                 * me how you were going to use this. :-)
@@ -5429,14 +5432,14 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
 
        if (ops->ndo_get_stats64) {
                memset(storage, 0, sizeof(*storage));
-               return ops->ndo_get_stats64(dev, storage);
-       }
-       if (ops->ndo_get_stats) {
+               ops->ndo_get_stats64(dev, storage);
+       } else if (ops->ndo_get_stats) {
                netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
-               return storage;
+       } else {
+               netdev_stats_to_stats64(storage, &dev->stats);
+               dev_txq_stats_fold(dev, storage);
        }
-       netdev_stats_to_stats64(storage, &dev->stats);
-       dev_txq_stats_fold(dev, storage);
+       storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
        return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
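
The dev.c changes above give the core one place to account packets it throws away on a device's behalf: each drop path bumps the new dev->rx_dropped atomic_long_t just before freeing the skb, and dev_get_stats() folds that counter into rtnl_link_stats64 regardless of which stats callback the driver implements, which is why the loopback and VLAN per-device drop counters could be removed. Reduced to a sketch (demo_drop() is hypothetical and assumes the rx_dropped field added above):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int demo_drop(struct sk_buff *skb)
{
	atomic_long_inc(&skb->dev->rx_dropped);	/* core-owned; drivers must not touch it */
	kfree_skb(skb);
	return NET_RX_DROP;
}
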
index cfb7d25c172d60ae57061c345c7526765c93c5a9..21698f8c49ee7a7c0590fc38ab28e4ad7afaf1fd 100644 (file)
@@ -225,7 +225,8 @@ jumped:
                        err = ops->action(rule, fl, flags, arg);
 
                if (err != -EAGAIN) {
-                       if (likely(atomic_inc_not_zero(&rule->refcnt))) {
+                       if ((arg->flags & FIB_LOOKUP_NOREF) ||
+                           likely(atomic_inc_not_zero(&rule->refcnt))) {
                                arg->rule = rule;
                                goto out;
                        }
index b142a0d76072d55d490d5e191b2492ba8682941f..dd8920e4f508fe5b55a1b53c805c9316df2d2c2d 100644 (file)
@@ -131,14 +131,17 @@ static int neigh_forced_gc(struct neigh_table *tbl)
 {
        int shrunk = 0;
        int i;
+       struct neigh_hash_table *nht;
 
        NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
 
        write_lock_bh(&tbl->lock);
-       for (i = 0; i <= tbl->hash_mask; i++) {
+       nht = rcu_dereference_protected(tbl->nht,
+                                       lockdep_is_held(&tbl->lock));
+       for (i = 0; i <= nht->hash_mask; i++) {
                struct neighbour *n, **np;
 
-               np = &tbl->hash_buckets[i];
+               np = &nht->hash_buckets[i];
                while ((n = *np) != NULL) {
                        /* Neighbour record may be discarded if:
                         * - nobody refers to it.
@@ -199,9 +202,13 @@ static void pneigh_queue_purge(struct sk_buff_head *list)
 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
 {
        int i;
+       struct neigh_hash_table *nht;
 
-       for (i = 0; i <= tbl->hash_mask; i++) {
-               struct neighbour *n, **np = &tbl->hash_buckets[i];
+       nht = rcu_dereference_protected(tbl->nht,
+                                       lockdep_is_held(&tbl->lock));
+
+       for (i = 0; i <= nht->hash_mask; i++) {
+               struct neighbour *n, **np = &nht->hash_buckets[i];
 
                while ((n = *np) != NULL) {
                        if (dev && n->dev != dev) {
@@ -297,64 +304,81 @@ out_entries:
        goto out;
 }
 
-static struct neighbour **neigh_hash_alloc(unsigned int entries)
+static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
 {
-       unsigned long size = entries * sizeof(struct neighbour *);
-       struct neighbour **ret;
+       size_t size = entries * sizeof(struct neighbour *);
+       struct neigh_hash_table *ret;
+       struct neighbour **buckets;
 
-       if (size <= PAGE_SIZE) {
-               ret = kzalloc(size, GFP_ATOMIC);
-       } else {
-               ret = (struct neighbour **)
-                     __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
+       ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
+       if (!ret)
+               return NULL;
+       if (size <= PAGE_SIZE)
+               buckets = kzalloc(size, GFP_ATOMIC);
+       else
+               buckets = (struct neighbour **)
+                         __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
+                                          get_order(size));
+       if (!buckets) {
+               kfree(ret);
+               return NULL;
        }
+       ret->hash_buckets = buckets;
+       ret->hash_mask = entries - 1;
+       get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
        return ret;
 }
 
-static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
+static void neigh_hash_free_rcu(struct rcu_head *head)
 {
-       unsigned long size = entries * sizeof(struct neighbour *);
+       struct neigh_hash_table *nht = container_of(head,
+                                                   struct neigh_hash_table,
+                                                   rcu);
+       size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *);
+       struct neighbour **buckets = nht->hash_buckets;
 
        if (size <= PAGE_SIZE)
-               kfree(hash);
+               kfree(buckets);
        else
-               free_pages((unsigned long)hash, get_order(size));
+               free_pages((unsigned long)buckets, get_order(size));
+       kfree(nht);
 }
 
-static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
+static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
+                                               unsigned long new_entries)
 {
-       struct neighbour **new_hash, **old_hash;
-       unsigned int i, new_hash_mask, old_entries;
+       unsigned int i, hash;
+       struct neigh_hash_table *new_nht, *old_nht;
 
        NEIGH_CACHE_STAT_INC(tbl, hash_grows);
 
        BUG_ON(!is_power_of_2(new_entries));
-       new_hash = neigh_hash_alloc(new_entries);
-       if (!new_hash)
-               return;
-
-       old_entries = tbl->hash_mask + 1;
-       new_hash_mask = new_entries - 1;
-       old_hash = tbl->hash_buckets;
+       old_nht = rcu_dereference_protected(tbl->nht,
+                                           lockdep_is_held(&tbl->lock));
+       new_nht = neigh_hash_alloc(new_entries);
+       if (!new_nht)
+               return old_nht;
 
-       get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
-       for (i = 0; i < old_entries; i++) {
+       for (i = 0; i <= old_nht->hash_mask; i++) {
                struct neighbour *n, *next;
 
-               for (n = old_hash[i]; n; n = next) {
-                       unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
+               for (n = old_nht->hash_buckets[i];
+                    n != NULL;
+                    n = next) {
+                       hash = tbl->hash(n->primary_key, n->dev,
+                                        new_nht->hash_rnd);
 
-                       hash_val &= new_hash_mask;
+                       hash &= new_nht->hash_mask;
                        next = n->next;
 
-                       n->next = new_hash[hash_val];
-                       new_hash[hash_val] = n;
+                       n->next = new_nht->hash_buckets[hash];
+                       new_nht->hash_buckets[hash] = n;
                }
        }
-       tbl->hash_buckets = new_hash;
-       tbl->hash_mask = new_hash_mask;
 
-       neigh_hash_free(old_hash, old_entries);
+       rcu_assign_pointer(tbl->nht, new_nht);
+       call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
+       return new_nht;
 }
 
 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
@@ -363,19 +387,23 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
        struct neighbour *n;
        int key_len = tbl->key_len;
        u32 hash_val;
+       struct neigh_hash_table *nht;
 
        NEIGH_CACHE_STAT_INC(tbl, lookups);
 
-       read_lock_bh(&tbl->lock);
-       hash_val = tbl->hash(pkey, dev);
-       for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
+       rcu_read_lock_bh();
+       nht = rcu_dereference_bh(tbl->nht);
+       hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;
+       read_lock(&tbl->lock);
+       for (n = nht->hash_buckets[hash_val]; n; n = n->next) {
                if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
                        neigh_hold(n);
                        NEIGH_CACHE_STAT_INC(tbl, hits);
                        break;
                }
        }
-       read_unlock_bh(&tbl->lock);
+       read_unlock(&tbl->lock);
+       rcu_read_unlock_bh();
        return n;
 }
 EXPORT_SYMBOL(neigh_lookup);
@@ -386,12 +414,15 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
        struct neighbour *n;
        int key_len = tbl->key_len;
        u32 hash_val;
+       struct neigh_hash_table *nht;
 
        NEIGH_CACHE_STAT_INC(tbl, lookups);
 
-       read_lock_bh(&tbl->lock);
-       hash_val = tbl->hash(pkey, NULL);
-       for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
+       rcu_read_lock_bh();
+       nht = rcu_dereference_bh(tbl->nht);
+       hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) & nht->hash_mask;
+       read_lock(&tbl->lock);
+       for (n = nht->hash_buckets[hash_val]; n; n = n->next) {
                if (!memcmp(n->primary_key, pkey, key_len) &&
                    net_eq(dev_net(n->dev), net)) {
                        neigh_hold(n);
@@ -399,7 +430,8 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
                        break;
                }
        }
-       read_unlock_bh(&tbl->lock);
+       read_unlock(&tbl->lock);
+       rcu_read_unlock_bh();
        return n;
 }
 EXPORT_SYMBOL(neigh_lookup_nodev);
@@ -411,6 +443,7 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
        int key_len = tbl->key_len;
        int error;
        struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
+       struct neigh_hash_table *nht;
 
        if (!n) {
                rc = ERR_PTR(-ENOBUFS);
@@ -437,18 +470,20 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
        n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
 
        write_lock_bh(&tbl->lock);
+       nht = rcu_dereference_protected(tbl->nht,
+                                       lockdep_is_held(&tbl->lock));
 
-       if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
-               neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
+       if (atomic_read(&tbl->entries) > (nht->hash_mask + 1))
+               nht = neigh_hash_grow(tbl, (nht->hash_mask + 1) << 1);
 
-       hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
+       hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;
 
        if (n->parms->dead) {
                rc = ERR_PTR(-EINVAL);
                goto out_tbl_unlock;
        }
 
-       for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
+       for (n1 = nht->hash_buckets[hash_val]; n1; n1 = n1->next) {
                if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
                        neigh_hold(n1);
                        rc = n1;
@@ -456,8 +491,8 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
                }
        }
 
-       n->next = tbl->hash_buckets[hash_val];
-       tbl->hash_buckets[hash_val] = n;
+       n->next = nht->hash_buckets[hash_val];
+       nht->hash_buckets[hash_val] = n;
        n->dead = 0;
        neigh_hold(n);
        write_unlock_bh(&tbl->lock);
@@ -698,10 +733,13 @@ static void neigh_periodic_work(struct work_struct *work)
        struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
        struct neighbour *n, **np;
        unsigned int i;
+       struct neigh_hash_table *nht;
 
        NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
 
        write_lock_bh(&tbl->lock);
+       nht = rcu_dereference_protected(tbl->nht,
+                                       lockdep_is_held(&tbl->lock));
 
        /*
         *      periodically recompute ReachableTime from random function
@@ -715,8 +753,8 @@ static void neigh_periodic_work(struct work_struct *work)
                                neigh_rand_reach_time(p->base_reachable_time);
        }
 
-       for (i = 0 ; i <= tbl->hash_mask; i++) {
-               np = &tbl->hash_buckets[i];
+       for (i = 0 ; i <= nht->hash_mask; i++) {
+               np = &nht->hash_buckets[i];
 
                while ((n = *np) != NULL) {
                        unsigned int state;
@@ -1438,17 +1476,14 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
                panic("cannot create neighbour proc dir entry");
 #endif
 
-       tbl->hash_mask = 1;
-       tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
+       tbl->nht = neigh_hash_alloc(8);
 
        phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
        tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
 
-       if (!tbl->hash_buckets || !tbl->phash_buckets)
+       if (!tbl->nht || !tbl->phash_buckets)
                panic("cannot allocate neighbour cache hashes");
 
-       get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
-
        rwlock_init(&tbl->lock);
        INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
        schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
@@ -1504,8 +1539,8 @@ int neigh_table_clear(struct neigh_table *tbl)
        }
        write_unlock(&neigh_tbl_lock);
 
-       neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
-       tbl->hash_buckets = NULL;
+       call_rcu(&tbl->nht->rcu, neigh_hash_free_rcu);
+       tbl->nht = NULL;
 
        kfree(tbl->phash_buckets);
        tbl->phash_buckets = NULL;
@@ -1531,6 +1566,7 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
        struct net_device *dev = NULL;
        int err = -EINVAL;
 
+       ASSERT_RTNL();
        if (nlmsg_len(nlh) < sizeof(*ndm))
                goto out;
 
@@ -1540,7 +1576,7 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
        ndm = nlmsg_data(nlh);
        if (ndm->ndm_ifindex) {
-               dev = dev_get_by_index(net, ndm->ndm_ifindex);
+               dev = __dev_get_by_index(net, ndm->ndm_ifindex);
                if (dev == NULL) {
                        err = -ENODEV;
                        goto out;
@@ -1556,34 +1592,31 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                read_unlock(&neigh_tbl_lock);
 
                if (nla_len(dst_attr) < tbl->key_len)
-                       goto out_dev_put;
+                       goto out;
 
                if (ndm->ndm_flags & NTF_PROXY) {
                        err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
-                       goto out_dev_put;
+                       goto out;
                }
 
                if (dev == NULL)
-                       goto out_dev_put;
+                       goto out;
 
                neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
                if (neigh == NULL) {
                        err = -ENOENT;
-                       goto out_dev_put;
+                       goto out;
                }
 
                err = neigh_update(neigh, NULL, NUD_FAILED,
                                   NEIGH_UPDATE_F_OVERRIDE |
                                   NEIGH_UPDATE_F_ADMIN);
                neigh_release(neigh);
-               goto out_dev_put;
+               goto out;
        }
        read_unlock(&neigh_tbl_lock);
        err = -EAFNOSUPPORT;
 
-out_dev_put:
-       if (dev)
-               dev_put(dev);
 out:
        return err;
 }
@@ -1597,6 +1630,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
        struct net_device *dev = NULL;
        int err;
 
+       ASSERT_RTNL();
        err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
        if (err < 0)
                goto out;
@@ -1607,14 +1641,14 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 
        ndm = nlmsg_data(nlh);
        if (ndm->ndm_ifindex) {
-               dev = dev_get_by_index(net, ndm->ndm_ifindex);
+               dev = __dev_get_by_index(net, ndm->ndm_ifindex);
                if (dev == NULL) {
                        err = -ENODEV;
                        goto out;
                }
 
                if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
-                       goto out_dev_put;
+                       goto out;
        }
 
        read_lock(&neigh_tbl_lock);
@@ -1628,7 +1662,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                read_unlock(&neigh_tbl_lock);
 
                if (nla_len(tb[NDA_DST]) < tbl->key_len)
-                       goto out_dev_put;
+                       goto out;
                dst = nla_data(tb[NDA_DST]);
                lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
 
@@ -1641,29 +1675,29 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                                pn->flags = ndm->ndm_flags;
                                err = 0;
                        }
-                       goto out_dev_put;
+                       goto out;
                }
 
                if (dev == NULL)
-                       goto out_dev_put;
+                       goto out;
 
                neigh = neigh_lookup(tbl, dst, dev);
                if (neigh == NULL) {
                        if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
                                err = -ENOENT;
-                               goto out_dev_put;
+                               goto out;
                        }
 
                        neigh = __neigh_lookup_errno(tbl, dst, dev);
                        if (IS_ERR(neigh)) {
                                err = PTR_ERR(neigh);
-                               goto out_dev_put;
+                               goto out;
                        }
                } else {
                        if (nlh->nlmsg_flags & NLM_F_EXCL) {
                                err = -EEXIST;
                                neigh_release(neigh);
-                               goto out_dev_put;
+                               goto out;
                        }
 
                        if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
@@ -1676,15 +1710,11 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
                } else
                        err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
                neigh_release(neigh);
-               goto out_dev_put;
+               goto out;
        }
 
        read_unlock(&neigh_tbl_lock);
        err = -EAFNOSUPPORT;
-
-out_dev_put:
-       if (dev)
-               dev_put(dev);
 out:
        return err;
 }
@@ -1750,18 +1780,22 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
                unsigned long now = jiffies;
                unsigned int flush_delta = now - tbl->last_flush;
                unsigned int rand_delta = now - tbl->last_rand;
-
+               struct neigh_hash_table *nht;
                struct ndt_config ndc = {
                        .ndtc_key_len           = tbl->key_len,
                        .ndtc_entry_size        = tbl->entry_size,
                        .ndtc_entries           = atomic_read(&tbl->entries),
                        .ndtc_last_flush        = jiffies_to_msecs(flush_delta),
                        .ndtc_last_rand         = jiffies_to_msecs(rand_delta),
-                       .ndtc_hash_rnd          = tbl->hash_rnd,
-                       .ndtc_hash_mask         = tbl->hash_mask,
                        .ndtc_proxy_qlen        = tbl->proxy_queue.qlen,
                };
 
+               rcu_read_lock_bh();
+               nht = rcu_dereference_bh(tbl->nht);
+               ndc.ndtc_hash_rnd = nht->hash_rnd;
+               ndc.ndtc_hash_mask = nht->hash_mask;
+               rcu_read_unlock_bh();
+
                NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
        }
 
@@ -2093,14 +2127,18 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
        struct neighbour *n;
        int rc, h, s_h = cb->args[1];
        int idx, s_idx = idx = cb->args[2];
+       struct neigh_hash_table *nht;
 
-       read_lock_bh(&tbl->lock);
-       for (h = 0; h <= tbl->hash_mask; h++) {
+       rcu_read_lock_bh();
+       nht = rcu_dereference_bh(tbl->nht);
+
+       read_lock(&tbl->lock);
+       for (h = 0; h <= nht->hash_mask; h++) {
                if (h < s_h)
                        continue;
                if (h > s_h)
                        s_idx = 0;
-               for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
+               for (n = nht->hash_buckets[h], idx = 0; n; n = n->next) {
                        if (!net_eq(dev_net(n->dev), net))
                                continue;
                        if (idx < s_idx)
@@ -2109,7 +2147,6 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                                            cb->nlh->nlmsg_seq,
                                            RTM_NEWNEIGH,
                                            NLM_F_MULTI) <= 0) {
-                               read_unlock_bh(&tbl->lock);
                                rc = -1;
                                goto out;
                        }
@@ -2117,9 +2154,10 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                        idx++;
                }
        }
-       read_unlock_bh(&tbl->lock);
        rc = skb->len;
 out:
+       read_unlock(&tbl->lock);
+       rcu_read_unlock_bh();
        cb->args[1] = h;
        cb->args[2] = idx;
        return rc;
@@ -2152,15 +2190,20 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
 {
        int chain;
+       struct neigh_hash_table *nht;
 
-       read_lock_bh(&tbl->lock);
-       for (chain = 0; chain <= tbl->hash_mask; chain++) {
+       rcu_read_lock_bh();
+       nht = rcu_dereference_bh(tbl->nht);
+
+       read_lock(&tbl->lock);
+       for (chain = 0; chain <= nht->hash_mask; chain++) {
                struct neighbour *n;
 
-               for (n = tbl->hash_buckets[chain]; n; n = n->next)
+               for (n = nht->hash_buckets[chain]; n; n = n->next)
                        cb(n, cookie);
        }
-       read_unlock_bh(&tbl->lock);
+       read_unlock(&tbl->lock);
+       rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(neigh_for_each);
 
@@ -2169,11 +2212,14 @@ void __neigh_for_each_release(struct neigh_table *tbl,
                              int (*cb)(struct neighbour *))
 {
        int chain;
+       struct neigh_hash_table *nht;
 
-       for (chain = 0; chain <= tbl->hash_mask; chain++) {
+       nht = rcu_dereference_protected(tbl->nht,
+                                       lockdep_is_held(&tbl->lock));
+       for (chain = 0; chain <= nht->hash_mask; chain++) {
                struct neighbour *n, **np;
 
-               np = &tbl->hash_buckets[chain];
+               np = &nht->hash_buckets[chain];
                while ((n = *np) != NULL) {
                        int release;
 
@@ -2198,13 +2244,13 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
 {
        struct neigh_seq_state *state = seq->private;
        struct net *net = seq_file_net(seq);
-       struct neigh_table *tbl = state->tbl;
+       struct neigh_hash_table *nht = state->nht;
        struct neighbour *n = NULL;
        int bucket = state->bucket;
 
        state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
-       for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
-               n = tbl->hash_buckets[bucket];
+       for (bucket = 0; bucket <= nht->hash_mask; bucket++) {
+               n = nht->hash_buckets[bucket];
 
                while (n) {
                        if (!net_eq(dev_net(n->dev), net))
@@ -2239,7 +2285,7 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
 {
        struct neigh_seq_state *state = seq->private;
        struct net *net = seq_file_net(seq);
-       struct neigh_table *tbl = state->tbl;
+       struct neigh_hash_table *nht = state->nht;
 
        if (state->neigh_sub_iter) {
                void *v = state->neigh_sub_iter(state, n, pos);
@@ -2270,10 +2316,10 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
                if (n)
                        break;
 
-               if (++state->bucket > tbl->hash_mask)
+               if (++state->bucket > nht->hash_mask)
                        break;
 
-               n = tbl->hash_buckets[state->bucket];
+               n = nht->hash_buckets[state->bucket];
        }
 
        if (n && pos)
@@ -2372,6 +2418,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
 
 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
        __acquires(tbl->lock)
+       __acquires(rcu_bh)
 {
        struct neigh_seq_state *state = seq->private;
 
@@ -2379,8 +2426,9 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
        state->bucket = 0;
        state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
 
-       read_lock_bh(&tbl->lock);
-
+       rcu_read_lock_bh();
+       state->nht = rcu_dereference_bh(tbl->nht);
+       read_lock(&tbl->lock);
        return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
 }
 EXPORT_SYMBOL(neigh_seq_start);
@@ -2414,11 +2462,13 @@ EXPORT_SYMBOL(neigh_seq_next);
 
 void neigh_seq_stop(struct seq_file *seq, void *v)
        __releases(tbl->lock)
+       __releases(rcu_bh)
 {
        struct neigh_seq_state *state = seq->private;
        struct neigh_table *tbl = state->tbl;
 
-       read_unlock_bh(&tbl->lock);
+       read_unlock(&tbl->lock);
+       rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(neigh_seq_stop);
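
The neighbour-table rework above is the standard RCU hash-resize recipe: buckets, mask and hash seed move into one struct neigh_hash_table, readers pick it up under rcu_read_lock_bh() via rcu_dereference_bh(tbl->nht), and neigh_hash_grow() publishes the larger table with rcu_assign_pointer() before retiring the old one through call_rcu(). The same recipe in isolation, using hypothetical demo_* types:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_table {
	unsigned int		mask;
	void			**buckets;
	struct rcu_head		rcu;
};

static struct demo_table __rcu *demo_tbl;

static void demo_table_free_rcu(struct rcu_head *head)
{
	struct demo_table *t = container_of(head, struct demo_table, rcu);

	kfree(t->buckets);
	kfree(t);
}

/* caller holds whatever lock serializes writers (tbl->lock in neighbour.c) */
static void demo_table_replace(struct demo_table *new_tbl)
{
	struct demo_table *old = rcu_dereference_protected(demo_tbl, 1);

	/* ...rehash entries from old->buckets into new_tbl->buckets... */
	rcu_assign_pointer(demo_tbl, new_tbl);
	if (old)
		call_rcu(&old->rcu, demo_table_free_rcu);
}
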
 
index 0363bb95cc7db606d45dd08bac34340ba48a3a84..a085dbcf5c7fa4fde69419dd135c8c8570bb4fb2 100644 (file)
@@ -48,7 +48,6 @@
 #include <net/dn_neigh.h>
 #include <net/dn_route.h>
 
-static u32 dn_neigh_hash(const void *pkey, const struct net_device *dev);
 static int dn_neigh_construct(struct neighbour *);
 static void dn_long_error_report(struct neighbour *, struct sk_buff *);
 static void dn_short_error_report(struct neighbour *, struct sk_buff *);
@@ -93,6 +92,13 @@ static const struct neigh_ops dn_phase3_ops = {
        .queue_xmit =           dev_queue_xmit
 };
 
+static u32 dn_neigh_hash(const void *pkey,
+                        const struct net_device *dev,
+                        __u32 hash_rnd)
+{
+       return jhash_2words(*(__u16 *)pkey, 0, hash_rnd);
+}
+
 struct neigh_table dn_neigh_table = {
        .family =                       PF_DECnet,
        .entry_size =                   sizeof(struct dn_neigh),
@@ -122,11 +128,6 @@ struct neigh_table dn_neigh_table = {
        .gc_thresh3 =                   1024,
 };
 
-static u32 dn_neigh_hash(const void *pkey, const struct net_device *dev)
-{
-       return jhash_2words(*(__u16 *)pkey, 0, dn_neigh_table.hash_rnd);
-}
-
 static int dn_neigh_construct(struct neighbour *neigh)
 {
        struct net_device *dev = neigh->dev;
index d9031ad67826f99b71f50acd90c91de61ba6d7be..f35309578170960bd8f0f73cac1eeebd8bb4c78d 100644 (file)
@@ -127,7 +127,7 @@ EXPORT_SYMBOL(clip_tbl_hook);
 /*
  *     Interface to generic neighbour cache.
  */
-static u32 arp_hash(const void *pkey, const struct net_device *dev);
+static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 rnd);
 static int arp_constructor(struct neighbour *neigh);
 static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
 static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -225,9 +225,11 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
 }
 
 
-static u32 arp_hash(const void *pkey, const struct net_device *dev)
+static u32 arp_hash(const void *pkey,
+                   const struct net_device *dev,
+                   __u32 hash_rnd)
 {
-       return jhash_2words(*(u32 *)pkey, dev->ifindex, arp_tbl.hash_rnd);
+       return jhash_2words(*(u32 *)pkey, dev->ifindex, hash_rnd);
 }
 
 static int arp_constructor(struct neighbour *neigh)
index b05c23b05a9f9b766a4ea31b5e9b8aa54a1934a7..919f2ad19b4973eecfec44b39a9c6376e751ecae 100644 (file)
@@ -168,8 +168,11 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
        struct fib_result res = { 0 };
        struct net_device *dev = NULL;
 
-       if (fib_lookup(net, &fl, &res))
+       rcu_read_lock();
+       if (fib_lookup(net, &fl, &res)) {
+               rcu_read_unlock();
                return NULL;
+       }
        if (res.type != RTN_LOCAL)
                goto out;
        dev = FIB_RES_DEV(res);
@@ -177,7 +180,7 @@ struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
        if (dev && devref)
                dev_hold(dev);
 out:
-       fib_res_put(&res);
+       rcu_read_unlock();
        return dev;
 }
 EXPORT_SYMBOL(__ip_dev_find);
@@ -207,11 +210,12 @@ static inline unsigned __inet_dev_addr_type(struct net *net,
        local_table = fib_get_table(net, RT_TABLE_LOCAL);
        if (local_table) {
                ret = RTN_UNICAST;
-               if (!fib_table_lookup(local_table, &fl, &res)) {
+               rcu_read_lock();
+               if (!fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
                        if (!dev || dev == res.fi->fib_dev)
                                ret = res.type;
-                       fib_res_put(&res);
                }
+               rcu_read_unlock();
        }
        return ret;
 }
@@ -235,6 +239,7 @@ EXPORT_SYMBOL(inet_dev_addr_type);
  * - figure out what "logical" interface this packet arrived
  *   and calculate "specific destination" address.
  * - check, that packet arrived from expected physical interface.
+ * called with rcu_read_lock()
  */
 int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
                        struct net_device *dev, __be32 *spec_dst,
@@ -259,7 +264,6 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
        struct net *net;
 
        no_addr = rpf = accept_local = 0;
-       rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev);
        if (in_dev) {
                no_addr = in_dev->ifa_list == NULL;
@@ -268,7 +272,6 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
                if (mark && !IN_DEV_SRC_VMARK(in_dev))
                        fl.mark = 0;
        }
-       rcu_read_unlock();
 
        if (in_dev == NULL)
                goto e_inval;
@@ -278,7 +281,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
                goto last_resort;
        if (res.type != RTN_UNICAST) {
                if (res.type != RTN_LOCAL || !accept_local)
-                       goto e_inval_res;
+                       goto e_inval;
        }
        *spec_dst = FIB_RES_PREFSRC(res);
        fib_combine_itag(itag, &res);
@@ -299,10 +302,8 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
 #endif
        if (dev_match) {
                ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
-               fib_res_put(&res);
                return ret;
        }
-       fib_res_put(&res);
        if (no_addr)
                goto last_resort;
        if (rpf == 1)
@@ -315,7 +316,6 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
                        *spec_dst = FIB_RES_PREFSRC(res);
                        ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
                }
-               fib_res_put(&res);
        }
        return ret;
 
@@ -326,8 +326,6 @@ last_resort:
        *itag = 0;
        return 0;
 
-e_inval_res:
-       fib_res_put(&res);
 e_inval:
        return -EINVAL;
 e_rpf:
@@ -873,15 +871,16 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
                local_bh_disable();
 
                frn->tb_id = tb->tb_id;
-               frn->err = fib_table_lookup(tb, &fl, &res);
+               rcu_read_lock();
+               frn->err = fib_table_lookup(tb, &fl, &res, FIB_LOOKUP_NOREF);
 
                if (!frn->err) {
                        frn->prefixlen = res.prefixlen;
                        frn->nh_sel = res.nh_sel;
                        frn->type = res.type;
                        frn->scope = res.scope;
-                       fib_res_put(&res);
                }
+               rcu_read_unlock();
                local_bh_enable();
        }
 }
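
Taken together with FIB_LOOKUP_NOREF, the changes above move IPv4 route results from reference counting to RCU: fib_res_put() is gone, fib_table_lookup() callers pass the new fib_flags argument, and every user simply keeps rcu_read_lock() held for as long as it touches the fib_result. A hedged sketch of the resulting calling convention; demo_dev_for_flow() is illustrative and the flow-key setup is elided:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/ip_fib.h>

static struct net_device *demo_dev_for_flow(struct net *net, struct flowi *fl)
{
	struct fib_result res = { 0 };
	struct net_device *dev = NULL;

	rcu_read_lock();
	if (!fib_lookup(net, fl, &res) && res.type == RTN_LOCAL) {
		dev = FIB_RES_DEV(res);
		if (dev)
			dev_hold(dev);	/* pin it before leaving the RCU section */
	}
	rcu_read_unlock();	/* res must not be touched past this point */
	return dev;		/* caller drops the reference with dev_put() */
}
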
index 4ed7e0dea1bc0e8b33bb1f094e712b59fb6715ad..83cca68e259c420b8c84c1a520e3c9b3fbe31c5c 100644 (file)
@@ -244,7 +244,8 @@ fn_new_zone(struct fn_hash *table, int z)
 }
 
 int fib_table_lookup(struct fib_table *tb,
-                    const struct flowi *flp, struct fib_result *res)
+                    const struct flowi *flp, struct fib_result *res,
+                    int fib_flags)
 {
        int err;
        struct fn_zone *fz;
@@ -264,7 +265,7 @@ int fib_table_lookup(struct fib_table *tb,
 
                        err = fib_semantic_match(&f->fn_alias,
                                                 flp, res,
-                                                fz->fz_order);
+                                                fz->fz_order, fib_flags);
                        if (err <= 0)
                                goto out;
                }
index 637b133973bd88e601aee3b833de0efed5b2a017..b9c9a9f2aee54f50f14f7594f119f46b3631fe3e 100644 (file)
@@ -22,7 +22,7 @@ struct fib_alias {
 /* Exported by fib_semantics.c */
 extern int fib_semantic_match(struct list_head *head,
                              const struct flowi *flp,
-                             struct fib_result *res, int prefixlen);
+                             struct fib_result *res, int prefixlen, int fib_flags);
 extern void fib_release_info(struct fib_info *);
 extern struct fib_info *fib_create_info(struct fib_config *cfg);
 extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
index 32300521e32c49f3e5675104f8416e2da3bb2d7d..7981a24f5c7b3b51e5c23c825f98e12b02dfcd87 100644 (file)
@@ -57,6 +57,7 @@ int fib_lookup(struct net *net, struct flowi *flp, struct fib_result *res)
 {
        struct fib_lookup_arg arg = {
                .result = res,
+               .flags = FIB_LOOKUP_NOREF,
        };
        int err;
 
@@ -94,7 +95,7 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
        if (!tbl)
                goto errout;
 
-       err = fib_table_lookup(tbl, flp, (struct fib_result *) arg->result);
+       err = fib_table_lookup(tbl, flp, (struct fib_result *) arg->result, arg->flags);
        if (err > 0)
                err = -EAGAIN;
 errout:
index ba52f399a898915cc109bd6338c82b6c6ad487ef..0f80dfc2f7fb49a4336329b48935aa937669351e 100644 (file)
@@ -148,6 +148,13 @@ static const struct
 
 /* Release a nexthop info record */
 
+static void free_fib_info_rcu(struct rcu_head *head)
+{
+       struct fib_info *fi = container_of(head, struct fib_info, rcu);
+
+       kfree(fi);
+}
+
 void free_fib_info(struct fib_info *fi)
 {
        if (fi->fib_dead == 0) {
@@ -161,7 +168,7 @@ void free_fib_info(struct fib_info *fi)
        } endfor_nexthops(fi);
        fib_info_cnt--;
        release_net(fi->fib_net);
-       kfree(fi);
+       call_rcu(&fi->rcu, free_fib_info_rcu);
 }
 
 void fib_release_info(struct fib_info *fi)
@@ -553,6 +560,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                        nh->nh_scope = RT_SCOPE_LINK;
                        return 0;
                }
+               rcu_read_lock();
                {
                        struct flowi fl = {
                                .nl_u = {
@@ -568,8 +576,10 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                        if (fl.fl4_scope < RT_SCOPE_LINK)
                                fl.fl4_scope = RT_SCOPE_LINK;
                        err = fib_lookup(net, &fl, &res);
-                       if (err)
+                       if (err) {
+                               rcu_read_unlock();
                                return err;
+                       }
                }
                err = -EINVAL;
                if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
@@ -585,7 +595,7 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
                        goto out;
                err = 0;
 out:
-               fib_res_put(&res);
+               rcu_read_unlock();
                return err;
        } else {
                struct in_device *in_dev;
@@ -879,7 +889,7 @@ failure:
 
 /* Note! fib_semantic_match intentionally uses  RCU list functions. */
 int fib_semantic_match(struct list_head *head, const struct flowi *flp,
-                      struct fib_result *res, int prefixlen)
+                      struct fib_result *res, int prefixlen, int fib_flags)
 {
        struct fib_alias *fa;
        int nh_sel = 0;
@@ -943,7 +953,8 @@ out_fill_res:
        res->type = fa->fa_type;
        res->scope = fa->fa_scope;
        res->fi = fa->fa_info;
-       atomic_inc(&res->fi->fib_clntref);
+       if (!(fib_flags & FIB_LOOKUP_NOREF))
+               atomic_inc(&res->fi->fib_clntref);
        return 0;
 }
 
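[Not part of the patch — illustrative sketch.] Because NOREF lookups may still be dereferencing res->fi under rcu_read_lock(), free_fib_info() can no longer kfree() the fib_info immediately; the hunk above defers the free to the end of a grace period via call_rcu(). The same pattern in isolation, using a made-up structure name:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct example_obj {
            struct rcu_head rcu;
            int             payload;
    };

    static void example_free_rcu(struct rcu_head *head)
    {
            /* runs only after all pre-existing RCU readers have finished */
            kfree(container_of(head, struct example_obj, rcu));
    }

    static void example_release(struct example_obj *obj)
    {
            /* readers that found obj under rcu_read_lock() remain safe */
            call_rcu(&obj->rcu, example_free_rcu);
    }
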
index a96e5ec211a027398f243ec8c7ac99346b0d7082..271c89bdf049706dbc8288457c308f437a427205 100644 (file)
@@ -1342,7 +1342,7 @@ err:
 /* should be called with rcu_read_lock */
 static int check_leaf(struct trie *t, struct leaf *l,
                      t_key key,  const struct flowi *flp,
-                     struct fib_result *res)
+                     struct fib_result *res, int fib_flags)
 {
        struct leaf_info *li;
        struct hlist_head *hhead = &l->list;
@@ -1356,7 +1356,7 @@ static int check_leaf(struct trie *t, struct leaf *l,
                if (l->key != (key & ntohl(mask)))
                        continue;
 
-               err = fib_semantic_match(&li->falh, flp, res, plen);
+               err = fib_semantic_match(&li->falh, flp, res, plen, fib_flags);
 
 #ifdef CONFIG_IP_FIB_TRIE_STATS
                if (err <= 0)
@@ -1372,7 +1372,7 @@ static int check_leaf(struct trie *t, struct leaf *l,
 }
 
 int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
-                    struct fib_result *res)
+                    struct fib_result *res, int fib_flags)
 {
        struct trie *t = (struct trie *) tb->tb_data;
        int ret;
@@ -1399,7 +1399,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
 
        /* Just a leaf? */
        if (IS_LEAF(n)) {
-               ret = check_leaf(t, (struct leaf *)n, key, flp, res);
+               ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
                goto found;
        }
 
@@ -1424,7 +1424,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
                }
 
                if (IS_LEAF(n)) {
-                       ret = check_leaf(t, (struct leaf *)n, key, flp, res);
+                       ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
                        if (ret > 0)
                                goto backtrace;
                        goto found;
index 2a4bb76f2132957da25326ce98653b249d9ccaaf..25f339672b2891347ffed75adc3d91fb6486598b 100644 (file)
@@ -1269,14 +1269,14 @@ void ip_mc_rejoin_group(struct ip_mc_list *im)
        if (im->multiaddr == IGMP_ALL_HOSTS)
                return;
 
-       if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
-               igmp_mod_timer(im, IGMP_Initial_Report_Delay);
-               return;
-       }
-       /* else, v3 */
-       im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
-               IGMP_Unsolicited_Report_Count;
-       igmp_ifc_event(in_dev);
+       /* a failover is happening and switches
+        * must be notified immediately */
+       if (IGMP_V1_SEEN(in_dev))
+               igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
+       else if (IGMP_V2_SEEN(in_dev))
+               igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
+       else
+               igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);
 #endif
 }
 EXPORT_SYMBOL(ip_mc_rejoin_group);
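
[Not part of the patch — rough sketch.] ip_mc_rejoin_group() now emits a membership report immediately (an IGMPv1, v2 or v3 report, depending on what the querier speaks) instead of arming the report timer, so switches relearn the multicast forwarding state as soon as a failover happens. A hypothetical failover handler might walk the device's group list and rejoin each group; the mc_list/mc_list_lock walk below is an assumption about kernels of this era, not code from this series:

    static void example_rejoin_all(struct net_device *dev)
    {
            struct in_device *in_dev;
            struct ip_mc_list *im;

            rcu_read_lock();
            in_dev = __in_dev_get_rcu(dev);
            if (in_dev) {
                    read_lock(&in_dev->mc_list_lock);
                    for (im = in_dev->mc_list; im; im = im->next)
                            ip_mc_rejoin_group(im); /* immediate report */
                    read_unlock(&in_dev->mc_list_lock);
            }
            rcu_read_unlock();
    }
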
index fbe2c473a06a05ca82b0f0d2fff935924aa114e5..9d421f4cf3efbf41f52d1bd9d0a43c5b6aa2313d 100644 (file)
@@ -679,8 +679,7 @@ static int ipgre_rcv(struct sk_buff *skb)
                skb_reset_network_header(skb);
                ipgre_ecn_decapsulate(iph, skb);
 
-               if (netif_rx(skb) == NET_RX_DROP)
-                       tunnel->dev->stats.rx_dropped++;
+               netif_rx(skb);
 
                rcu_read_unlock();
                return 0;
index 6ad46c28ede210b822bf0e9585e130beaccebf05..e9b816e6cd73a681ea02e9540d6849eb924f4a8a 100644 (file)
@@ -414,8 +414,7 @@ static int ipip_rcv(struct sk_buff *skb)
 
                ipip_ecn_decapsulate(iph, skb);
 
-               if (netif_rx(skb) == NET_RX_DROP)
-                       tunnel->dev->stats.rx_dropped++;
+               netif_rx(skb);
 
                rcu_read_unlock();
                return 0;
index 04e0df82b88cde2573fe30e380a1c5d0ec50f0da..7864d0c489683e985a1a900b66ad1635557dfbbe 100644 (file)
@@ -1773,12 +1773,15 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
 
        if (rt->fl.iif == 0)
                src = rt->rt_src;
-       else if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0) {
-               src = FIB_RES_PREFSRC(res);
-               fib_res_put(&res);
-       } else
-               src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
+       else {
+               rcu_read_lock();
+               if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0)
+                       src = FIB_RES_PREFSRC(res);
+               else
+                       src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
                                        RT_SCOPE_UNIVERSE);
+               rcu_read_unlock();
+       }
        memcpy(addr, &src, 4);
 }
 
@@ -2081,6 +2084,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
  *     Such approach solves two big problems:
  *     1. Not simplex devices are handled properly.
  *     2. IP spoofing attempts are filtered with 100% of guarantee.
+ *     called with rcu_read_lock()
  */
 
 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2102,7 +2106,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        unsigned        hash;
        __be32          spec_dst;
        int             err = -EINVAL;
-       int             free_res = 0;
        struct net    * net = dev_net(dev);
 
        /* IP on this device is disabled. */
@@ -2134,12 +2137,12 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        /*
         *      Now we are ready to route packet.
         */
-       if ((err = fib_lookup(net, &fl, &res)) != 0) {
+       err = fib_lookup(net, &fl, &res);
+       if (err != 0) {
                if (!IN_DEV_FORWARD(in_dev))
                        goto e_hostunreach;
                goto no_route;
        }
-       free_res = 1;
 
        RT_CACHE_STAT_INC(in_slow_tot);
 
@@ -2148,8 +2151,8 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
        if (res.type == RTN_LOCAL) {
                err = fib_validate_source(saddr, daddr, tos,
-                                            net->loopback_dev->ifindex,
-                                            dev, &spec_dst, &itag, skb->mark);
+                                         net->loopback_dev->ifindex,
+                                         dev, &spec_dst, &itag, skb->mark);
                if (err < 0)
                        goto martian_source_keep_err;
                if (err)
@@ -2164,9 +2167,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                goto martian_destination;
 
        err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
-done:
-       if (free_res)
-               fib_res_put(&res);
 out:   return err;
 
 brd_input:
@@ -2226,7 +2226,7 @@ local_input:
        rth->rt_type    = res.type;
        hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
        err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
-       goto done;
+       goto out;
 
 no_route:
        RT_CACHE_STAT_INC(in_no_route);
@@ -2249,21 +2249,21 @@ martian_destination:
 
 e_hostunreach:
        err = -EHOSTUNREACH;
-       goto done;
+       goto out;
 
 e_inval:
        err = -EINVAL;
-       goto done;
+       goto out;
 
 e_nobufs:
        err = -ENOBUFS;
-       goto done;
+       goto out;
 
 martian_source:
        err = -EINVAL;
 martian_source_keep_err:
        ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
-       goto done;
+       goto out;
 }
 
 int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2349,6 +2349,7 @@ skip_cache:
 }
 EXPORT_SYMBOL(ip_route_input_common);
 
+/* called with rcu_read_lock() */
 static int __mkroute_output(struct rtable **result,
                            struct fib_result *res,
                            const struct flowi *fl,
@@ -2373,18 +2374,13 @@ static int __mkroute_output(struct rtable **result,
        if (dev_out->flags & IFF_LOOPBACK)
                flags |= RTCF_LOCAL;
 
-       rcu_read_lock();
        in_dev = __in_dev_get_rcu(dev_out);
-       if (!in_dev) {
-               rcu_read_unlock();
+       if (!in_dev)
                return -EINVAL;
-       }
+
        if (res->type == RTN_BROADCAST) {
                flags |= RTCF_BROADCAST | RTCF_LOCAL;
-               if (res->fi) {
-                       fib_info_put(res->fi);
-                       res->fi = NULL;
-               }
+               res->fi = NULL;
        } else if (res->type == RTN_MULTICAST) {
                flags |= RTCF_MULTICAST | RTCF_LOCAL;
                if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
@@ -2394,10 +2390,8 @@ static int __mkroute_output(struct rtable **result,
                 * default one, but do not gateway in this case.
                 * Yes, it is hack.
                 */
-               if (res->fi && res->prefixlen < 4) {
-                       fib_info_put(res->fi);
+               if (res->fi && res->prefixlen < 4)
                        res->fi = NULL;
-               }
        }
 
 
@@ -2467,6 +2461,7 @@ static int __mkroute_output(struct rtable **result,
        return 0;
 }
 
+/* called with rcu_read_lock() */
 static int ip_mkroute_output(struct rtable **rp,
                             struct fib_result *res,
                             const struct flowi *fl,
@@ -2509,7 +2504,6 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
        struct fib_result res;
        unsigned int flags = 0;
        struct net_device *dev_out = NULL;
-       int free_res = 0;
        int err;
 
 
@@ -2636,15 +2630,12 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
                err = -ENETUNREACH;
                goto out;
        }
-       free_res = 1;
 
        if (res.type == RTN_LOCAL) {
                if (!fl.fl4_src)
                        fl.fl4_src = fl.fl4_dst;
                dev_out = net->loopback_dev;
                fl.oif = dev_out->ifindex;
-               if (res.fi)
-                       fib_info_put(res.fi);
                res.fi = NULL;
                flags |= RTCF_LOCAL;
                goto make_route;
@@ -2668,8 +2659,6 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
 make_route:
        err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
 
-       if (free_res)
-               fib_res_put(&res);
 out:   return err;
 }
 
index 8be3c452af9096e54c9736386c1c9d24b1328c36..c2c0f89397b1164bacefdb449cd2b97dbe41d66c 100644 (file)
@@ -768,8 +768,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 
                dscp_ecn_decapsulate(t, ipv6h, skb);
 
-               if (netif_rx(skb) == NET_RX_DROP)
-                       t->dev->stats.rx_dropped++;
+               netif_rx(skb);
 
                rcu_read_unlock();
                return 0;
index 2640c9be589dd62805dab83ed910ba4cbad694d6..6f32ffce7022c198e4c78c9126c3df90026697c2 100644 (file)
@@ -666,8 +666,7 @@ static int pim6_rcv(struct sk_buff *skb)
 
        skb_tunnel_rx(skb, reg_dev);
 
-       if (netif_rx(skb) == NET_RX_DROP)
-               reg_dev->stats.rx_dropped++;
+       netif_rx(skb);
 
        dev_put(reg_dev);
        return 0;
index b3dd844cd34fea9d53f917006b395c2e9f30e46a..998d6d27e7cf293383b7c4e944899790170ba690 100644 (file)
@@ -91,7 +91,9 @@
 #include <linux/netfilter.h>
 #include <linux/netfilter_ipv6.h>
 
-static u32 ndisc_hash(const void *pkey, const struct net_device *dev);
+static u32 ndisc_hash(const void *pkey,
+                     const struct net_device *dev,
+                     __u32 rnd);
 static int ndisc_constructor(struct neighbour *neigh);
 static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb);
 static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -350,7 +352,9 @@ int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int d
 
 EXPORT_SYMBOL(ndisc_mc_map);
 
-static u32 ndisc_hash(const void *pkey, const struct net_device *dev)
+static u32 ndisc_hash(const void *pkey,
+                     const struct net_device *dev,
+                     __u32 hash_rnd)
 {
        const u32 *p32 = pkey;
        u32 addr_hash, i;
@@ -359,7 +363,7 @@ static u32 ndisc_hash(const void *pkey, const struct net_device *dev)
        for (i = 0; i < (sizeof(struct in6_addr) / sizeof(u32)); i++)
                addr_hash ^= *p32++;
 
-       return jhash_2words(addr_hash, dev->ifindex, nd_tbl.hash_rnd);
+       return jhash_2words(addr_hash, dev->ifindex, hash_rnd);
 }
 
 static int ndisc_constructor(struct neighbour *neigh)
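
[Not part of the patch — illustrative sketch.] The neighbour-table .hash callback now receives the table's hash_rnd as an explicit argument instead of reaching into nd_tbl.hash_rnd, which lets the core rehash a table with a fresh random seed. A minimal sketch of the new callback shape for some other (hypothetical) 32-bit key:

    #include <linux/jhash.h>

    static u32 example_neigh_hash(const void *pkey,
                                  const struct net_device *dev,
                                  __u32 hash_rnd)
    {
            /* the seed comes from the caller, not from a global table */
            return jhash_2words(*(const u32 *)pkey, dev->ifindex, hash_rnd);
    }
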
index d7701782b6391a03bd04098d5d8b749d81fff4bb..367a6cc584ccc40bb1c5a7fc03022bd89f304d10 100644 (file)
@@ -600,8 +600,7 @@ static int ipip6_rcv(struct sk_buff *skb)
 
                ipip6_ecn_decapsulate(iph, skb);
 
-               if (netif_rx(skb) == NET_RX_DROP)
-                       tunnel->dev->stats.rx_dropped++;
+               netif_rx(skb);
 
                rcu_read_unlock();
                return 0;
index c586da3f4f188f0b723cf43e77b431b292ab7b2e..0ebc777a66601e05d48cc8805e57cb957d9512d4 100644 (file)
@@ -1511,6 +1511,8 @@ restart:
                goto restart;
        }
 
+       if (sock_flag(other, SOCK_RCVTSTAMP))
+               __net_timestamp(skb);
        skb_queue_tail(&other->sk_receive_queue, skb);
        unix_state_unlock(other);
        other->sk_data_ready(other, len);
@@ -1722,6 +1724,9 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
        if (err)
                goto out_free;
 
+       if (sock_flag(sk, SOCK_RCVTSTAMP))
+               __sock_recv_timestamp(msg, sk, skb);
+
        if (!siocb->scm) {
                siocb->scm = &tmp_scm;
                memset(&tmp_scm, 0, sizeof(tmp_scm));
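
[Not part of the patch — user-space sketch.] With the af_unix.c hunks, AF_UNIX datagram sockets honour SOCK_RCVTSTAMP: the send path stamps the skb and the receive path returns the timestamp as ancillary data. A sketch of how a receiver could consume it, assuming an already-bound datagram socket fd:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/time.h>

    static void read_with_timestamp(int fd)
    {
            char buf[256], cbuf[CMSG_SPACE(sizeof(struct timeval))];
            struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
            struct msghdr msg = {
                    .msg_iov = &iov, .msg_iovlen = 1,
                    .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
            };
            struct cmsghdr *cmsg;
            int on = 1;

            /* sets SOCK_RCVTSTAMP on the receiving socket */
            setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
            if (recvmsg(fd, &msg, 0) < 0)
                    return;

            for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                    if (cmsg->cmsg_level == SOL_SOCKET &&
                        cmsg->cmsg_type == SCM_TIMESTAMP) {
                            struct timeval tv;

                            memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
                            printf("rx at %ld.%06ld\n",
                                   (long)tv.tv_sec, (long)tv.tv_usec);
                    }
            }
    }
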