net: add a noref bit on skb dst
[firefly-linux-kernel-4.4.55.git] / net / core / dev.c
index 3daee30a7c823529ac03c1e05f74b7571a642506..6c820650b80fbe63105303eed14d92fc0115a5a8 100644 (file)
@@ -1454,7 +1454,7 @@ void net_disable_timestamp(void)
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
-static inline void net_timestamp(struct sk_buff *skb)
+static inline void net_timestamp_set(struct sk_buff *skb)
 {
        if (atomic_read(&netstamp_needed))
                __net_timestamp(skb);
@@ -1462,6 +1462,12 @@ static inline void net_timestamp(struct sk_buff *skb)
                skb->tstamp.tv64 = 0;
 }
 
+static inline void net_timestamp_check(struct sk_buff *skb)
+{
+       if (!skb->tstamp.tv64 && atomic_read(&netstamp_needed))
+               __net_timestamp(skb);
+}
+
 /**
  * dev_forward_skb - loopback an skb to another netif
  *
@@ -1508,9 +1514,9 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 
 #ifdef CONFIG_NET_CLS_ACT
        if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
-               net_timestamp(skb);
+               net_timestamp_set(skb);
 #else
-       net_timestamp(skb);
+       net_timestamp_set(skb);
 #endif
 
        rcu_read_lock();
@@ -2046,6 +2052,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                 * waiting to be sent out; and the qdisc is not running -
                 * xmit the skb directly.
                 */
+               if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
+                       skb_dst_force(skb);
                __qdisc_update_bstats(q, skb->len);
                if (sch_direct_xmit(skb, q, dev, txq, root_lock))
                        __qdisc_run(q);
@@ -2054,6 +2062,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 
                rc = NET_XMIT_SUCCESS;
        } else {
+               skb_dst_force(skb);
                rc = qdisc_enqueue_root(skb, q);
                qdisc_run(q);
        }
@@ -2201,6 +2210,7 @@ EXPORT_SYMBOL(dev_queue_xmit);
   =======================================================================*/
 
 int netdev_max_backlog __read_mostly = 1000;
+int netdev_tstamp_prequeue __read_mostly = 1;
 int netdev_budget __read_mostly = 300;
 int weight_p __read_mostly = 64;            /* old backlog weight */
 
@@ -2425,8 +2435,10 @@ enqueue:
                        return NET_RX_SUCCESS;
                }
 
-               /* Schedule NAPI for backlog device */
-               if (napi_schedule_prep(&sd->backlog)) {
+               /* Schedule NAPI for backlog device
+                * We can use a non-atomic operation since we own the queue lock
+                */
+               if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
                        if (!rps_ipi_queued(sd))
                                ____napi_schedule(sd, &sd->backlog);
                }
@@ -2465,8 +2477,8 @@ int netif_rx(struct sk_buff *skb)
        if (netpoll_rx(skb))
                return NET_RX_DROP;
 
-       if (!skb->tstamp.tv64)
-               net_timestamp(skb);
+       if (netdev_tstamp_prequeue)
+               net_timestamp_check(skb);
 
 #ifdef CONFIG_RPS
        {
@@ -2612,7 +2624,8 @@ static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
 #endif
 
 #if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
-struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
+struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p,
+                                            struct sk_buff *skb) __read_mostly;
 EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
 
 static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
@@ -2620,14 +2633,17 @@ static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
                                             int *ret,
                                             struct net_device *orig_dev)
 {
-       if (skb->dev->macvlan_port == NULL)
+       struct macvlan_port *port;
+
+       port = rcu_dereference(skb->dev->macvlan_port);
+       if (!port)
                return skb;
 
        if (*pt_prev) {
                *ret = deliver_skb(skb, *pt_prev, orig_dev);
                *pt_prev = NULL;
        }
-       return macvlan_handle_frame_hook(skb);
+       return macvlan_handle_frame_hook(port, skb);
 }
 #else
 #define handle_macvlan(skb, pt_prev, ret, orig_dev)    (skb)
@@ -2787,8 +2803,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
        int ret = NET_RX_DROP;
        __be16 type;
 
-       if (!skb->tstamp.tv64)
-               net_timestamp(skb);
+       if (!netdev_tstamp_prequeue)
+               net_timestamp_check(skb);
 
        if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
                return NET_RX_SUCCESS;
@@ -2906,23 +2922,28 @@ out:
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
+       if (netdev_tstamp_prequeue)
+               net_timestamp_check(skb);
+
 #ifdef CONFIG_RPS
-       struct rps_dev_flow voidflow, *rflow = &voidflow;
-       int cpu, ret;
+       {
+               struct rps_dev_flow voidflow, *rflow = &voidflow;
+               int cpu, ret;
 
-       rcu_read_lock();
+               rcu_read_lock();
 
-       cpu = get_rps_cpu(skb->dev, skb, &rflow);
+               cpu = get_rps_cpu(skb->dev, skb, &rflow);
 
-       if (cpu >= 0) {
-               ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
-               rcu_read_unlock();
-       } else {
-               rcu_read_unlock();
-               ret = __netif_receive_skb(skb);
-       }
+               if (cpu >= 0) {
+                       ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+                       rcu_read_unlock();
+               } else {
+                       rcu_read_unlock();
+                       ret = __netif_receive_skb(skb);
+               }
 
-       return ret;
+               return ret;
+       }
 #else
        return __netif_receive_skb(skb);
 #endif