net: fix saving TX flow hash in sock for outgoing connections
[firefly-linux-kernel-4.4.55.git] / net / ipv4 / udp_offload.c
index 59035bc3008d2bc45ec87e9eea4145120bf664be..6480cea7aa53a6df85a970279b82e3b9ed5a33e6 100644 (file)
@@ -25,30 +25,11 @@ struct udp_offload_priv {
        struct udp_offload_priv __rcu *next;
 };
 
-static int udp4_ufo_send_check(struct sk_buff *skb)
-{
-       if (!pskb_may_pull(skb, sizeof(struct udphdr)))
-               return -EINVAL;
-
-       if (likely(!skb->encapsulation)) {
-               const struct iphdr *iph;
-               struct udphdr *uh;
-
-               iph = ip_hdr(skb);
-               uh = udp_hdr(skb);
-
-               uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
-                               IPPROTO_UDP, 0);
-               skb->csum_start = skb_transport_header(skb) - skb->head;
-               skb->csum_offset = offsetof(struct udphdr, check);
-               skb->ip_summed = CHECKSUM_PARTIAL;
-       }
-
-       return 0;
-}
-
-struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
-                                      netdev_features_t features)
+static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
+       netdev_features_t features,
+       struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
+                                            netdev_features_t features),
+       __be16 new_protocol)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        u16 mac_offset = skb->mac_header;
@@ -70,15 +51,15 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
        skb_reset_mac_header(skb);
        skb_set_network_header(skb, skb_inner_network_offset(skb));
        skb->mac_len = skb_inner_network_offset(skb);
-       skb->protocol = htons(ETH_P_TEB);
+       skb->protocol = new_protocol;
 
        need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
        if (need_csum)
                skb->encap_hdr_csum = 1;
 
        /* segment inner packet. */
-       enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
-       segs = skb_mac_gso_segment(skb, enc_features);
+       enc_features = skb->dev->hw_enc_features & features;
+       segs = gso_inner_segment(skb, enc_features);
        if (IS_ERR_OR_NULL(segs)) {
                skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
                                     mac_len);
@@ -123,21 +104,63 @@ out:
        return segs;
 }
 
+/* skb_udp_tunnel_segment - GSO-segment a UDP-encapsulated skb.
+ *
+ * Picks the segmentation callback for the inner payload based on the
+ * skb's encapsulation type, then delegates to __skb_udp_tunnel_segment():
+ *   ENCAP_TYPE_ETHER   - inner frame is Ethernet; segment via the MAC layer
+ *                        and restore the stashed inner protocol.
+ *   ENCAP_TYPE_IPPROTO - inner payload is a bare IP protocol; look up its
+ *                        gso_segment callback in the IPv4 or IPv6 offload
+ *                        table (selected by @is_ipv6) under RCU.
+ * Returns the segment list, or ERR_PTR(-EINVAL) when the encapsulation
+ * type is unknown or no gso_segment handler is registered.
+ */
+struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+                                      netdev_features_t features,
+                                      bool is_ipv6)
+{
+       __be16 protocol = skb->protocol;
+       const struct net_offload **offloads;
+       const struct net_offload *ops;
+       struct sk_buff *segs = ERR_PTR(-EINVAL);
+       struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
+                                            netdev_features_t features);
+
+       /* RCU protects the inet{,6}_offloads table lookup below. */
+       rcu_read_lock();
+
+       switch (skb->inner_protocol_type) {
+       case ENCAP_TYPE_ETHER:
+               protocol = skb->inner_protocol;
+               gso_inner_segment = skb_mac_gso_segment;
+               break;
+       case ENCAP_TYPE_IPPROTO:
+               offloads = is_ipv6 ? inet6_offloads : inet_offloads;
+               ops = rcu_dereference(offloads[skb->inner_ipproto]);
+               if (!ops || !ops->callbacks.gso_segment)
+                       goto out_unlock;
+               gso_inner_segment = ops->callbacks.gso_segment;
+               break;
+       default:
+               /* Unknown encapsulation: fail with the ERR_PTR default. */
+               goto out_unlock;
+       }
+
+       segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
+                                       protocol);
+
+out_unlock:
+       rcu_read_unlock();
+
+       return segs;
+}
+
 static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                                         netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;
-       int offset;
        __wsum csum;
+       struct udphdr *uh;
+       struct iphdr *iph;
 
        if (skb->encapsulation &&
            (skb_shinfo(skb)->gso_type &
             (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
-               segs = skb_udp_tunnel_segment(skb, features);
+               segs = skb_udp_tunnel_segment(skb, features, false);
                goto out;
        }
 
+       if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+               goto out;
+
        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
                goto out;
@@ -165,10 +188,16 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
         * HW cannot do checksum of UDP packets sent as multiple
         * IP fragments.
         */
-       offset = skb_checksum_start_offset(skb);
-       csum = skb_checksum(skb, offset, skb->len - offset, 0);
-       offset += skb->csum_offset;
-       *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+
+       uh = udp_hdr(skb);
+       iph = ip_hdr(skb);
+
+       uh->check = 0;
+       csum = skb_checksum(skb, 0, skb->len, 0);
+       uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
+       if (uh->check == 0)
+               uh->check = CSUM_MANGLED_0;
+
        skb->ip_summed = CHECKSUM_NONE;
 
        /* Fragment the skb. IP headers of the fragments are updated in
@@ -228,30 +257,24 @@ unlock:
 }
 EXPORT_SYMBOL(udp_del_offload);
 
-static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+                                struct udphdr *uh)
 {
        struct udp_offload_priv *uo_priv;
        struct sk_buff *p, **pp = NULL;
-       struct udphdr *uh, *uh2;
-       unsigned int hlen, off;
+       struct udphdr *uh2;
+       unsigned int off = skb_gro_offset(skb);
        int flush = 1;
 
        if (NAPI_GRO_CB(skb)->udp_mark ||
-           (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
+           (skb->ip_summed != CHECKSUM_PARTIAL &&
+            NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+            !NAPI_GRO_CB(skb)->csum_valid))
                goto out;
 
        /* mark that this skb passed once through the udp gro layer */
        NAPI_GRO_CB(skb)->udp_mark = 1;
 
-       off  = skb_gro_offset(skb);
-       hlen = off + sizeof(*uh);
-       uh   = skb_gro_header_fast(skb, off);
-       if (skb_gro_header_hard(skb, hlen)) {
-               uh = skb_gro_header_slow(skb, hlen, off);
-               if (unlikely(!uh))
-                       goto out;
-       }
-
        rcu_read_lock();
        uo_priv = rcu_dereference(udp_offload_base);
        for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
@@ -269,7 +292,12 @@ unflush:
                        continue;
 
                uh2 = (struct udphdr   *)(p->data + off);
-               if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
+
+       /* Match ports, and require the checksums to be either both zero
+        * or both nonzero.
+        */
+               if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
+                   (!uh->check ^ !uh2->check)) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }
@@ -277,6 +305,7 @@ unflush:
 
        skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
        skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
+       NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
        pp = uo_priv->offload->callbacks.gro_receive(head, skb);
 
 out_unlock:
@@ -286,7 +315,34 @@ out:
        return pp;
 }
 
-static int udp_gro_complete(struct sk_buff *skb, int nhoff)
+/* udp4_gro_receive - IPv4 front end for UDP GRO.
+ *
+ * Pulls the UDP header, validates (or converts) the checksum against the
+ * IPv4 pseudo-header, then hands off to the protocol-independent
+ * udp_gro_receive().  A zero uh->check is accepted as "no checksum"
+ * (legal for UDP over IPv4).  On any failure the skb is marked for
+ * flush so it bypasses coalescing.
+ */
+static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
+                                        struct sk_buff *skb)
+{
+       struct udphdr *uh = udp_gro_udphdr(skb);
+
+       if (unlikely(!uh))
+               goto flush;
+
+       /* Don't bother verifying checksum if we're going to flush anyway. */
+       if (NAPI_GRO_CB(skb)->flush)
+               goto skip;
+
+       if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
+                                                inet_gro_compute_pseudo))
+               goto flush;
+       else if (uh->check)
+               /* Valid nonzero checksum: convert to CHECKSUM_UNNECESSARY
+                * so later layers can skip re-verification.
+                */
+               skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+                                            inet_gro_compute_pseudo);
+skip:
+       NAPI_GRO_CB(skb)->is_ipv6 = 0;
+       return udp_gro_receive(head, skb, uh);
+
+flush:
+       NAPI_GRO_CB(skb)->flush = 1;
+       return NULL;
+}
+
+int udp_gro_complete(struct sk_buff *skb, int nhoff)
 {
        struct udp_offload_priv *uo_priv;
        __be16 newlen = htons(skb->len - nhoff);
@@ -304,19 +360,32 @@ static int udp_gro_complete(struct sk_buff *skb, int nhoff)
                        break;
        }
 
-       if (uo_priv != NULL)
+       if (uo_priv != NULL) {
+               NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
                err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));
+       }
 
        rcu_read_unlock();
        return err;
 }
 
+/* udp4_gro_complete - finish GRO for a coalesced IPv4 UDP packet.
+ *
+ * The merged super-packet has a new length, so if the original packet
+ * carried a checksum (uh->check != 0) recompute the IPv4 pseudo-header
+ * seed over the full coalesced length before calling the generic
+ * udp_gro_complete().  @nhoff is the offset of the UDP header.
+ */
+static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
+
+       if (uh->check)
+               uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
+                                         iph->daddr, 0);
+
+       return udp_gro_complete(skb, nhoff);
+}
+
+/* IPv4 UDP offload callbacks; gso_send_check is gone — checksum setup
+ * now happens inside udp4_ufo_fragment, and the GRO hooks are the
+ * IPv4-specific wrappers that handle pseudo-header checksums.
+ */
 static const struct net_offload udpv4_offload = {
        .callbacks = {
-               .gso_send_check = udp4_ufo_send_check,
                .gso_segment = udp4_ufo_fragment,
-               .gro_receive  = udp_gro_receive,
-               .gro_complete = udp_gro_complete,
+               .gro_receive  = udp4_gro_receive,
+               .gro_complete = udp4_gro_complete,
        },
 };