/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
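
/*
 * Illustrative sketch (not part of this file): the IPV6_V6ONLY behaviour
 * credited above, seen from user space.  With the option cleared (the
 * default), a single AF_INET6 socket bound to the wildcard address also
 * receives IPv4 datagrams as v4-mapped addresses; with it set, an IPv4
 * and an IPv6 socket can bind the same port side by side.  Standard
 * sockets API only; the helper name is made up for the example.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int bind_udp6(unsigned short port, int v6only)
{
	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
				  .sin6_port = htons(port) };
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	/* 0: also accept IPv4 as ::ffff:a.b.c.d; 1: IPv6 traffic only */
	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &v6only, sizeof(v6only));
	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
		return -1;
	return fd;
}
#endif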

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp_ipv6_hash_secret + net_hash_mix(net));
}

int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
{
	const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
	int sk2_ipv6only = inet_v6_ipv6only(sk2);
	int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

	/* if both are mapped, treat as IPv4 */
	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
		return (!sk2_ipv6only &&
			(!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr ||
			  sk->sk_rcv_saddr == sk2->sk_rcv_saddr));

	if (addr_type2 == IPV6_ADDR_ANY &&
	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
		return 1;

	if (addr_type == IPV6_ADDR_ANY &&
	    !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED))
		return 1;

	if (sk2_rcv_saddr6 &&
	    ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
		return 1;

	return 0;
}

static u32 udp6_portaddr_hash(const struct net *net,
			      const struct in6_addr *addr6,
			      unsigned int port)
{
	unsigned int hash, mix = net_hash_mix(net);

	if (ipv6_addr_any(addr6))
		hash = jhash_1word(0, mix);
	else if (ipv6_addr_v4mapped(addr6))
		hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
	else
		hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);

	return hash ^ port;
}

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
}

static void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = udp6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}

static inline int compute_score(struct sock *sk, struct net *net,
				unsigned short hnum,
				const struct in6_addr *saddr, __be16 sport,
				const struct in6_addr *daddr, __be16 dport,
				int dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	if (sk->sk_bound_dev_if) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score++;
	}

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;

	return score;
}

static inline int compute_score2(struct sock *sk, struct net *net,
				 const struct in6_addr *saddr, __be16 sport,
				 const struct in6_addr *daddr,
				 unsigned short hnum, int dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	if (sk->sk_bound_dev_if) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score++;
	}

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;

	return score;
}

/* called with read_rcu_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum, int dif,
		struct udp_hslot *hslot2, unsigned int slot2)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

begin:
	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
		score = compute_score2(sk, net, saddr, sport,
				       daddr, hnum, dif);
		if (score > badness) {
			result = sk;
			badness = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp6_ehashfn(net, daddr, hnum,
						    saddr, sport);
				matches = 1;
			}
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot2)
		goto begin;
	if (result) {
		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(compute_score2(result, net, saddr, sport,
				  daddr, hnum, dif) < badness)) {
			sock_put(result);
			goto begin;
		}
	}
	return result;
}
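
/*
 * Illustrative sketch (not part of this file): the reuseport handling in
 * the lookup above spreads incoming datagrams across sockets that were
 * all bound with SO_REUSEPORT to the same address and port, using a flow
 * hash (udp6_ehashfn) so that datagrams of one flow tend to land on one
 * socket.  A user-space worker opts in roughly like this; the helper name
 * is made up for the example.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>

static int reuseport_worker_socket(unsigned short port)
{
	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
				  .sin6_port = htons(port) };
	int one = 1;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	/* every worker sets SO_REUSEPORT before bind()ing the same port */
	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
		return -1;
	return fd;
}
#endif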

struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, struct udp_table *udptable)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	rcu_read_lock();
	if (hslot->count > 10) {
		hash2 = udp6_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp6_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif,
					  hslot2, slot2);
		if (!result) {
			hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
			slot2 = hash2 & udptable->mask;
			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp6_lib_lookup2(net, saddr, sport,
						  &in6addr_any, hnum, dif,
						  hslot2, slot2);
		}
		rcu_read_unlock();
		return result;
	}
begin:
	result = NULL;
	badness = -1;
	sk_nulls_for_each_rcu(sk, node, &hslot->head) {
		score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
		if (score > badness) {
			result = sk;
			badness = score;
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp6_ehashfn(net, daddr, hnum,
						    saddr, sport);
				matches = 1;
			}
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;

	if (result) {
		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(compute_score(result, net, hnum, saddr, sport,
					daddr, dport, dif) < badness)) {
			sock_put(result);
			goto begin;
		}
	}
	rcu_read_unlock();
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	struct sock *sk;
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	sk = skb_steal_sock(skb);
	if (unlikely(sk))
		return sk;
	return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 udptable);
}

struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
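
/*
 * Illustrative sketch (not part of this file): udp6_lib_lookup() is
 * exported for other kernel code (e.g. tunnel drivers) that needs to map
 * a 4-tuple back to the owning UDP socket.  The lookup takes a reference
 * on the socket it returns, so the caller must drop it with sock_put().
 * my_find_udp6_sock() is a hypothetical wrapper, shown only to
 * illustrate the calling convention.
 */
#if 0
static bool my_find_udp6_sock(struct net *net, struct sk_buff *skb,
			      __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct sock *sk;

	sk = udp6_lib_lookup(net, &iph->saddr, sport,
			     &iph->daddr, dport, inet6_iif(skb));
	if (!sk)
		return false;
	/* ... use the socket ... */
	sock_put(sk);		/* release the reference taken by the lookup */
	return true;
}
#endif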

 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	unsigned int ulen, copied;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;
	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);
	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
	skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				  &peeked, &off, &err);
	ulen = skb->len - sizeof(struct udphdr);
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;
	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	 * If checksum is needed at all, try to do it while copying the
	 * data. If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
		checksum_valid = !udp_lib_checksum_complete(skb);
	if (checksum_valid || skb_csum_unnecessary(skb))
		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
		err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
		trace_kfree_skb(skb, udpv6_recvmsg);
			atomic_inc(&sk->sk_drops);
				UDP_INC_STATS_USER(sock_net(sk),
				UDP6_INC_STATS_USER(sock_net(sk),
			UDP_INC_STATS_USER(sock_net(sk),
					UDP_MIB_INDATAGRAMS, is_udplite);
			UDP6_INC_STATS_USER(sock_net(sk),
					UDP_MIB_INDATAGRAMS, is_udplite);
	sock_recv_ts_and_drops(msg, sk, skb);
	/* Copy the address. */
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
			sin6->sin6_scope_id = 0;
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
		*addr_len = sizeof(*sin6);
		ip6_datagram_recv_common_ctl(sk, msg, skb);
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, skb,
					    sizeof(struct udphdr), off);
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	if (flags & MSG_TRUNC)
	skb_free_datagram_locked(sk, skb);
	slow = lock_sock_fast(sk);
	if (!skb_kill_datagram(sk, skb, flags)) {
			UDP_INC_STATS_USER(sock_net(sk),
					UDP_MIB_CSUMERRORS, is_udplite);
			UDP_INC_STATS_USER(sock_net(sk),
					UDP_MIB_INERRORS, is_udplite);
			UDP6_INC_STATS_USER(sock_net(sk),
					UDP_MIB_CSUMERRORS, is_udplite);
			UDP6_INC_STATS_USER(sock_net(sk),
					UDP_MIB_INERRORS, is_udplite);
	unlock_sock_fast(sk, slow);
	/* starting over for a new packet, but check if we need to yield */
	msg->msg_flags &= ~MSG_TRUNC;
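
/*
 * Illustrative sketch (not part of this file): the user-space view of the
 * receive path above.  recvmsg() fills in a sockaddr_in6 (a v4-mapped
 * address for IPv4 senders), and MSG_TRUNC in msg_flags reports that the
 * datagram was larger than the supplied buffer, mirroring the ulen/copied
 * handling in udpv6_recvmsg().
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>

static void recv_one(int fd)
{
	char buf[2048];
	struct sockaddr_in6 from;
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_name = &from, .msg_namelen = sizeof(from),
		.msg_iov = &iov, .msg_iovlen = 1,
	};
	ssize_t n = recvmsg(fd, &msg, 0);

	if (n < 0)
		return;
	if (msg.msg_flags & MSG_TRUNC)
		fprintf(stderr, "datagram truncated to %zd bytes\n", n);
	/* from.sin6_addr / from.sin6_port now describe the sender */
}
#endif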

void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info,
		    struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	struct sock *sk;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest,
			       saddr, uh->source, inet6_iif(skb), udptable);
	if (!sk) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
	}
	if (type == NDISC_REDIRECT) {
		ip6_sk_redirect(skb, sk);
		goto out;
	}

	np = inet6_sk(sk);

	if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
		goto out;

	if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
		goto out;

	if (np->recverr)
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}

static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	rc = sock_queue_rcv_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS_BH(sock_net(sk),
					  UDP_MIB_RCVBUFERRORS, is_udplite);
		UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		return -1;
	}

	return 0;
}

static __inline__ void udpv6_err(struct sk_buff *skb,
				 struct inet6_skb_parm *opt, u8 type,
				 u8 code, int offset, __be32 info)
{
	__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}

static struct static_key udpv6_encap_needed __read_mostly;
void udpv6_encap_enable(void)
{
	if (!static_key_enabled(&udpv6_encap_needed))
		static_key_slow_inc(&udpv6_encap_needed);
}
EXPORT_SYMBOL(udpv6_encap_enable);
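
/*
 * Illustrative sketch (not part of this file): how an encapsulation user
 * wires itself up to this code.  A tunnel module that owns a kernel UDP
 * socket sets up->encap_type and up->encap_rcv, then calls
 * udpv6_encap_enable() so the static key above is switched on.
 * my_encap_rcv()/my_encap_setup() are hypothetical; the return-value
 * convention is the one documented in udpv6_queue_rcv_skb() below
 * (0 = consumed, >0 = pass on to UDP, <0 = resubmit as proto -N).
 */
#if 0
static int my_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* ... parse the tunnel header; consume or hand back the skb ... */
	return 0;		/* consumed by the encap handler */
}

static void my_encap_setup(struct sock *sk)
{
	udp_sk(sk)->encap_type = 1;		/* non-zero selects the hook */
	udp_sk(sk)->encap_rcv = my_encap_rcv;
	udpv6_encap_enable();
}
#endif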

int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);
	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
	if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		/* if we're overly short, let UDP handle it */
		encap_rcv = ACCESS_ONCE(up->encap_rcv);
			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
			ret = encap_rcv(sk, skb);
				UDP_INC_STATS_BH(sock_net(sk),
		/* FALLTHROUGH -- it's a UDP Packet */
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
		if (up->pcrlen == 0) { /* full coverage was set */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
	if (rcu_access_pointer(sk->sk_filter)) {
		if (udp_lib_checksum_complete(skb))
	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		UDP6_INC_STATS_BH(sock_net(sk),
				  UDP_MIB_RCVBUFERRORS, is_udplite);
	if (!sock_owned_by_user(sk))
		rc = __udpv6_queue_rcv_skb(sk, skb);
	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);

static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void flush_stack(struct sock **stack, unsigned int count,
			struct sk_buff *skb, unsigned int final)
{
	struct sk_buff *skb1 = NULL;
	struct sock *sk;
	unsigned int i;

	for (i = 0; i < count; i++) {
		sk = stack[i];
		if (likely(!skb1))
			skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
		if (!skb1) {
			atomic_inc(&sk->sk_drops);
			UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
					  IS_UDPLITE(sk));
			UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
					  IS_UDPLITE(sk));
		}

		if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
			skb1 = NULL;
		sock_put(sk);
	}
	if (unlikely(skb1))
		kfree_skb(skb1);
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}
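
/*
 * Illustrative sketch (not part of this file): the no_check6_rx /
 * no_check6_tx bits consulted in the receive and send paths correspond to
 * the UDP_NO_CHECK6_RX and UDP_NO_CHECK6_TX socket options (the RFC
 * 6935/6936 zero-checksum relaxation for tunnels).  By default a zero UDP
 * checksum over IPv6 is an error, which is what udp6_csum_zero_error()
 * logs above.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/udp.h>

static void allow_zero_csum(int fd)
{
	int on = 1;

	/* accept incoming datagrams that carry a zero UDP checksum */
	setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_RX, &on, sizeof(on));
	/* and send our own datagrams without computing one */
	setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_TX, &on, sizeof(on));
}
#endif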

 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
	struct sock *sk, *stack[256 / sizeof(struct sock *)];
	const struct udphdr *uh = udp_hdr(skb);
	struct hlist_nulls_node *node;
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	int dif = inet6_iif(skb);
	unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	bool inner_flushed = false;
		hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
		hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask;
		hslot = &udp_table.hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	spin_lock(&hslot->lock);
	sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
		if (__udp_v6_is_mcast_sock(net, sk,
		    /* If zero checksum and no_check is not on for
		     * the socket then skip it.
		    (uh->check || udp_sk(sk)->no_check6_rx)) {
			if (unlikely(count == ARRAY_SIZE(stack))) {
				flush_stack(stack, count, skb, ~0);
				inner_flushed = true;
	spin_unlock(&hslot->lock);
	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		flush_stack(stack, count, skb, count - 1);
			UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
					  proto == IPPROTO_UDPLITE);
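
/*
 * Illustrative sketch (not part of this file): the receive side that
 * __udp6_lib_mcast_deliver() serves.  A socket only matches in
 * __udp_v6_is_mcast_sock()/inet6_mc_check() if it has joined the group,
 * which user space does with IPV6_JOIN_GROUP on a given interface index.
 * The helper name is made up for the example.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>
#include <arpa/inet.h>

static int join_group(int fd, const char *group, unsigned int ifindex)
{
	struct ipv6_mreq mreq = { .ipv6mr_interface = ifindex };

	if (inet_pton(AF_INET6, group, &mreq.ipv6mr_multiaddr) != 1)
		return -1;
	return setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP,
			  &mreq, sizeof(mreq));
}
#endif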

int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
	struct net *net = dev_net(skb->dev);
	const struct in6_addr *saddr, *daddr;
	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	ulen = ntohs(uh->len);
	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */
		/* Check for jumbo payload */
		if (ulen < sizeof(*uh))
		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
	if (udp6_csum_init(skb, uh, proto))
	 *	Multicast receive code
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);
	 * check socket cache ... must talk to Alan about his plans
	 * for sock caches... i'll skip this for now.
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			udp6_csum_zero_error(skb);
		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
		ret = udpv6_queue_rcv_skb(sk, skb);
		/* a return value > 0 means to resubmit the input */
		udp6_csum_zero_error(skb);
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
	if (udp_lib_checksum_complete(skb))
	UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    daddr, ntohs(uh->dest));
	UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
	UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);

static __inline__ int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
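
/*
 * Illustrative sketch (not part of this file): what the csum_ipv6_magic()
 * calls around here fold in, written out as a plain portable function.
 * The UDP checksum over IPv6 covers a pseudo-header of source address,
 * destination address, upper-layer length and next-header, plus the UDP
 * header and payload (RFC 2460 section 8.1).
 */
#if 0
#include <stdint.h>
#include <stddef.h>
#include <netinet/in.h>

static uint16_t udp6_checksum(const struct in6_addr *src,
			      const struct in6_addr *dst,
			      const uint8_t *udp, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < 16; i += 2) {		/* pseudo-header addresses */
		sum += (src->s6_addr[i] << 8) | src->s6_addr[i + 1];
		sum += (dst->s6_addr[i] << 8) | dst->s6_addr[i + 1];
	}
	sum += len;				/* upper-layer packet length */
	sum += IPPROTO_UDP;			/* next header */

	for (i = 0; i + 1 < len; i += 2)	/* UDP header + payload */
		sum += (udp[i] << 8) | udp[i + 1];
	if (len & 1)
		sum += udp[len - 1] << 8;

	while (sum >> 16)			/* fold the carries */
		sum = (sum & 0xffff) + (sum >> 16);

	sum = ~sum & 0xffff;
	/* 0 is transmitted as 0xffff, as the code above does via CSUM_MANGLED_0 */
	return sum ? (uint16_t)sum : 0xffff;
}
#endif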

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS_USER(sock_net(sk),
					    UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else
		UDP6_INC_STATS_USER(sock_net(sk),
				    UDP_MIB_OUTDATAGRAMS, is_udplite);
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	struct flowi6 fl6;
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	/* ip6_finish_skb will release the cork, so make a copy of
	 * fl6 here.
	 */
	fl6 = inet_sk(sk)->cork.fl.u.ip6;

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &fl6);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}

int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct dst_entry *dst;
	int addr_len = msg->msg_namelen;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	/* destination address check */
		if (addr_len < offsetof(struct sockaddr, sa_data))
		switch (sin6->sin6_family) {
			if (addr_len < SIN6_LEN_RFC2133)
			daddr = &sin6->sin6_addr;
			goto do_udp_sendmsg;
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	if (ipv6_addr_v4mapped(daddr)) {
		struct sockaddr_in sin;
		sin.sin_family = AF_INET;
		sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
		sin.sin_addr.s_addr = daddr->s6_addr32[3];
		msg->msg_name = &sin;
		msg->msg_namelen = sizeof(sin);
		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;
		return udp_sendmsg(sk, msg, len);
	if (up->pending == AF_INET)
		return udp_sendmsg(sk, msg, len);
	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	if (len > INT_MAX - sizeof(struct udphdr))
	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	 * There are pending frames.
	 * The socket lock must be held while it's corked.
	if (likely(up->pending)) {
		if (unlikely(up->pending != AF_INET6)) {
			return -EAFNOSUPPORT;
		goto do_append_data;
	ulen += sizeof(struct udphdr);
	memset(&fl6, 0, sizeof(fl6));
		if (sin6->sin6_port == 0)
		fl6.fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;
			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
		 * Otherwise it will be difficult to maintain
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		fl6.fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6.flowlabel = np->flow_label;
	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = sk->sk_bound_dev_if;
	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.flowi6_uid = sk->sk_uid;
	if (msg->msg_controllen) {
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
					    &hlimit, &tclass, &dontfrag);
			fl6_sock_release(flowlabel);
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
		if (!(opt->opt_nflen|opt->opt_flen))
		opt = txopt_get(np);
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	fl6.flowi6_proto = sk->sk_protocol;
	if (!ipv6_addr_any(daddr))
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;
	fl6.fl6_sport = inet->inet_sport;
	final_p = fl6_update_dst(&fl6, opt, &final);
	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
		fl6.flowi6_oif = np->mcast_oif;
	} else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;
	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p);
		hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
		tclass = np->tclass;
	if (msg->msg_flags&MSG_CONFIRM)
	/* Lockless fast path for the non-corking case */
		struct sk_buff *skb;
		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), hlimit, tclass, opt,
				   &fl6, (struct rt6_info *)dst,
				   msg->msg_flags, dontfrag);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, &fl6);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		net_dbg_ratelimited("udp cork app bug 2\n");
	up->pending = AF_INET6;
		dontfrag = np->dontfrag;
	err = ip6_append_data(sk, getfrag, msg, ulen,
			      sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
			      (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
		udp_v6_flush_pending_frames(sk);
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		err = np->recverr ? net_xmit_errno(err) : 0;
			ip6_dst_store(sk, dst,
				      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
				      &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
				      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things). We could add another new stat but at least for now that
	 * seems like overkill.
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS_USER(sock_net(sk),
				    UDP_MIB_SNDBUFERRORS, is_udplite);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
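
/*
 * Illustrative sketch (not part of this file): the v4-mapped case handled
 * at the top of udpv6_sendmsg().  Sending from an AF_INET6 socket to
 * ::ffff:192.0.2.1 is redirected to the IPv4 udp_sendmsg() path unless
 * the socket is set to IPV6_V6ONLY, in which case it fails with
 * ENETUNREACH.  The helper name is made up for the example.
 */
#if 0
#include <netinet/in.h>
#include <sys/socket.h>
#include <arpa/inet.h>

static ssize_t send_to_v4mapped(int fd6, const char *msg, size_t len)
{
	struct sockaddr_in6 dst = { .sin6_family = AF_INET6,
				    .sin6_port = htons(9) };

	inet_pton(AF_INET6, "::ffff:192.0.2.1", &dst.sin6_addr);
	return sendto(fd6, msg, len, 0,
		      (struct sockaddr *)&dst, sizeof(dst));
}
#endif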

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	lock_sock(sk);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
		void (*encap_destroy)(struct sock *sk);
		encap_destroy = ACCESS_ONCE(up->encap_destroy);
		if (encap_destroy)
			encap_destroy(sk);
	}

	inet6_destroy_sock(sk);
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif

static const struct inet6_protocol udpv6_protocol = {
	.handler	=	udpv6_rcv,
	.err_handler	=	udpv6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
	}
	return 0;
}

static const struct file_operations udp6_afinfo_seq_fops = {
	.owner    = THIS_MODULE,
	.open     = udp_seq_open,
	.read     = seq_read,
	.llseek   = seq_lseek,
	.release  = seq_release_net
};

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.name		= "udp6",
	.family		= AF_INET6,
	.udp_table	= &udp_table,
	.seq_fops	= &udp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= udp6_seq_show,
	},
};

int __net_init udp6_proc_init(struct net *net)
{
	return udp_proc_register(net, &udp6_seq_afinfo);
}

void udp6_proc_exit(struct net *net)
{
	udp_proc_unregister(net, &udp6_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
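
/*
 * Illustrative sketch (not part of this file): the seq_file registered
 * above is what backs /proc/net/udp6; each line describes one socket with
 * hex local/remote address:port pairs and per-socket counters.  A trivial
 * reader:
 */
#if 0
#include <stdio.h>

static void dump_udp6_sockets(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/udp6", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}
#endif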

void udp_v6_clear_sk(struct sock *sk, int size)
{
	struct inet_sock *inet = inet_sk(sk);

	/* we do not want to clear pinet6 field, because of RCU lookups */
	sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));

	size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
	memset(&inet->pinet6 + 1, 0, size);
}

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name		   = "UDPv6",
	.owner		   = THIS_MODULE,
	.close		   = udp_lib_close,
	.connect	   = ip6_datagram_connect,
	.disconnect	   = udp_disconnect,
	.ioctl		   = udp_ioctl,
	.destroy	   = udpv6_destroy_sock,
	.setsockopt	   = udpv6_setsockopt,
	.getsockopt	   = udpv6_getsockopt,
	.sendmsg	   = udpv6_sendmsg,
	.recvmsg	   = udpv6_recvmsg,
	.backlog_rcv	   = __udpv6_queue_rcv_skb,
	.hash		   = udp_lib_hash,
	.unhash		   = udp_lib_unhash,
	.rehash		   = udp_v6_rehash,
	.get_port	   = udp_v6_get_port,
	.memory_allocated  = &udp_memory_allocated,
	.sysctl_mem	   = sysctl_udp_mem,
	.sysctl_wmem	   = &sysctl_udp_wmem_min,
	.sysctl_rmem	   = &sysctl_udp_rmem_min,
	.obj_size	   = sizeof(struct udp6_sock),
	.slab_flags	   = SLAB_DESTROY_BY_RCU,
	.h.udp_table	   = &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udpv6_setsockopt,
	.compat_getsockopt = compat_udpv6_getsockopt,
#endif
	.clear_sk	   = udp_v6_clear_sk,
	.diag_destroy	   = udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type =      SOCK_DGRAM,
	.protocol =  IPPROTO_UDP,
	.prot =      &udpv6_prot,
	.ops =       &inet6_dgram_ops,
	.flags =     INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}