2 * ip_vs_xmit.c: various packet transmitters for IPVS
4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
5 * Julian Anastasov <ja@ssi.bg>
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
14 * Description of forwarding methods:
15 * - all transmitters are called from LOCAL_IN (remote clients) and
16 * LOCAL_OUT (local clients) but for ICMP can be called from FORWARD
17 * - not all connections have destination server, for example,
18 * connections in backup server when fwmark is used
19 * - bypass connections use daddr from packet
21 * - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING)
22 * - skb->pkt_type is not set yet
23 * - the only place where we can see skb->sk != NULL
26 #define KMSG_COMPONENT "IPVS"
27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
29 #include <linux/kernel.h>
30 #include <linux/slab.h>
31 #include <linux/tcp.h> /* for tcphdr */
33 #include <net/tcp.h> /* for csum_tcpudp_magic */
35 #include <net/icmp.h> /* for icmp_send */
36 #include <net/route.h> /* for ip_route_output */
38 #include <net/ip6_route.h>
39 #include <net/addrconf.h>
40 #include <linux/icmpv6.h>
41 #include <linux/netfilter.h>
42 #include <linux/netfilter_ipv4.h>
44 #include <net/ip_vs.h>
/* Bit flags passed as rt_mode to the route-lookup helpers below; they
 * restrict which kinds of destinations a transmitter will accept.
 * NOTE(review): the opening "enum {" line is not visible in this view.
 */
47 IP_VS_RT_MODE_LOCAL = 1, /* Allow local dest */
48 IP_VS_RT_MODE_NON_LOCAL = 2, /* Allow non-local dest */
49 IP_VS_RT_MODE_RDR = 4, /* Allow redirect from remote daddr to
52 IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */
53 IP_VS_RT_MODE_KNOWN_NH = 16,/* Route via remote addr */
57 * Destination cache to speed up outgoing route lookup
/* Install @dst (validated by @dst_cookie) as the destination's cached
 * route, remembering the previous entry in old_dst.
 * NOTE(review): the release of old_dst is not visible in this view --
 * confirm against the full file.
 */
60 __ip_vs_dst_set(struct ip_vs_dest *dest, struct dst_entry *dst, u32 dst_cookie)
62 struct dst_entry *old_dst;
/* Save the old cache entry before overwriting it. */
64 old_dst = dest->dst_cache;
65 dest->dst_cache = dst;
66 dest->dst_cookie = dst_cookie;
/* Return the destination's cached route, dropping it from the cache
 * first if the dst core reports it obsolete (ops->check() fails for
 * the stored cookie).
 */
70 static inline struct dst_entry *
71 __ip_vs_dst_check(struct ip_vs_dest *dest)
73 struct dst_entry *dst = dest->dst_cache;
/* Obsolete entry that fails revalidation: invalidate the cache. */
77 if (dst->obsolete && dst->ops->check(dst, dest->dst_cookie) == NULL) {
78 dest->dst_cache = NULL;
/* IPv6 MTU check: true when the packet (or, for packets reassembled by
 * nf conntrack, its largest original fragment recorded in
 * IP6CB(skb)->frag_max_size) exceeds @mtu.  GSO packets are exempt
 * from the plain length test because they are segmented later.
 */
87 __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
89 if (IP6CB(skb)->frag_max_size) {
90 /* frag_max_size tell us that, this packet have been
91 * defragmented by netfilter IPv6 conntrack module.
93 if (IP6CB(skb)->frag_max_size > mtu)
94 return true; /* largest fragment violate MTU */
96 else if (skb->len > mtu && !skb_is_gso(skb)) {
97 return true; /* Packet size violate MTU size */
102 /* Get route to daddr, update *saddr, optionally bind route to saddr */
/* IPv4 output-route lookup to @daddr.  With IP_VS_RT_MODE_CONNECT the
 * flow is seeded with *saddr and the route is bound to that source;
 * with IP_VS_RT_MODE_KNOWN_NH the nexthop is forced to the remote
 * address (FLOWI_FLAG_KNOWN_NH).  Retries once without saddr when the
 * kernel rejects the configured source (-EINVAL).
 * NOTE(review): the retry loop/labels are elided in this view.
 */
103 static struct rtable *do_output_route4(struct net *net, __be32 daddr,
104 int rt_mode, __be32 *saddr)
110 memset(&fl4, 0, sizeof(fl4));
112 fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
113 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
114 FLOWI_FLAG_KNOWN_NH : 0;
117 rt = ip_route_output_key(net, &fl4);
119 /* Invalid saddr ? */
120 if (PTR_ERR(rt) == -EINVAL && *saddr &&
121 rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
/* Stale/invalid source address: retry the lookup with saddr cleared. */
123 flowi4_update_output(&fl4, 0, 0, daddr, 0);
126 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
/* Lookup succeeded without a preset source: rebind using the
 * source address the routing code chose (fl4.saddr). */
128 } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
131 flowi4_update_output(&fl4, 0, 0, daddr, fl4.saddr);
139 /* Get route to destination or remote server */
/* Get an IPv4 route to the real server (@dest) or, when @dest is NULL
 * (e.g. bypass), to @daddr taken from the packet.  Uses and refreshes
 * the per-destination route cache under dest->dst_lock, then enforces
 * the rt_mode policy: local/non-local acceptance, the
 * "local-redirect-needs-NAT" rule, and the no-loopback-source rule.
 * On success *ret_saddr (if non-NULL) receives the bound source addr.
 */
140 static struct rtable *
141 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
142 __be32 daddr, int rt_mode, __be32 *ret_saddr)
144 struct net *net = dev_net(skb_dst(skb)->dev);
145 struct rtable *rt; /* Route to the other host */
146 struct rtable *ort; /* Original route */
/* Fast path: reuse the route cached on the destination. */
150 spin_lock(&dest->dst_lock);
151 rt = (struct rtable *) __ip_vs_dst_check(dest);
153 rt = do_output_route4(net, dest->addr.ip, rt_mode,
154 &dest->dst_saddr.ip);
156 spin_unlock(&dest->dst_lock);
/* Cache miss: store a clone of the fresh route (cookie 0 for IPv4). */
159 __ip_vs_dst_set(dest, dst_clone(&rt->dst), 0);
160 IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d\n",
161 &dest->addr.ip, &dest->dst_saddr.ip,
162 atomic_read(&rt->dst.__refcnt));
164 daddr = dest->addr.ip;
166 *ret_saddr = dest->dst_saddr.ip;
167 spin_unlock(&dest->dst_lock);
/* No configured destination (dest == NULL): route to the packet's
 * own daddr without source binding. */
169 __be32 saddr = htonl(INADDR_ANY);
171 /* For such unconfigured boxes avoid many route lookups
172 * for performance reasons because we do not remember saddr
174 rt_mode &= ~IP_VS_RT_MODE_CONNECT;
175 rt = do_output_route4(net, daddr, rt_mode, &saddr);
/* Policy checks on the resolved route. */
182 local = rt->rt_flags & RTCF_LOCAL;
183 if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
185 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
186 (rt->rt_flags & RTCF_LOCAL) ?
187 "local":"non-local", &daddr);
/* Redirecting a remote daddr to a local one requires NAT (RDR). */
191 if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
192 !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
193 IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
194 "requires NAT method, dest: %pI4\n",
195 &ip_hdr(skb)->daddr, &daddr);
/* Never send packets with a loopback source off-box. */
199 if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) {
200 IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 "
201 "to non-local address, dest: %pI4\n",
202 &ip_hdr(skb)->saddr, &daddr);
210 /* Reroute packet to local IPv4 stack after DNAT */
/* After DNAT changed iph->daddr, attach a fresh route pointing at the
 * local stack: redo an input route lookup for input routes, or an
 * output lookup (which must resolve to RTCF_LOCAL) for output routes.
 */
212 __ip_vs_reroute_locally(struct sk_buff *skb)
214 struct rtable *rt = skb_rtable(skb);
215 struct net_device *dev = rt->dst.dev;
216 struct net *net = dev_net(dev);
217 struct iphdr *iph = ip_hdr(skb);
219 if (rt_is_input_route(rt)) {
/* Keep a handle on the old dst so it can be dropped only after
 * ip_route_input() has installed the new one. */
220 unsigned long orefdst = skb->_skb_refdst;
222 if (ip_route_input(skb, iph->daddr, iph->saddr,
225 refdst_drop(orefdst);
227 struct flowi4 fl4 = {
230 .flowi4_tos = RT_TOS(iph->tos),
231 .flowi4_mark = skb->mark,
234 rt = ip_route_output_key(net, &fl4);
/* The new daddr must be local, otherwise rerouting failed. */
237 if (!(rt->rt_flags & RTCF_LOCAL)) {
241 /* Drop old route. */
243 skb_dst_set(skb, &rt->dst);
248 #ifdef CONFIG_IP_VS_IPV6
/* An IPv6 route is "local" when it egresses via a loopback device. */
250 static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
252 return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK;
/* IPv6 output-route lookup to @daddr.  If the flow has no source yet,
 * pick one with ipv6_dev_get_saddr(); when @do_xfrm is set, pass the
 * result through xfrm_lookup() for IPsec.  On success the chosen
 * source is copied to *ret_saddr (if non-NULL).
 */
255 static struct dst_entry *
256 __ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
257 struct in6_addr *ret_saddr, int do_xfrm)
259 struct dst_entry *dst;
260 struct flowi6 fl6 = {
264 dst = ip6_route_output(net, NULL, &fl6);
/* No source address in the flow: select one for dst's device. */
269 if (ipv6_addr_any(&fl6.saddr) &&
270 ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
271 &fl6.daddr, 0, &fl6.saddr) < 0)
274 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
280 *ret_saddr = fl6.saddr;
285 IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", daddr);
290 * Get route to destination or remote server
/* IPv6 counterpart of __ip_vs_get_out_rt(): get a route to the real
 * server (@dest) or to @daddr, using the per-destination cache guarded
 * by dest->dst_lock (cache validity tracked via the fib6 node sernum
 * cookie), then apply the same rt_mode policy checks.  Error paths
 * release the route with dst_release().
 */
292 static struct rt6_info *
293 __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
294 struct in6_addr *daddr, struct in6_addr *ret_saddr,
295 int do_xfrm, int rt_mode)
297 struct net *net = dev_net(skb_dst(skb)->dev);
298 struct rt6_info *rt; /* Route to the other host */
299 struct rt6_info *ort; /* Original route */
300 struct dst_entry *dst;
/* Fast path: reuse the cached route for this destination. */
304 spin_lock(&dest->dst_lock);
305 rt = (struct rt6_info *)__ip_vs_dst_check(dest);
309 dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
310 &dest->dst_saddr.in6,
313 spin_unlock(&dest->dst_lock);
316 rt = (struct rt6_info *) dst;
/* Use the fib6 node serial number as the revalidation cookie. */
317 cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
318 __ip_vs_dst_set(dest, dst_clone(&rt->dst), cookie);
319 IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
320 &dest->addr.in6, &dest->dst_saddr.in6,
321 atomic_read(&rt->dst.__refcnt));
324 *ret_saddr = dest->dst_saddr.in6;
325 spin_unlock(&dest->dst_lock);
/* No configured destination: one-off lookup for the packet daddr. */
327 dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
330 rt = (struct rt6_info *) dst;
/* Policy checks, mirroring the IPv4 variant. */
333 local = __ip_vs_is_local_route6(rt);
334 if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
336 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6c\n",
337 local ? "local":"non-local", daddr);
338 dst_release(&rt->dst);
/* Redirect of a remote daddr to a local one requires NAT (RDR). */
341 if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
342 !((ort = (struct rt6_info *) skb_dst(skb)) &&
343 __ip_vs_is_local_route6(ort))) {
344 IP_VS_DBG_RL("Redirect from non-local address %pI6c to local "
345 "requires NAT method, dest: %pI6c\n",
346 &ipv6_hdr(skb)->daddr, daddr);
347 dst_release(&rt->dst);
/* Never forward a loopback-sourced packet to a non-local dest. */
350 if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
351 ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
352 IPV6_ADDR_LOOPBACK)) {
353 IP_VS_DBG_RL("Stopping traffic from loopback address %pI6c "
354 "to non-local address, dest: %pI6c\n",
355 &ipv6_hdr(skb)->saddr, daddr);
356 dst_release(&rt->dst);
365 /* return NF_ACCEPT to allow forwarding or other NF_xxx on error */
/* Mark the skb as IPVS-owned before tunnel transmit and, for
 * conntrack-tracked connections, confirm the conntrack entry.
 * Returns NF_ACCEPT when the caller may forward the packet.
 */
366 static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
367 struct ip_vs_conn *cp)
/* ipvs_property keeps our own hooks from processing this skb again. */
371 skb->ipvs_property = 1;
372 if (unlikely(cp->flags & IP_VS_CONN_F_NFCT))
373 ret = ip_vs_confirm_conntrack(skb);
374 if (ret == NF_ACCEPT) {
376 skb_forward_csum(skb);
381 /* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
/* Send a NAT-ed packet through the LOCAL_OUT hook (NF_STOLEN), or let
 * it continue locally when @local is set (caller returns NF_ACCEPT).
 * Updates the conntrack tuple first for tracked connections.
 */
382 static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
383 struct ip_vs_conn *cp, int local)
387 skb->ipvs_property = 1;
388 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
/* Tracked connection: sync the conntrack reply tuple with the DNAT. */
391 ip_vs_update_conntrack(skb, cp, 1);
393 skb_forward_csum(skb);
394 NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
401 /* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
/* As ip_vs_nat_send_or_cont() but without conntrack tuple updates:
 * reinject via LOCAL_OUT (NF_STOLEN) or continue locally (@local).
 */
402 static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
403 struct ip_vs_conn *cp, int local)
407 skb->ipvs_property = 1;
408 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
411 skb_forward_csum(skb);
412 NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
421 * NULL transmitter (do nothing except return NF_ACCEPT)
/* LOCALNODE transmitter: the packet is for this host, so just mark it
 * and let it continue (local=1 -> NF_ACCEPT); nothing is sent.
 */
424 ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
425 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
427 /* we do not touch skb and do not need pskb ptr */
428 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
434 * Let packets bypass the destination when the destination is not
435 * available, it may be only used in transparent cache cluster.
/* Bypass transmitter (IPv4): no real server, route the packet to its
 * own daddr (non-local only), honouring DF/MTU with ICMP FRAG_NEEDED,
 * then reinject via LOCAL_OUT.
 */
438 ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
439 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
441 struct rtable *rt; /* Route to the other host */
442 struct iphdr *iph = ip_hdr(skb);
447 rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, IP_VS_RT_MODE_NON_LOCAL,
/* DF set and packet larger than path MTU: tell the sender. */
453 mtu = dst_mtu(&rt->dst);
454 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
457 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
458 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
463 * Call ip_send_check because we are not sure it is called
464 * after ip_defrag. Is copy-on-write needed?
466 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
470 ip_send_check(ip_hdr(skb));
/* Attach the new route and hand the packet to LOCAL_OUT. */
474 skb_dst_set(skb, &rt->dst);
476 /* Another hack: avoid icmp_send in ip_fragment */
479 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
485 dst_link_failure(skb);
492 #ifdef CONFIG_IP_VS_IPV6
/* Bypass transmitter (IPv6): route to the packet's own daddr
 * (non-local only); on MTU violation emit ICMPV6_PKT_TOOBIG from the
 * loopback device, otherwise reinject via LOCAL_OUT.
 */
494 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
495 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
497 struct rt6_info *rt; /* Route to the other host */
502 rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr.in6, NULL, 0,
503 IP_VS_RT_MODE_NON_LOCAL);
508 mtu = dst_mtu(&rt->dst);
509 if (__mtu_check_toobig_v6(skb, mtu)) {
511 struct net *net = dev_net(skb_dst(skb)->dev);
/* icmpv6_send() needs a device; use loopback for this reply. */
513 skb->dev = net->loopback_dev;
515 /* only send ICMP too big on first fragment */
517 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
518 dst_release(&rt->dst);
519 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
524 * Call ip_send_check because we are not sure it is called
525 * after ip_defrag. Is copy-on-write needed?
527 skb = skb_share_check(skb, GFP_ATOMIC);
528 if (unlikely(skb == NULL)) {
529 dst_release(&rt->dst);
/* Attach the new route and hand the packet to LOCAL_OUT. */
535 skb_dst_set(skb, &rt->dst);
537 /* Another hack: avoid icmp_send in ip_fragment */
540 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
546 dst_link_failure(skb);
555 * NAT transmitter (only for outside-to-inside nat forwarding)
556 * Not used for related ICMP
/* NAT transmitter (IPv4): learn the client port for no-cport
 * connections, route to cp->daddr (local/non-local/RDR allowed),
 * refuse conflicting sync-ed/loopback DNAT cases, check MTU, then
 * DNAT the header (pp->dnat_handler + daddr rewrite) and send.
 */
559 ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
560 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
562 struct rtable *rt; /* Route to the other host */
564 struct iphdr *iph = ip_hdr(skb);
569 /* check if it is a connection of no-client-port */
570 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
/* Read the transport source port straight after the IP header. */
572 p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt);
575 ip_vs_conn_fill_cport(cp, *p);
576 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
579 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
580 IP_VS_RT_MODE_LOCAL |
581 IP_VS_RT_MODE_NON_LOCAL |
582 IP_VS_RT_MODE_RDR, NULL)))
584 local = rt->rt_flags & RTCF_LOCAL;
586 * Avoid duplicate tuple in reply direction for NAT traffic
587 * to local address when connection is sync-ed
589 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
590 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
591 enum ip_conntrack_info ctinfo;
592 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
594 if (ct && !nf_ct_is_untracked(ct)) {
595 IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0,
597 "stopping DNAT to local address");
603 /* From world but DNAT to loopback address? */
604 if (local && ipv4_is_loopback(cp->daddr.ip) &&
605 rt_is_input_route(skb_rtable(skb))) {
606 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
607 "stopping DNAT to loopback address");
/* DF set and packet larger than path MTU: tell the sender. */
612 mtu = dst_mtu(&rt->dst);
613 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
615 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
616 IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
617 "ip_vs_nat_xmit(): frag needed for");
621 /* copy-on-write the packet before mangling it */
622 if (!skb_make_writable(skb, sizeof(struct iphdr)))
/* Ensure headroom for the output device's link-layer header. */
625 if (skb_cow(skb, rt->dst.dev->hard_header_len))
628 /* mangle the packet */
629 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
631 ip_hdr(skb)->daddr = cp->daddr.ip;
632 ip_send_check(ip_hdr(skb));
637 skb_dst_set(skb, &rt->dst);
641 * Some IPv4 replies get local address from routes,
642 * not from iph, so while we DNAT after routing
643 * we need this second input/output route.
645 if (!__ip_vs_reroute_locally(skb))
649 IP_VS_DBG_PKT(10, AF_INET, pp, skb, 0, "After DNAT");
651 /* FIXME: when application helper enlarges the packet and the length
652 is larger than the MTU of outgoing device, there will be still
655 /* Another hack: avoid icmp_send in ip_fragment */
658 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
664 dst_link_failure(skb);
674 #ifdef CONFIG_IP_VS_IPV6
/* NAT transmitter (IPv6): mirror of ip_vs_nat_xmit() -- learn cport
 * (first fragment only), route to cp->daddr.in6, reject sync-ed and
 * loopback DNAT conflicts, check MTU (PKT_TOOBIG), DNAT the header
 * and send via ip_vs_nat_send_or_cont().
 */
676 ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
677 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
679 struct rt6_info *rt; /* Route to the other host */
685 /* check if it is a connection of no-client-port */
686 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !iph->fragoffs)) {
/* Read the transport source port after the IPv6 header chain. */
688 p = skb_header_pointer(skb, iph->len, sizeof(_pt), &_pt);
691 ip_vs_conn_fill_cport(cp, *p);
692 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
695 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
696 0, (IP_VS_RT_MODE_LOCAL |
697 IP_VS_RT_MODE_NON_LOCAL |
698 IP_VS_RT_MODE_RDR))))
700 local = __ip_vs_is_local_route6(rt);
702 * Avoid duplicate tuple in reply direction for NAT traffic
703 * to local address when connection is sync-ed
705 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
706 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
707 enum ip_conntrack_info ctinfo;
708 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
710 if (ct && !nf_ct_is_untracked(ct)) {
711 IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0,
712 "ip_vs_nat_xmit_v6(): "
713 "stopping DNAT to local address");
719 /* From world but DNAT to loopback address? */
720 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
721 ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
722 IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0,
723 "ip_vs_nat_xmit_v6(): "
724 "stopping DNAT to loopback address");
729 mtu = dst_mtu(&rt->dst);
730 if (__mtu_check_toobig_v6(skb, mtu)) {
732 struct net *net = dev_net(skb_dst(skb)->dev);
/* icmpv6_send() needs a device; use loopback for this reply. */
734 skb->dev = net->loopback_dev;
736 /* only send ICMP too big on first fragment */
738 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
739 IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0,
740 "ip_vs_nat_xmit_v6(): frag needed for");
744 /* copy-on-write the packet before mangling it */
745 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
748 if (skb_cow(skb, rt->dst.dev->hard_header_len))
751 /* mangle the packet */
752 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, iph))
754 ipv6_hdr(skb)->daddr = cp->daddr.in6;
756 if (!local || !skb->dev) {
757 /* drop the old route when skb is not shared */
759 skb_dst_set(skb, &rt->dst);
761 /* destined to loopback, do we need to change route? */
762 dst_release(&rt->dst);
765 IP_VS_DBG_PKT(10, AF_INET6, pp, skb, 0, "After DNAT");
767 /* FIXME: when application helper enlarges the packet and the length
768 is larger than the MTU of outgoing device, there will be still
771 /* Another hack: avoid icmp_send in ip_fragment */
774 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
780 dst_link_failure(skb);
786 dst_release(&rt->dst);
793 * IP Tunneling transmitter
795 * This function encapsulates the packet in a new IP packet, its
796 * destination will be set to cp->daddr. Most code of this function
797 * is taken from ipip.c.
799 * It is used in VS/TUN cluster. The load balancer selects a real
800 * server from a cluster based on a scheduling algorithm,
801 * encapsulates the request packet and forwards it to the selected
802 * server. For example, all real servers are configured with
803 * "ifconfig tunl0 <Virtual IP Address> up". When the server receives
804 * the encapsulated packet, it will decapsulate the packet, process
805 * the request and return the response packets directly to the client
806 * without passing the load balancer. This can greatly increase the
807 * scalability of virtual server.
809 * Used for ANY protocol
/* VS/TUN transmitter (IPv4): encapsulate the packet in a new IPIP
 * header addressed to the real server (cp->daddr) and send it through
 * LOCAL_OUT.  Local destinations are short-circuited to the stack.
 * DF propagation is controlled by the pmtu_disc sysctl.
 */
812 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
813 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
815 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
816 struct rtable *rt; /* Route to the other host */
817 __be32 saddr; /* Source for tunnel */
818 struct net_device *tdev; /* Device to other host */
819 struct iphdr *old_iph = ip_hdr(skb);
820 u8 tos = old_iph->tos;
822 struct iphdr *iph; /* Our new IP header */
823 unsigned int max_headroom; /* The extra header space needed */
/* CONNECT binds the route so saddr can be used as the outer source. */
829 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
830 IP_VS_RT_MODE_LOCAL |
831 IP_VS_RT_MODE_NON_LOCAL |
832 IP_VS_RT_MODE_CONNECT, &saddr)))
834 if (rt->rt_flags & RTCF_LOCAL) {
836 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
/* Inner payload must fit the path MTU minus the outer IPIP header. */
841 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
843 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
846 if (rt_is_output_route(skb_rtable(skb)))
847 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
849 /* Copy DF, reset fragment offset and MF */
850 df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0;
852 if (df && mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
853 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
854 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
859 * Okay, now see if we can stuff it in the buffer as-is.
861 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
/* Reallocate headroom if the skb is too tight, cloned or shared. */
863 if (skb_headroom(skb) < max_headroom
864 || skb_cloned(skb) || skb_shared(skb)) {
865 struct sk_buff *new_skb =
866 skb_realloc_headroom(skb, max_headroom);
870 IP_VS_ERR_RL("%s(): no memory\n", __func__);
875 old_iph = ip_hdr(skb);
878 skb->transport_header = skb->network_header;
880 /* fix old IP header checksum */
881 ip_send_check(old_iph);
/* Prepend space for the outer header and make it the network hdr. */
883 skb_push(skb, sizeof(struct iphdr));
884 skb_reset_network_header(skb);
885 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
889 skb_dst_set(skb, &rt->dst);
892 * Push down and install the IPIP header.
896 iph->ihl = sizeof(struct iphdr)>>2;
898 iph->protocol = IPPROTO_IPIP;
900 iph->daddr = cp->daddr.ip;
902 iph->ttl = old_iph->ttl;
903 ip_select_ident(iph, &rt->dst, NULL);
905 /* Another hack: avoid icmp_send in ip_fragment */
908 ret = ip_vs_tunnel_xmit_prepare(skb, cp);
909 if (ret == NF_ACCEPT)
911 else if (ret == NF_DROP)
919 dst_link_failure(skb);
929 #ifdef CONFIG_IP_VS_IPV6
/* VS/TUN transmitter (IPv6): encapsulate in an outer IPv6 header
 * (nexthdr IPPROTO_IPV6) addressed to the real server and send via
 * LOCAL_OUT; local destinations short-circuit to the stack.  Uses
 * xfrm-aware routing (do_xfrm=1) and PKT_TOOBIG for MTU violations.
 */
931 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
932 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
934 struct rt6_info *rt; /* Route to the other host */
935 struct in6_addr saddr; /* Source for tunnel */
936 struct net_device *tdev; /* Device to other host */
937 struct ipv6hdr *old_iph = ipv6_hdr(skb);
938 struct ipv6hdr *iph; /* Our new IP header */
939 unsigned int max_headroom; /* The extra header space needed */
945 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
946 &saddr, 1, (IP_VS_RT_MODE_LOCAL |
947 IP_VS_RT_MODE_NON_LOCAL))))
949 if (__ip_vs_is_local_route6(rt)) {
950 dst_release(&rt->dst);
951 return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
/* Inner payload must fit the path MTU minus the outer IPv6 header. */
956 mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
957 if (mtu < IPV6_MIN_MTU) {
958 IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
963 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
965 /* MTU checking: Notice that 'mtu' have been adjusted before hand */
966 if (__mtu_check_toobig_v6(skb, mtu)) {
968 struct net *net = dev_net(skb_dst(skb)->dev);
970 skb->dev = net->loopback_dev;
972 /* only send ICMP too big on first fragment */
973 if (!ipvsh->fragoffs)
974 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
975 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
980 * Okay, now see if we can stuff it in the buffer as-is.
982 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
/* Reallocate headroom if the skb is too tight, cloned or shared. */
984 if (skb_headroom(skb) < max_headroom
985 || skb_cloned(skb) || skb_shared(skb)) {
986 struct sk_buff *new_skb =
987 skb_realloc_headroom(skb, max_headroom);
989 dst_release(&rt->dst);
991 IP_VS_ERR_RL("%s(): no memory\n", __func__);
996 old_iph = ipv6_hdr(skb);
999 skb->transport_header = skb->network_header;
/* Prepend space for the outer header and make it the network hdr. */
1001 skb_push(skb, sizeof(struct ipv6hdr));
1002 skb_reset_network_header(skb);
1003 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1005 /* drop old route */
1007 skb_dst_set(skb, &rt->dst);
1010 * Push down and install the IPIP header.
1012 iph = ipv6_hdr(skb);
1014 iph->nexthdr = IPPROTO_IPV6;
/* Outer payload_len = inner payload_len + inner header size. */
1015 iph->payload_len = old_iph->payload_len;
1016 be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
1017 iph->priority = old_iph->priority;
1018 memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
1019 iph->daddr = cp->daddr.in6;
1021 iph->hop_limit = old_iph->hop_limit;
1023 /* Another hack: avoid icmp_send in ip_fragment */
1026 ret = ip_vs_tunnel_xmit_prepare(skb, cp);
1027 if (ret == NF_ACCEPT)
1029 else if (ret == NF_DROP)
1037 dst_link_failure(skb);
1043 dst_release(&rt->dst);
1050 * Direct Routing transmitter
1051 * Used for ANY protocol
/* VS/DR transmitter (IPv4): send the unmodified packet straight to
 * the real server using a KNOWN_NH route (nexthop = server address);
 * local destinations are handed back to the stack.
 */
1054 ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1055 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
1057 struct rtable *rt; /* Route to the other host */
1058 struct iphdr *iph = ip_hdr(skb);
1063 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1064 IP_VS_RT_MODE_LOCAL |
1065 IP_VS_RT_MODE_NON_LOCAL |
1066 IP_VS_RT_MODE_KNOWN_NH, NULL)))
1068 if (rt->rt_flags & RTCF_LOCAL) {
1070 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
/* DF set and packet larger than path MTU: tell the sender. */
1074 mtu = dst_mtu(&rt->dst);
1075 if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
1077 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
1079 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1084 * Call ip_send_check because we are not sure it is called
1085 * after ip_defrag. Is copy-on-write needed?
1087 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
1091 ip_send_check(ip_hdr(skb));
1093 /* drop old route */
1095 skb_dst_set(skb, &rt->dst);
1097 /* Another hack: avoid icmp_send in ip_fragment */
1100 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
1106 dst_link_failure(skb);
1113 #ifdef CONFIG_IP_VS_IPV6
/* VS/DR transmitter (IPv6): forward the unmodified packet directly to
 * the real server; local destinations go back to the stack; MTU
 * violations generate ICMPV6_PKT_TOOBIG.
 */
1115 ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1116 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph)
1118 struct rt6_info *rt; /* Route to the other host */
1123 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1124 0, (IP_VS_RT_MODE_LOCAL |
1125 IP_VS_RT_MODE_NON_LOCAL))))
1127 if (__ip_vs_is_local_route6(rt)) {
1128 dst_release(&rt->dst);
1129 return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
1133 mtu = dst_mtu(&rt->dst);
1134 if (__mtu_check_toobig_v6(skb, mtu)) {
1136 struct net *net = dev_net(skb_dst(skb)->dev);
/* icmpv6_send() needs a device; use loopback for this reply. */
1138 skb->dev = net->loopback_dev;
1140 /* only send ICMP too big on first fragment */
1142 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1143 dst_release(&rt->dst);
1144 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1149 * Call ip_send_check because we are not sure it is called
1150 * after ip_defrag. Is copy-on-write needed?
1152 skb = skb_share_check(skb, GFP_ATOMIC);
1153 if (unlikely(skb == NULL)) {
1154 dst_release(&rt->dst);
1158 /* drop old route */
1160 skb_dst_set(skb, &rt->dst);
1162 /* Another hack: avoid icmp_send in ip_fragment */
1165 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
1171 dst_link_failure(skb);
1181 * ICMP packet transmitter
1182 * called by the ip_vs_in_icmp
/* Transmit a related ICMP error (IPv4), called from ip_vs_in_icmp().
 * Non-NAT forwarding methods delegate to cp->packet_xmit unchanged;
 * for VS/NAT the embedded header is mangled via ip_vs_nat_icmp() and
 * the packet is rerouted/sent like regular NAT traffic.  @offset is
 * how many bytes must be writable for the mangling.
 */
1185 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1186 struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
1187 struct ip_vs_iphdr *iph)
1189 struct rtable *rt; /* Route to the other host */
1197 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1198 forwarded directly here, because there is no need to
1199 translate address/port back */
1200 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1201 if (cp->packet_xmit)
1202 rc = cp->packet_xmit(skb, cp, pp, iph);
1205 /* do not touch skb anymore */
1206 atomic_inc(&cp->in_pkts);
1211 * mangle and send the packet here (only for VS/NAT)
1214 /* LOCALNODE from FORWARD hook is not supported */
1215 rt_mode = (hooknum != NF_INET_FORWARD) ?
1216 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1217 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1218 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1221 local = rt->rt_flags & RTCF_LOCAL;
1224 * Avoid duplicate tuple in reply direction for NAT traffic
1225 * to local address when connection is sync-ed
1227 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1228 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1229 enum ip_conntrack_info ctinfo;
1230 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1232 if (ct && !nf_ct_is_untracked(ct)) {
1233 IP_VS_DBG(10, "%s(): "
1234 "stopping DNAT to local address %pI4\n",
1235 __func__, &cp->daddr.ip);
1241 /* From world but DNAT to loopback address? */
1242 if (local && ipv4_is_loopback(cp->daddr.ip) &&
1243 rt_is_input_route(skb_rtable(skb))) {
1244 IP_VS_DBG(1, "%s(): "
1245 "stopping DNAT to loopback %pI4\n",
1246 __func__, &cp->daddr.ip);
/* DF set and packet larger than path MTU: tell the sender. */
1251 mtu = dst_mtu(&rt->dst);
1252 if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
1254 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1255 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1259 /* copy-on-write the packet before mangling it */
1260 if (!skb_make_writable(skb, offset))
1263 if (skb_cow(skb, rt->dst.dev->hard_header_len))
/* Translate the ICMP payload's embedded addresses/ports. */
1266 ip_vs_nat_icmp(skb, pp, cp, 0);
1269 /* drop the old route when skb is not shared */
1271 skb_dst_set(skb, &rt->dst);
1275 * Some IPv4 replies get local address from routes,
1276 * not from iph, so while we DNAT after routing
1277 * we need this second input/output route.
1279 if (!__ip_vs_reroute_locally(skb))
1283 /* Another hack: avoid icmp_send in ip_fragment */
1286 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
1290 dst_link_failure(skb);
1302 #ifdef CONFIG_IP_VS_IPV6
1304 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1305 struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
1306 struct ip_vs_iphdr *iph)
1308 struct rt6_info *rt; /* Route to the other host */
1316 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
1317 forwarded directly here, because there is no need to
1318 translate address/port back */
1319 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1320 if (cp->packet_xmit)
1321 rc = cp->packet_xmit(skb, cp, pp, iph);
1324 /* do not touch skb anymore */
1325 atomic_inc(&cp->in_pkts);
1330 * mangle and send the packet here (only for VS/NAT)
1333 /* LOCALNODE from FORWARD hook is not supported */
1334 rt_mode = (hooknum != NF_INET_FORWARD) ?
1335 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1336 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1337 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1341 local = __ip_vs_is_local_route6(rt);
1343 * Avoid duplicate tuple in reply direction for NAT traffic
1344 * to local address when connection is sync-ed
1346 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
1347 if (cp->flags & IP_VS_CONN_F_SYNC && local) {
1348 enum ip_conntrack_info ctinfo;
1349 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
1351 if (ct && !nf_ct_is_untracked(ct)) {
1352 IP_VS_DBG(10, "%s(): "
1353 "stopping DNAT to local address %pI6\n",
1354 __func__, &cp->daddr.in6);
1360 /* From world but DNAT to loopback address? */
1361 if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
1362 ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
1363 IP_VS_DBG(1, "%s(): "
1364 "stopping DNAT to loopback %pI6\n",
1365 __func__, &cp->daddr.in6);
1370 mtu = dst_mtu(&rt->dst);
1371 if (__mtu_check_toobig_v6(skb, mtu)) {
1373 struct net *net = dev_net(skb_dst(skb)->dev);
1375 skb->dev = net->loopback_dev;
1377 /* only send ICMP too big on first fragment */
1379 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1380 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1384 /* copy-on-write the packet before mangling it */
1385 if (!skb_make_writable(skb, offset))
1388 if (skb_cow(skb, rt->dst.dev->hard_header_len))
1391 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
1393 if (!local || !skb->dev) {
1394 /* drop the old route when skb is not shared */
1396 skb_dst_set(skb, &rt->dst);
1398 /* destined to loopback, do we need to change route? */
1399 dst_release(&rt->dst);
1402 /* Another hack: avoid icmp_send in ip_fragment */
1405 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
1409 dst_link_failure(skb);
1417 dst_release(&rt->dst);