/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2011 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/secure_seq.h>
#include <net/checksum.h>
#include <net/route.h>
#include <net/ip.h>

#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
/* Forward declaration: the ops table is defined further down but is
 * referenced by nf_nat_ipv4_manip_pkt() before that point.
 */
static const struct nf_nat_l3proto nf_nat_l3proto_ipv4;
#ifdef CONFIG_XFRM
/* Populate the IPv4 flow used for xfrm policy lookups from the conntrack
 * tuple of direction @dir.  @statusbit selects which NAT manipulation
 * (SRC or DST) decides the destination side; the complementary bit then
 * decides the source side.  Ports are only meaningful for the
 * port-carrying transport protocols listed below.
 */
static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{
	const struct nf_conntrack_tuple *t = &ct->tuplehash[dir].tuple;
	struct flowi4 *fl4 = &fl->u.ip4;

	if (ct->status & statusbit) {
		fl4->daddr = t->dst.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_dport = t->dst.u.all;
	}

	/* The complementary NAT bit governs the source side of the flow. */
	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl4->saddr = t->src.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_sport = t->src.u.all;
	}
}
#endif /* CONFIG_XFRM */
65 static bool nf_nat_ipv4_in_range(const struct nf_conntrack_tuple *t,
66 const struct nf_nat_range *range)
68 return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
69 ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
72 static u32 nf_nat_ipv4_secure_port(const struct nf_conntrack_tuple *t,
75 return secure_ipv4_port_ephemeral(t->src.u3.ip, t->dst.u3.ip, dport);
78 static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
79 unsigned int iphdroff,
80 const struct nf_nat_l4proto *l4proto,
81 const struct nf_conntrack_tuple *target,
82 enum nf_nat_manip_type maniptype)
87 if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
90 iph = (void *)skb->data + iphdroff;
91 hdroff = iphdroff + iph->ihl * 4;
93 if (!l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv4, iphdroff, hdroff,
96 iph = (void *)skb->data + iphdroff;
98 if (maniptype == NF_NAT_MANIP_SRC) {
99 csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
100 iph->saddr = target->src.u3.ip;
102 csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
103 iph->daddr = target->dst.u3.ip;
108 static void nf_nat_ipv4_csum_update(struct sk_buff *skb,
109 unsigned int iphdroff, __sum16 *check,
110 const struct nf_conntrack_tuple *t,
111 enum nf_nat_manip_type maniptype)
113 struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff);
116 if (maniptype == NF_NAT_MANIP_SRC) {
118 newip = t->src.u3.ip;
121 newip = t->dst.u3.ip;
123 inet_proto_csum_replace4(check, skb, oldip, newip, true);
126 static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
127 u8 proto, void *data, __sum16 *check,
128 int datalen, int oldlen)
130 const struct iphdr *iph = ip_hdr(skb);
131 struct rtable *rt = skb_rtable(skb);
133 if (skb->ip_summed != CHECKSUM_PARTIAL) {
134 if (!(rt->rt_flags & RTCF_LOCAL) &&
135 (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
136 skb->ip_summed = CHECKSUM_PARTIAL;
137 skb->csum_start = skb_headroom(skb) +
138 skb_network_offset(skb) +
140 skb->csum_offset = (void *)check - data;
141 *check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
145 *check = csum_tcpudp_magic(iph->saddr, iph->daddr,
147 csum_partial(data, datalen,
149 if (proto == IPPROTO_UDP && !*check)
150 *check = CSUM_MANGLED_0;
153 inet_proto_csum_replace2(check, skb,
154 htons(oldlen), htons(datalen), true);
157 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
158 static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
159 struct nf_nat_range *range)
161 if (tb[CTA_NAT_V4_MINIP]) {
162 range->min_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MINIP]);
163 range->flags |= NF_NAT_RANGE_MAP_IPS;
166 if (tb[CTA_NAT_V4_MAXIP])
167 range->max_addr.ip = nla_get_be32(tb[CTA_NAT_V4_MAXIP]);
169 range->max_addr.ip = range->min_addr.ip;
175 static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
176 .l3proto = NFPROTO_IPV4,
177 .in_range = nf_nat_ipv4_in_range,
178 .secure_port = nf_nat_ipv4_secure_port,
179 .manip_pkt = nf_nat_ipv4_manip_pkt,
180 .csum_update = nf_nat_ipv4_csum_update,
181 .csum_recalc = nf_nat_ipv4_csum_recalc,
182 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
183 .nlattr_to_range = nf_nat_ipv4_nlattr_to_range,
186 .decode_session = nf_nat_ipv4_decode_session,
190 int nf_nat_icmp_reply_translation(struct sk_buff *skb,
192 enum ip_conntrack_info ctinfo,
193 unsigned int hooknum)
199 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
200 enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
201 unsigned int hdrlen = ip_hdrlen(skb);
202 const struct nf_nat_l4proto *l4proto;
203 struct nf_conntrack_tuple target;
204 unsigned long statusbit;
206 NF_CT_ASSERT(ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY);
208 if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
210 if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
213 inside = (void *)skb->data + hdrlen;
214 if (inside->icmp.type == ICMP_REDIRECT) {
215 if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
217 if (ct->status & IPS_NAT_MASK)
221 if (manip == NF_NAT_MANIP_SRC)
222 statusbit = IPS_SRC_NAT;
224 statusbit = IPS_DST_NAT;
226 /* Invert if this is reply direction */
227 if (dir == IP_CT_DIR_REPLY)
228 statusbit ^= IPS_NAT_MASK;
230 if (!(ct->status & statusbit))
233 l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, inside->ip.protocol);
234 if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp),
235 l4proto, &ct->tuplehash[!dir].tuple, !manip))
238 if (skb->ip_summed != CHECKSUM_PARTIAL) {
239 /* Reloading "inside" here since manip_pkt may reallocate */
240 inside = (void *)skb->data + hdrlen;
241 inside->icmp.checksum = 0;
242 inside->icmp.checksum =
243 csum_fold(skb_checksum(skb, hdrlen,
244 skb->len - hdrlen, 0));
247 /* Change outer to look like the reply to an incoming packet */
248 nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
249 l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, 0);
250 if (!nf_nat_ipv4_manip_pkt(skb, 0, l4proto, &target, manip))
255 EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
258 nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
259 const struct nf_hook_state *state,
260 unsigned int (*do_chain)(void *priv,
262 const struct nf_hook_state *state,
266 enum ip_conntrack_info ctinfo;
267 struct nf_conn_nat *nat;
268 /* maniptype == SRC for postrouting. */
269 enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
271 /* We never see fragments: conntrack defrags on pre-routing
272 * and local-out, and nf_nat_out protects post-routing.
274 NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
276 ct = nf_ct_get(skb, &ctinfo);
277 /* Can't track? It's not due to stress, or conntrack would
278 * have dropped it. Hence it's the user's responsibilty to
279 * packet filter it out, or implement conntrack/NAT for that
285 /* Don't try to NAT if this packet is not conntracked */
286 if (nf_ct_is_untracked(ct))
289 nat = nf_ct_nat_ext_add(ct);
295 case IP_CT_RELATED_REPLY:
296 if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
297 if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
303 /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
305 /* Seen it before? This can happen for loopback, retrans,
308 if (!nf_nat_initialized(ct, maniptype)) {
311 ret = do_chain(priv, skb, state, ct);
312 if (ret != NF_ACCEPT)
315 if (nf_nat_initialized(ct, HOOK2MANIP(state->hook)))
318 ret = nf_nat_alloc_null_binding(ct, state->hook);
319 if (ret != NF_ACCEPT)
322 pr_debug("Already setup manip %s for ct %p\n",
323 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
325 if (nf_nat_oif_changed(state->hook, ctinfo, nat,
333 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
334 ctinfo == IP_CT_ESTABLISHED_REPLY);
335 if (nf_nat_oif_changed(state->hook, ctinfo, nat, state->out))
339 return nf_nat_packet(ct, ctinfo, state->hook, skb);
342 nf_ct_kill_acct(ct, ctinfo, skb);
345 EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn);
348 nf_nat_ipv4_in(void *priv, struct sk_buff *skb,
349 const struct nf_hook_state *state,
350 unsigned int (*do_chain)(void *priv,
352 const struct nf_hook_state *state,
356 __be32 daddr = ip_hdr(skb)->daddr;
358 ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
359 if (ret != NF_DROP && ret != NF_STOLEN &&
360 daddr != ip_hdr(skb)->daddr)
365 EXPORT_SYMBOL_GPL(nf_nat_ipv4_in);
368 nf_nat_ipv4_out(void *priv, struct sk_buff *skb,
369 const struct nf_hook_state *state,
370 unsigned int (*do_chain)(void *priv,
372 const struct nf_hook_state *state,
376 const struct nf_conn *ct;
377 enum ip_conntrack_info ctinfo;
382 /* root is playing with raw sockets. */
383 if (skb->len < sizeof(struct iphdr) ||
384 ip_hdrlen(skb) < sizeof(struct iphdr))
387 ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
389 if (ret != NF_DROP && ret != NF_STOLEN &&
390 !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
391 (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
392 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
394 if ((ct->tuplehash[dir].tuple.src.u3.ip !=
395 ct->tuplehash[!dir].tuple.dst.u3.ip) ||
396 (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
397 ct->tuplehash[dir].tuple.src.u.all !=
398 ct->tuplehash[!dir].tuple.dst.u.all)) {
399 err = nf_xfrm_me_harder(state->net, skb, AF_INET);
401 ret = NF_DROP_ERR(err);
407 EXPORT_SYMBOL_GPL(nf_nat_ipv4_out);
410 nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
411 const struct nf_hook_state *state,
412 unsigned int (*do_chain)(void *priv,
414 const struct nf_hook_state *state,
417 const struct nf_conn *ct;
418 enum ip_conntrack_info ctinfo;
422 /* root is playing with raw sockets. */
423 if (skb->len < sizeof(struct iphdr) ||
424 ip_hdrlen(skb) < sizeof(struct iphdr))
427 ret = nf_nat_ipv4_fn(priv, skb, state, do_chain);
428 if (ret != NF_DROP && ret != NF_STOLEN &&
429 (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
430 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
432 if (ct->tuplehash[dir].tuple.dst.u3.ip !=
433 ct->tuplehash[!dir].tuple.src.u3.ip) {
434 err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
436 ret = NF_DROP_ERR(err);
439 else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
440 ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
441 ct->tuplehash[dir].tuple.dst.u.all !=
442 ct->tuplehash[!dir].tuple.src.u.all) {
443 err = nf_xfrm_me_harder(state->net, skb, AF_INET);
445 ret = NF_DROP_ERR(err);
451 EXPORT_SYMBOL_GPL(nf_nat_ipv4_local_fn);
453 static int __init nf_nat_l3proto_ipv4_init(void)
457 err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
460 err = nf_nat_l3proto_register(&nf_nat_l3proto_ipv4);
466 nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
471 static void __exit nf_nat_l3proto_ipv4_exit(void)
473 nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv4);
474 nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
MODULE_LICENSE("GPL");
/* Allow autoloading when the NAT core requests the AF_INET l3proto. */
MODULE_ALIAS("nf-nat-" __stringify(AF_INET));

module_init(nf_nat_l3proto_ipv4_init);
module_exit(nf_nat_l3proto_ipv4_exit);