1 #include <linux/types.h>
2 #include <linux/skbuff.h>
3 #include <linux/socket.h>
4 #include <linux/sysctl.h>
6 #include <linux/module.h>
7 #include <linux/if_arp.h>
8 #include <linux/ipv6.h>
9 #include <linux/mpls.h>
14 #include <net/ip_fib.h>
15 #include <net/netevent.h>
16 #include <net/netns/generic.h>
/* Maximum number of labels a route can push (sizes rt_label[] below) */
#define MAX_NEW_LABELS 2
/* This maximum hardware-address ("ha") length is copied from the definition of struct neighbour */
#define MAX_VIA_ALEN (ALIGN(MAX_ADDR_LEN, sizeof(unsigned long)))
struct mpls_route { /* next hop label forwarding entry */
	struct net_device *rt_dev;	/* output device; NULL means no nexthop device */
	struct rcu_head rt_rcu;		/* for deferred free via kfree_rcu() (see mpls_rt_free) */
	u32 rt_label[MAX_NEW_LABELS];	/* labels pushed on egress, encoded outermost-last */
	u8 rt_protocol; /* routing protocol that set this entry */
	unsigned short rt_via_family;	/* address family of the via address, passed to neigh_xmit() */
36 static int label_limit = (1 << 20) - 1;
38 static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
40 struct mpls_route *rt = NULL;
42 if (index < net->mpls.platform_labels) {
43 struct mpls_route __rcu **platform_label =
44 rcu_dereference(net->mpls.platform_label);
45 rt = rcu_dereference(platform_label[index]);
50 static bool mpls_output_possible(const struct net_device *dev)
52 return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
55 static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
57 /* The size of the layer 2.5 labels to be added for this route */
58 return rt->rt_labels * sizeof(struct mpls_shim_hdr);
61 static unsigned int mpls_dev_mtu(const struct net_device *dev)
63 /* The amount of data the layer 2 frame can hold */
67 static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
72 if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
78 static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
79 struct mpls_entry_decoded dec)
81 /* RFC4385 and RFC5586 encode other packets in mpls such that
82 * they don't conflict with the ip version number, making
83 * decoding by examining the ip version correct in everything
84 * except for the strangest cases.
86 * The strange cases if we choose to support them will require
87 * manual configuration.
89 struct iphdr *hdr4 = ip_hdr(skb);
92 if (hdr4->version == 4) {
93 skb->protocol = htons(ETH_P_IP);
94 csum_replace2(&hdr4->check,
95 htons(hdr4->ttl << 8),
99 else if (hdr4->version == 6) {
100 struct ipv6hdr *hdr6 = ipv6_hdr(skb);
101 skb->protocol = htons(ETH_P_IPV6);
102 hdr6->hop_limit = dec.ttl;
105 /* version 0 and version 1 are used by pseudo wires */
/* Receive handler for ETH_P_MPLS_UC packets (registered through
 * mpls_packet_type): decode the top label, look up its route and
 * forward the packet out of the route's device with the route's
 * label stack pushed on.
 *
 * NOTE(review): this view of the function is missing several lines
 * (error labels, returns and some local declarations); the comments
 * below describe only the visible statements.
 */
static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pt, struct net_device *orig_dev)
	struct net *net = dev_net(dev);
	struct mpls_shim_hdr *hdr;
	struct mpls_route *rt;
	struct mpls_entry_decoded dec;
	struct net_device *out_dev;
	unsigned int new_header_size;
	/* Careful this entire function runs inside of an rcu critical section */
	/* Only forward packets addressed to this host at layer 2 */
	if (skb->pkt_type != PACKET_HOST)
	/* Take a private copy if the skb is shared so it may be modified */
	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
	/* Make sure the shim header is in the linear area */
	if (!pskb_may_pull(skb, sizeof(*hdr)))
	/* Read and decode the label */
	dec = mpls_entry_decode(hdr);
	/* Pop the incoming shim header */
	skb_pull(skb, sizeof(*hdr));
	skb_reset_network_header(skb);
	rt = mpls_route_input_rcu(net, dec.label);
	/* Find the output device */
	out_dev = rt->rt_dev;
	if (!mpls_output_possible(out_dev))
	if (skb_warn_if_lro(skb))
	skb_forward_csum(skb);
	/* Verify ttl is valid */
	/* Verify the destination can hold the packet */
	new_header_size = mpls_rt_header_size(rt);
	mtu = mpls_dev_mtu(out_dev);
	/* Account for the labels we are about to push */
	if (mpls_pkt_too_big(skb, mtu - new_header_size))
	hh_len = LL_RESERVED_SPACE(out_dev);
	if (!out_dev->header_ops)
	/* Ensure there is enough space for the headers in the skb */
	if (skb_cow(skb, hh_len + new_header_size))
	skb->protocol = htons(ETH_P_MPLS_UC);
	if (unlikely(!new_header_size && dec.bos)) {
		/* Penultimate hop popping */
		if (!mpls_egress(rt, skb, dec))
	/* Make room for the outgoing label stack */
	skb_push(skb, new_header_size);
	skb_reset_network_header(skb);
	/* Push the new labels */
	/* Encode innermost-first so bottom-of-stack lands on the last label */
	for (i = rt->rt_labels - 1; i >= 0; i--) {
		hdr[i] = mpls_entry_encode(rt->rt_label[i], dec.ttl, 0, bos);
	/* Resolve layer 2 via the neighbour subsystem and transmit */
	err = neigh_xmit(rt->rt_via_family, out_dev, rt->rt_via, skb);
	net_dbg_ratelimited("%s: packet transmission failed: %d\n",
209 static struct packet_type mpls_packet_type __read_mostly = {
210 .type = cpu_to_be16(ETH_P_MPLS_UC),
211 .func = mpls_forward,
214 static struct mpls_route *mpls_rt_alloc(size_t alen)
216 struct mpls_route *rt;
218 rt = kzalloc(GFP_KERNEL, sizeof(*rt) + alen);
220 rt->rt_via_alen = alen;
224 static void mpls_rt_free(struct mpls_route *rt)
227 kfree_rcu(rt, rt_rcu);
/* Install @new at platform label @index, replacing any existing route.
 * When @dev is non-NULL the replacement only happens if the current
 * route points at that device (used for per-device teardown).
 *
 * NOTE(review): the tail of this function (notification and freeing of
 * the displaced route) is not visible in this view.
 */
static void mpls_route_update(struct net *net, unsigned index,
			      struct net_device *dev, struct mpls_route *new,
			      const struct nl_info *info)
	struct mpls_route *rt, *old = NULL;
	rt = net->mpls.platform_label[index];
	/* Replace unconditionally, or only when the route uses @dev */
	if (!dev || (rt && (rt->rt_dev == dev))) {
		rcu_assign_pointer(net->mpls.platform_label[index], new);
	/* If we removed a route free it now */
/* Remove every route that forwards out of @dev; called when the device
 * is going away (see NETDEV_UNREGISTER handling in mpls_dev_notify).
 */
static void mpls_ifdown(struct net_device *dev)
	struct net *net = dev_net(dev);
	/* Walk the whole platform label table looking for users of @dev */
	for (index = 0; index < net->mpls.platform_labels; index++) {
		struct mpls_route *rt = net->mpls.platform_label[index];
		if (rt->rt_dev != dev)
/* netdevice notifier: reacts to device lifecycle events.
 * NOTE(review): only the NETDEV_UNREGISTER case label is visible here;
 * its body (presumably mpls_ifdown(dev)) and the return are truncated.
 */
static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	case NETDEV_UNREGISTER:
276 static struct notifier_block mpls_dev_notifier = {
277 .notifier_call = mpls_dev_notify,
/* Grow or shrink the per-namespace platform label table to @limit
 * entries, preserving existing routes and populating the reserved
 * explicit-NULL labels when the table becomes large enough to hold
 * them.
 *
 * NOTE(review): error-handling paths and some locking lines are not
 * visible in this view; comments below cover the visible statements.
 */
static int resize_platform_label_table(struct net *net, size_t limit)
	size_t size = sizeof(struct mpls_route *) * limit;
	struct mpls_route __rcu **labels = NULL, **old;
	struct mpls_route *rt0 = NULL, *rt2 = NULL;
	/* Try kmalloc first without warnings/retries, fall back to vmalloc */
	labels = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	labels = vzalloc(size);
	/* In case the predefined labels need to be populated */
	if (limit > LABEL_IPV4_EXPLICIT_NULL) {
		struct net_device *lo = net->loopback_dev;
		/* Deliver-to-self route for the IPv4 explicit NULL label */
		rt0 = mpls_rt_alloc(lo->addr_len);
		rt0->rt_protocol = RTPROT_KERNEL;
		rt0->rt_via_family = AF_PACKET;
		memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
	if (limit > LABEL_IPV6_EXPLICIT_NULL) {
		struct net_device *lo = net->loopback_dev;
		/* Deliver-to-self route for the IPv6 explicit NULL label */
		rt2 = mpls_rt_alloc(lo->addr_len);
		rt2->rt_protocol = RTPROT_KERNEL;
		rt2->rt_via_family = AF_PACKET;
		memcpy(rt2->rt_via, lo->dev_addr, lo->addr_len);
	/* Remember the original table */
	old = net->mpls.platform_label;
	old_limit = net->mpls.platform_labels;
	/* Free any labels beyond the new table */
	for (index = limit; index < old_limit; index++)
		mpls_route_update(net, index, NULL, NULL, NULL);
	/* Copy over the old labels */
	if (old_limit < limit)
		cp_size = old_limit * sizeof(struct mpls_route *);
	memcpy(labels, old, cp_size);
	/* If needed set the predefined labels */
	if ((old_limit <= LABEL_IPV6_EXPLICIT_NULL) &&
	    (limit > LABEL_IPV6_EXPLICIT_NULL)) {
		labels[LABEL_IPV6_EXPLICIT_NULL] = rt2;
	if ((old_limit <= LABEL_IPV4_EXPLICIT_NULL) &&
	    (limit > LABEL_IPV4_EXPLICIT_NULL)) {
		labels[LABEL_IPV4_EXPLICIT_NULL] = rt0;
	/* Update the global pointers */
	net->mpls.platform_labels = limit;
	net->mpls.platform_label = labels;
/* sysctl handler for net.mpls.platform_labels: reads/writes an int
 * bounded above by label_limit via a temporary ctl_table, and on a
 * successful write resizes the platform label table to the new value.
 *
 * NOTE(review): the .mode/.extra1 initializers and final return are
 * not visible in this view.
 */
static int mpls_platform_labels(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
	struct net *net = table->data;
	/* Snapshot the current size so a failed write leaves it untouched */
	int platform_labels = net->mpls.platform_labels;
	struct ctl_table tmp = {
		.procname = table->procname,
		.data = &platform_labels,
		.maxlen = sizeof(int),
		.extra2 = &label_limit,
	ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
	/* Only resize after a successful write of the new limit */
	if (write && ret == 0)
		ret = resize_platform_label_table(net, platform_labels);
/* Template sysctl table; kmemdup'd per network namespace in
 * mpls_net_init so each namespace gets its own .data pointer.
 */
static struct ctl_table mpls_table[] = {
		.procname = "platform_labels",
		.maxlen = sizeof(int),
		.proc_handler = mpls_platform_labels,
/* Per-namespace init: start with an empty label table and register the
 * net/mpls sysctl directory with a namespace-private copy of mpls_table.
 *
 * NOTE(review): the kmemdup failure check and error returns are not
 * visible in this view.
 */
static int mpls_net_init(struct net *net)
	struct ctl_table *table;
	net->mpls.platform_labels = 0;
	net->mpls.platform_label = NULL;
	/* Private copy so .data can point at this namespace's state */
	table = kmemdup(mpls_table, sizeof(mpls_table), GFP_KERNEL);
	net->mpls.ctl = register_net_sysctl(net, "net/mpls", table);
	if (net->mpls.ctl == NULL)
/* Per-namespace teardown: unregister the sysctls, release every route
 * and free the platform label table.
 */
static void mpls_net_exit(struct net *net)
	struct ctl_table *table;
	table = net->mpls.ctl->ctl_table_arg;
	unregister_net_sysctl_table(net->mpls.ctl);
	/* An rcu grace period has elapsed since there was a device in
	 * the network namespace (and thus the last in flight packet)
	 * left this network namespace. This is because
	 * unregister_netdevice_many and netdev_run_todo has completed
	 * for each network device that was in this network namespace.
	 *
	 * As such no additional rcu synchronization is necessary when
	 * freeing the platform_label table.
	 */
	for (index = 0; index < net->mpls.platform_labels; index++) {
		struct mpls_route *rt = net->mpls.platform_label[index];
		rcu_assign_pointer(net->mpls.platform_label[index], NULL);
	kvfree(net->mpls.platform_label);
454 static struct pernet_operations mpls_net_ops = {
455 .init = mpls_net_init,
456 .exit = mpls_net_exit,
/* Module init: register pernet ops and the netdevice notifier, then
 * hook the MPLS receive handler into the packet layer.
 *
 * NOTE(review): some error checks and the success return path are not
 * visible in this view.
 */
static int __init mpls_init(void)
	/* The shim header must be exactly one 32-bit label stack entry */
	BUILD_BUG_ON(sizeof(struct mpls_shim_hdr) != 4);
	err = register_pernet_subsys(&mpls_net_ops);
	err = register_netdevice_notifier(&mpls_dev_notifier);
		goto out_unregister_pernet;
	/* Start receiving ETH_P_MPLS_UC packets */
	dev_add_pack(&mpls_packet_type);
out_unregister_pernet:
	/* Unwind the pernet registration on notifier failure */
	unregister_pernet_subsys(&mpls_net_ops);
module_init(mpls_init);
485 static void __exit mpls_exit(void)
487 dev_remove_pack(&mpls_packet_type);
488 unregister_netdevice_notifier(&mpls_dev_notifier);
489 unregister_pernet_subsys(&mpls_net_ops);
491 module_exit(mpls_exit);
/* Module metadata */
MODULE_DESCRIPTION("MultiProtocol Label Switching");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_MPLS);