/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
static struct sk_buff_head skb_pool;

#ifdef CONFIG_NETPOLL_TRAP
static atomic_t trapped;
#endif

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50
#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)
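/*
 * For reference: on Ethernet that is 14 + 20 + 8 + 1460 = 1502 bytes
 * per pooled skb (plus allocator overhead), so a full pool of
 * MAX_SKBS entries pins roughly 32 * 1502 bytes (~47 KB).
 */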
static void zap_completion_queue(void);
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
static void netpoll_async_cleanup(struct work_struct *work);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
MODULE_PARM_DESC(carrier_timeout, "netpoll: carrier timeout in seconds");
#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
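/*
 * Example: together with pr_fmt() above, np_info(np, "carrier up\n")
 * on a netpoll instance named "netconsole" prints
 * "netpoll: netconsole: carrier up".
 */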
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}
/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
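/*
 * Concretely, netpoll_poll_dev() below polls with a budget of 16 when
 * rx processing is active, so one pass may reap up to 16 packets
 * across the device's NAPI contexts instead of a single one.
 */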
static int poll_one_napi(struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	WARN_ONCE(work > budget, "%pF exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);

	return budget - work;
}
static void poll_napi(struct net_device *dev, int budget)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(napi, budget);
			spin_unlock(&napi->poll_lock);
		}
	}
}
static void service_neigh_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->neigh_tx)))
			netpoll_neigh_reply(skb, npi);
	}
}
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	bool rx_processing = netpoll_rx_processing(ni);
	int budget = rx_processing ? 16 : 0;

	/* Don't do any rx activity if the dev_lock mutex is held;
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state.
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	if (rx_processing)
		netpoll_set_trap(1);

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev, budget);

	if (rx_processing)
		netpoll_set_trap(0);

	up(&ni->dev_lock);

	if (dev->flags & IFF_SLAVE) {
		if (ni) {
			struct net_device *bond_dev;
			struct sk_buff *skb;
			struct netpoll_info *bond_ni;

			bond_dev = netdev_master_upper_dev_get_rcu(dev);
			bond_ni = rcu_dereference_bh(bond_dev->npinfo);
			while ((skb = skb_dequeue(&ni->neigh_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_ni->neigh_tx, skb);
			}
		}
	}

	service_neigh_queue(ni);

	zap_completion_queue();
}
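/*
 * A driver's ndo_poll_controller typically just runs its interrupt
 * handler with the IRQ fenced off. A minimal sketch, for a
 * hypothetical "foo" driver (not part of this file):
 *
 *	static void foo_netpoll(struct net_device *dev)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		disable_irq(priv->pdev->irq);
 *		foo_interrupt(priv->pdev->irq, dev);
 *		enable_irq(priv->pdev->irq);
 *	}
 */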
void netpoll_rx_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_rx_disable);
void netpoll_rx_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_rx_enable);
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}
static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}
/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					if (vlan_tx_tag_present(skb) &&
					    !vlan_hw_offload_capable(netif_skb_features(skb),
								     skb->vlan_proto)) {
						skb = __vlan_put_tag(skb, skb->vlan_proto,
								     vlan_tx_tag_get(skb));
						if (unlikely(!skb)) {
							/* This is actually a packet drop, but we
							 * don't want the code at the end of this
							 * function to try and re-queue a NULL skb.
							 */
							status = NETDEV_TX_OK;
							goto unlock_txq;
						}
						skb->vlan_tci = 0;
					}

					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
			unlock_txq:
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
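/*
 * Callers normally reach this through the netpoll_send_skb() inline
 * in include/linux/netpoll.h, which supplies the required IRQ-off
 * context; roughly:
 *
 *	static inline void netpoll_send_skb(struct netpoll *np,
 *					    struct sk_buff *skb)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		netpoll_send_skb_on_dev(np, skb, np->dev);
 *		local_irq_restore(flags);
 *	}
 */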
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
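/*
 * Typical use, sketched after drivers/net/netconsole.c: once a
 * struct netpoll is up, console output is chunked and pushed out
 * with netpoll_send_udp():
 *
 *	for (left = len; left; left -= frag, msg += frag) {
 *		frag = min(left, MAX_PRINT_CHUNK);
 *		netpoll_send_udp(&nt->np, msg, frag);
 *	}
 */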
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int size, type = ARPOP_REPLY;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0, proto;

	if (!netpoll_rx_processing(npinfo))
		return;

	/* Before checking the packet, we do some early
	   inspection of whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto == ETH_P_ARP) {
		struct arphdr *arp;
		unsigned char *arp_ptr;
		/* No arp on this interface */
		if (skb->dev->flags & IFF_NOARP)
			return;

		if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
			return;

		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		arp = arp_hdr(skb);

		if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
		     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
		    arp->ar_pro != htons(ETH_P_IP) ||
		    arp->ar_op != htons(ARPOP_REQUEST))
			return;

		arp_ptr = (unsigned char *)(arp+1);
		/* save the location of the src hw addr */
		sha = arp_ptr;
		arp_ptr += skb->dev->addr_len;
		memcpy(&sip, arp_ptr, 4);
		arp_ptr += 4;
		/* If we actually cared about dst hw addr,
		   it would get copied here */
		arp_ptr += skb->dev->addr_len;
		memcpy(&tip, arp_ptr, 4);

		/* Should we ignore arp? */
		if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
			return;

		size = arp_hdr_len(skb->dev);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (tip != np->local_ip.ip)
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			skb_reset_network_header(send_skb);
			arp = (struct arphdr *) skb_put(send_skb, size);
			send_skb->dev = skb->dev;
			send_skb->protocol = htons(ETH_P_ARP);

			/* Fill the device header for the ARP frame */
			if (dev_hard_header(send_skb, skb->dev, ETH_P_ARP,
					    sha, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			/*
			 * Fill out the arp protocol part.
			 *
			 * we only support ethernet device type,
			 * which (according to RFC 1390) should
			 * always equal 1 (Ethernet).
			 */

			arp->ar_hrd = htons(np->dev->type);
			arp->ar_pro = htons(ETH_P_IP);
			arp->ar_hln = np->dev->addr_len;
			arp->ar_pln = 4;
			arp->ar_op = htons(type);

			arp_ptr = (unsigned char *)(arp + 1);
			memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &tip, 4);
			arp_ptr += 4;
			memcpy(arp_ptr, sha, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &sip, 4);

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_skb_hooks for the same
			 * address we're fine by sending a single reply
			 */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	} else if (proto == ETH_P_IPV6) {
#if IS_ENABLED(CONFIG_IPV6)
		struct nd_msg *msg;
		u8 *lladdr = NULL;
		struct ipv6hdr *hdr;
		struct icmp6hdr *icmp6h;
		const struct in6_addr *saddr;
		const struct in6_addr *daddr;
		struct inet6_dev *in6_dev = NULL;
		struct in6_addr *target;

		in6_dev = in6_dev_get(skb->dev);
		if (!in6_dev || !in6_dev->cnf.accept_ra)
			return;

		if (!pskb_may_pull(skb, skb->len))
			return;

		msg = (struct nd_msg *)skb_transport_header(skb);

		__skb_push(skb, skb->data - skb_transport_header(skb));

		if (ipv6_hdr(skb)->hop_limit != 255)
			return;
		if (msg->icmph.icmp6_code != 0)
			return;
		if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
			return;

		saddr = &ipv6_hdr(skb)->saddr;
		daddr = &ipv6_hdr(skb)->daddr;

		size = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(daddr, &np->local_ip.in6))
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			send_skb->protocol = htons(ETH_P_IPV6);
			send_skb->dev = skb->dev;

			skb_reset_network_header(send_skb);
			hdr = (struct ipv6hdr *) skb_put(send_skb, sizeof(struct ipv6hdr));
			*(__be32*)hdr = htonl(0x60000000);
			hdr->payload_len = htons(size);
			hdr->nexthdr = IPPROTO_ICMPV6;
			hdr->hop_limit = 255;
			hdr->saddr = *saddr;
			hdr->daddr = *daddr;

			icmp6h = (struct icmp6hdr *) skb_put(send_skb, sizeof(struct icmp6hdr));
			icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
			icmp6h->icmp6_router = 0;
			icmp6h->icmp6_solicited = 1;

			target = (struct in6_addr *) skb_put(send_skb, sizeof(struct in6_addr));
			*target = msg->target;
			icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
							      IPPROTO_ICMPV6,
							      csum_partial(icmp6h,
									   size, 0));

			if (dev_hard_header(send_skb, skb->dev, ETH_P_IPV6,
					    lladdr, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_skb_hooks for the same
			 * address, we're fine by sending a single reply
			 */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
#endif
	}
}
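/*
 * Size check (Ethernet/IPv4): the ARP reply built above is
 * arp_hdr_len() = sizeof(struct arphdr) + 2 * (ETH_ALEN + 4)
 * = 8 + 2 * 10 = 28 bytes, the same size as the request.
 */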
static bool pkt_is_ns(struct sk_buff *skb)
{
	struct nd_msg *msg;
	struct ipv6hdr *hdr;

	/* A neighbour solicitation is IPv6, not ARP */
	if (skb->protocol != htons(ETH_P_IPV6))
		return false;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
		return false;

	msg = (struct nd_msg *)skb_transport_header(skb);
	__skb_push(skb, skb->data - skb_transport_header(skb));
	hdr = ipv6_hdr(skb);

	if (hdr->nexthdr != IPPROTO_ICMPV6)
		return false;
	if (hdr->hop_limit != 255)
		return false;
	if (msg->icmph.icmp6_code != 0)
		return false;
	if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		return false;

	return true;
}
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int proto, len, ulen, data_len;
	int hits = 0, offset;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np, *tmp;
	u16 source;

	if (!netpoll_rx_processing(npinfo))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) && netpoll_trap()) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	} else if (pkt_is_ns(skb) && netpoll_trap()) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP && proto != ETH_P_IPV6)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (proto == ETH_P_IP) {
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (iph->ihl < 5 || iph->version != 4)
			goto out;
		if (!pskb_may_pull(skb, iph->ihl*4))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
			goto out;

		len = ntohs(iph->tot_len);
		if (skb->len < len || len < iph->ihl*4)
			goto out;

		/*
		 * Our transport medium may have padded the buffer out.
		 * Now we trim to the true length of the frame.
		 */
		if (pskb_trim_rcsum(skb, len))
			goto out;

		iph = (struct iphdr *)skb->data;
		if (iph->protocol != IPPROTO_UDP)
			goto out;

		len -= iph->ihl*4;
		uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
		offset = (unsigned char *)(uh + 1) - skb->data;
		ulen = ntohs(uh->len);
		data_len = skb->len - offset;
		source = ntohs(uh->source);

		if (ulen != len)
			goto out;
		if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
			goto out;
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
				continue;
			if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_skb_hook(np, source, skb, offset, data_len);
			hits++;
		}
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		const struct ipv6hdr *ip6h;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto out;
		ip6h = (struct ipv6hdr *)skb->data;
		if (ip6h->version != 6)
			goto out;
		len = ntohs(ip6h->payload_len);
		if (!len)
			goto out;
		if (len + sizeof(struct ipv6hdr) > skb->len)
			goto out;
		if (pskb_trim_rcsum(skb, len + sizeof(struct ipv6hdr)))
			goto out;
		ip6h = ipv6_hdr(skb);
		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
			goto out;
		uh = udp_hdr(skb);
		offset = (unsigned char *)(uh + 1) - skb->data;
		ulen = ntohs(uh->len);
		data_len = skb->len - offset;
		source = ntohs(uh->source);
		if (ulen != skb->len)
			goto out;
		if (udp6_csum_init(skb, uh, IPPROTO_UDP))
			goto out;
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(&np->local_ip.in6, &ip6h->daddr))
				continue;
			if (!ipv6_addr_equal(&np->remote_ip.in6, &ip6h->saddr))
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_skb_hook(np, source, skb, offset, data_len);
			hits++;
		}
#endif
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (netpoll_trap()) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
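/*
 * Matched datagrams are delivered through np->rx_skb_hook, which the
 * client sets before netpoll_setup(). A minimal sketch (hypothetical
 * hook, not part of this file):
 *
 *	static void my_rx_hook(struct netpoll *np, int source,
 *			       struct sk_buff *skb, int offset, int len)
 *	{
 *		pr_info("%d bytes from port %d\n", len, source);
 *	}
 *
 * The payload starts at skb->data + offset and is len bytes long.
 */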
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
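/*
 * Example output for an illustrative IPv4 configuration:
 *
 *	netpoll: netconsole: local port 6665
 *	netpoll: netconsole: local IPv4 address 192.168.0.1
 *	netpoll: netconsole: interface 'eth0'
 *	netpoll: netconsole: remote port 6666
 *	netpoll: netconsole: remote IPv4 address 192.168.0.2
 *	netpoll: netconsole: remote ethernet address 00:11:22:33:44:55
 */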
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_err(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
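/*
 * The option string follows the netconsole format
 *
 *	[src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr]
 *
 * e.g. "6665@192.168.0.1/eth0,6666@192.168.0.2/00:11:22:33:44:55".
 * Fields left empty fall back to the defaults handled above.
 */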
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->neigh_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_skb_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported on %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev, GFP_KERNEL);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
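/*
 * Typical embedding, sketched after the netconsole driver: the caller
 * owns the struct netpoll, parses user configuration into it, then
 * brings it up:
 *
 *	struct netpoll np = { .name = "netconsole" };
 *
 *	err = netpoll_parse_options(&np, config);
 *	if (!err)
 *		err = netpoll_setup(&np);
 */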
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->neigh_tx);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);

	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);

	kfree(npinfo);
}
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	/* rtnl_dereference would be preferable here but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		rcu_assign_pointer(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}

void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);
void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);
#ifdef CONFIG_NETPOLL_TRAP
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);
#endif /* CONFIG_NETPOLL_TRAP */
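/*
 * netpoll_set_trap() exists for in-kernel debuggers such as kgdboe:
 * while trapped (netpoll_set_trap(1)), __netpoll_rx() above consumes
 * every packet itself instead of handing unmatched ones back to the
 * stack, and ARP/NS requests are queued on neigh_tx for later replies;
 * netpoll_set_trap(0) restores normal delivery.
 */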