/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/smp_lock.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
#define MAX_RETRIES 20000

static DEFINE_SPINLOCK(skb_list_lock);
static int nr_skbs;
static struct sk_buff *skbs;

static DEFINE_SPINLOCK(queue_lock);
static int queue_depth;
static struct sk_buff *queue_head, *queue_tail;

static atomic_t trapped;

#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
				sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);

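/*
 * Worker for the deferred-transmit queue: drain queued skbs through
 * dev_queue_xmit() once we are back in process context.
 */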
static void queue_process(void *p)
{
	unsigned long flags;
	struct sk_buff *skb;

	while (queue_head) {
		spin_lock_irqsave(&queue_lock, flags);

		skb = queue_head;
		queue_head = skb->next;
		if (skb == queue_tail)
			queue_head = NULL;

		queue_depth--;

		spin_unlock_irqrestore(&queue_lock, flags);

		dev_queue_xmit(skb);
	}
}

static DECLARE_WORK(send_queue, queue_process, NULL);

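/*
 * Defer transmission of a fully built frame until the workqueue runs;
 * drop it outright if the queue is already at MAX_QUEUE_DEPTH.
 */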
void netpoll_queue(struct sk_buff *skb)
{
	unsigned long flags;

	if (queue_depth == MAX_QUEUE_DEPTH) {
		__kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&queue_lock, flags);
	if (!queue_head)
		queue_head = skb;
	else
		queue_tail->next = skb;
	queue_tail = skb;
	queue_depth++;
	spin_unlock_irqrestore(&queue_lock, flags);

	schedule_work(&send_queue);
}

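/*
 * Check the UDP checksum of a received frame. A zero checksum field
 * means the sender didn't supply one, so the packet is accepted as-is.
 */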
static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			unsigned short ulen, u32 saddr, u32 daddr)
{
	if (uh->check == 0)
		return 0;

	if (skb->ip_summed == CHECKSUM_HW)
		return csum_tcpudp_magic(
			saddr, daddr, ulen, IPPROTO_UDP, skb->csum);

	skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	return csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static void poll_napi(struct netpoll *np)
{
	struct netpoll_info *npinfo = np->dev->npinfo;
	int budget = 16;

	if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
	    npinfo->poll_owner != smp_processor_id() &&
	    spin_trylock(&npinfo->poll_lock)) {
		npinfo->rx_flags |= NETPOLL_RX_DROP;
		atomic_inc(&trapped);

		np->dev->poll(np->dev, &budget);

		atomic_dec(&trapped);
		npinfo->rx_flags &= ~NETPOLL_RX_DROP;
		spin_unlock(&npinfo->poll_lock);
	}
}

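/*
 * Substitute for interrupts: ask the driver to poll the hardware,
 * service NAPI if the driver uses it, then reap our own completed
 * tx skbs.
 */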
void netpoll_poll(struct netpoll *np)
{
	if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller)
		return;

	/* Process pending work on NIC */
	np->dev->poll_controller(np->dev);
	if (np->dev->poll)
		poll_napi(np);

	zap_completion_queue();
}

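/*
 * Top up the emergency pool with atomic allocations until it holds
 * MAX_SKBS buffers.
 */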
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_list_lock, flags);
	while (nr_skbs < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		skb->next = skbs;
		skbs = skb;
		nr_skbs++;
	}
	spin_unlock_irqrestore(&skb_list_lock, flags);
}

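/*
 * Free skbs parked on this CPU's completion queue ourselves, since
 * the softirq that normally reaps them may never get a chance to run
 * while we are polling.
 */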
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor)
				dev_kfree_skb_any(skb); /* put this one back */
			else
				__kfree_skb(skb);
		}
	}

	put_cpu_var(softnet_data);
}

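/*
 * Get an skb for a message: try a fresh atomic allocation first, fall
 * back to the preallocated pool, and keep polling the device until
 * something frees up.
 */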
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int once = 1, count = 0;
	unsigned long flags;
	struct sk_buff *skb = NULL;

repeat:
	zap_completion_queue();

	if (nr_skbs < MAX_SKBS)
		refill_skbs();

	skb = alloc_skb(len, GFP_ATOMIC);

	if (!skb) {
		spin_lock_irqsave(&skb_list_lock, flags);
		skb = skbs;
		if (skb) {
			skbs = skb->next;
			skb->next = NULL;
			nr_skbs--;
		}
		spin_unlock_irqrestore(&skb_list_lock, flags);
	}

	if (!skb) {
		count++;
		if (once && (count == 1000000)) {
			printk("out of netpoll skbs!\n");
			once = 0;
		}
		netpoll_poll(np);
		goto repeat;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

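/*
 * Transmit an skb directly via the driver, dropping it if we would
 * recurse, and retrying (while polling the device) as long as the tx
 * queue is stopped or the driver reports busy.
 */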
static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status;
	struct netpoll_info *npinfo;

	if (!np || !np->dev || !netif_running(np->dev)) {
		__kfree_skb(skb);
		return;
	}

	npinfo = np->dev->npinfo;

	/* avoid recursion */
	if (npinfo->poll_owner == smp_processor_id() ||
	    np->dev->xmit_lock_owner == smp_processor_id()) {
		if (np->drop)
			np->drop(skb);
		else
			__kfree_skb(skb);
		return;
	}

	do {
		npinfo->tries--;
		spin_lock(&np->dev->xmit_lock);
		np->dev->xmit_lock_owner = smp_processor_id();

		/*
		 * network drivers do not expect to be called if the queue is
		 * stopped.
		 */
		if (netif_queue_stopped(np->dev)) {
			np->dev->xmit_lock_owner = -1;
			spin_unlock(&np->dev->xmit_lock);
			netpoll_poll(np);
			udelay(50);
			continue;
		}

		status = np->dev->hard_start_xmit(skb, np->dev);
		np->dev->xmit_lock_owner = -1;
		spin_unlock(&np->dev->xmit_lock);

		/* success */
		if (!status) {
			npinfo->tries = MAX_RETRIES; /* reset */
			return;
		}

		/* transmit busy */
		netpoll_poll(np);
		udelay(50);
	} while (npinfo->tries > 0);
}

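/*
 * Build a complete UDP/IP/ethernet frame around msg by hand and push
 * it out through netpoll_send_skb().
 */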
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	memcpy(skb->data, msg, len);
	skb->len += len;

	udph = (struct udphdr *) skb_push(skb, sizeof(*udph));
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;

	iph = (struct iphdr *)skb_push(skb, sizeof(*iph));

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);

	eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->local_mac, 6);
	memcpy(eth->h_dest, np->remote_mac, 6);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}

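/*
 * Answer ARP requests for the netpoll client's IP ourselves, since
 * the normal ARP path is bypassed while packets are trapped.
 */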
static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	u32 sip, tip;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
				 (2 * skb->dev->addr_len) +
				 (2 * sizeof(u32)))))
		return;

	skb->h.raw = skb->nh.raw = skb->data;
	arp = skb->nh.arph;

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp + 1) + skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4 + skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != htonl(np->local_ip) || LOOPBACK(tip) || MULTICAST(tip))
		return;

	size = sizeof(struct arphdr) + 2 * (skb->dev->addr_len + 4);
	send_skb = find_skb(np, size + LL_RESERVED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	send_skb->nh.raw = send_skb->data;
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */
	if (np->dev->hard_header &&
	    np->dev->hard_header(send_skb, skb->dev, ptype,
				 np->remote_mac, np->local_mac,
				 send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, np->remote_mac, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}

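/*
 * Receive-path hook: consume ARP requests and UDP packets aimed at
 * the netpoll client, returning 1 for frames we swallowed. While
 * trapped, all other traffic is dropped as well.
 */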
int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np = skb->dev->npinfo->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == __constant_htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		arp_reply(skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr) < 0)
		goto out;
	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
		goto out;
	if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh + 1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

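/*
 * Parse a config string of the form
 *   local_port@local_ip/dev,remote_port@remote_ip/remote_mac
 * e.g. "6665@192.168.0.1/eth0,6666@192.168.0.2/00:09:6B:DC:3F:A2"
 * where every field except the remote IP may be left empty to keep
 * its default.
 */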
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;
	printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port);

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = ntohl(in_aton(cur));
		cur = delim;

		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name);

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;
	printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port);

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = ntohl(in_aton(cur));
	cur = delim + 1;

	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->remote_ip));

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	printk(KERN_INFO "%s: remote ethernet address "
	       "%02x:%02x:%02x:%02x:%02x:%02x\n",
	       np->name,
	       np->remote_mac[0],
	       np->remote_mac[1],
	       np->remote_mac[2],
	       np->remote_mac[3],
	       np->remote_mac[4],
	       np->remote_mac[5]);

	return 0;

 parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}

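/*
 * Bind a netpoll client to its device: allocate per-device state,
 * force the interface up if need be, pick up defaults for the local
 * MAC and IP, and finally publish npinfo on the net device.
 */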
int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev_name)
		ndev = dev_get_by_name(np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -1;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo)
			goto release;

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;
		spin_lock_init(&npinfo->poll_lock);
		npinfo->poll_owner = -1;
		npinfo->tries = MAX_RETRIES;
		spin_lock_init(&npinfo->rx_lock);
	} else
		npinfo = ndev->npinfo;

	if (!ndev->poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_shlock();
		if (dev_change_flags(ndev, ndev->flags | IFF_UP) < 0) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, np->dev_name);
			rtnl_shunlock();
			goto release;
		}
		rtnl_shunlock();

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!memcmp(np->local_mac, "\0\0\0\0\0\0", 6) && ndev->dev_addr)
		memcpy(np->local_mac, ndev->dev_addr, 6);

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			goto release;
		}

		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

 release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return -1;
}

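/* Detach a netpoll client from its device and drop our reference. */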
void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo && npinfo->rx_np == np) {
			spin_lock_irqsave(&npinfo->rx_lock, flags);
			npinfo->rx_np = NULL;
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
			spin_unlock_irqrestore(&npinfo->rx_lock, flags);
		}
		dev_put(np->dev);
	}

	np->dev = NULL;
}

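/*
 * While the trap count is nonzero, received packets are diverted to
 * netpoll instead of the normal network stack.
 */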
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);
EXPORT_SYMBOL(netpoll_queue);