 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * PACKET - implements raw packet sockets.
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <net/inet_common.h>
/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside
     of the device, but higher levels still should reserve
     dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); others are not.

   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

Incoming, dev->hard_header != NULL
   mac_header -> ll header

Outgoing, dev->hard_header != NULL
   mac_header -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the
		 ll header.  PPP does this, which is wrong, because it
		 introduces asymmetry between the rx and tx paths.

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!

If dev->hard_header == NULL we are unlikely to restore a sensible ll header.

dev->hard_header != NULL
   mac_header -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)

We should set nh.raw on output to the correct position,
the packet classifier depends on it.
*/
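/*
 * Illustrative user-space view of the ll-header rules above (a sketch, not
 * part of the original file): a SOCK_RAW packet socket sees frames starting
 * at the link-layer header, while SOCK_DGRAM sees them starting at the
 * network header and gets the link-level info via sockaddr_ll instead:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	unsigned char buf[2048];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *	// on an Ethernet device, buf[0..13] is the Ethernet header
 *
 * With SOCK_DGRAM the kernel pulls the ll header on receive and builds it
 * on transmit from the sockaddr_ll passed to sendto().
 */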
/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
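/*
 * Worked example of the block layout arithmetic (numbers illustrative): if
 * sizeof(struct tpacket_block_desc) were 48, BLK_HDR_LEN = ALIGN(48, 8) = 48
 * and BLK_PLUS_PRIV(13) = 48 + ALIGN(13, 8) = 48 + 16 = 64, i.e. the first
 * frame of a block starts 64 bytes in when 13 bytes of per-block private
 * area are requested.
 */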
#define PGV_FROM_VMALLOC 1

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
			struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(unsigned long);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_init_blk_timer(struct packet_sock *,
		struct tpacket_kbdq_core *,
		void (*func) (unsigned long));
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)
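/* e.g. with knum_blocks == 4, the active block number cycles 0, 1, 2, 3, 0, ... */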
static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->fanout)
		__fanout_link(sk, po);
	else
		dev_add_pack(&po->prot_hook);
}

/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	__unregister_prot_hook(sk, sync);
}

static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
	}
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	default:
		WARN(1, "TPACKET version not supported.\n");
		return 0;
	}
}

static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}
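/*
 * Worked example of the lookup arithmetic above: with frames_per_block == 4
 * and frame_size == 2048, frame position 10 lives in pg_vec[10 / 4] ==
 * pg_vec[2], at byte offset (10 % 4) * 2048 == 4096 into that block.
 */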
static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		int tx_ring,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;

	spin_lock(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_init_blk_timer(struct packet_sock *po,
		struct tpacket_kbdq_core *pkc,
		void (*func) (unsigned long))
{
	init_timer(&pkc->retire_blk_timer);
	pkc->retire_blk_timer.data = (long)po;
	pkc->retire_blk_timer.function = func;
	pkc->retire_blk_timer.expires = jiffies;
}

static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
{
	struct tpacket_kbdq_core *pkc;

	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
	struct ethtool_cmd ecmd;
	int err;
	u32 speed;

	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_settings(dev, &ecmd);
	speed = ethtool_cmd_speed(&ecmd);
	if (!err) {
		/*
		 * If the link speed is so slow you don't really
		 * need to worry about perf anyways
		 */
		if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
			return DEFAULT_PRB_RETIRE_TOV;
		} else {
			msec = 1;
			div = speed / 1000;
		}
	}

	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u, int tx_ring)
{
	struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start	= pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks	= req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats_u.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;

	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po, tx_ring);
	prb_open_block(p1, pbd);
}
/*  Do NOT update the last_blk_num first.
 *  Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}

/*
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 */
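/*
 * Rough arithmetic behind the derived timeout: a 1 MB block is ~8 Mbit, so a
 * 1 Gbit/s link needs about 8 Mbit / 1000 Mbit/s = 8 ms to fill it, which is
 * where the ~8 ms figure above comes from.  prb_calc_retire_blk_tmo() does
 * the same computation from the actual block size and link speed, and links
 * below 1 Gbit/s (or with unknown speed) just use DEFAULT_PRB_RETIRE_TOV.
 */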
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
	struct packet_sock *po = (struct packet_sock *)data;
	struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		while (atomic_read(&pkc->blk_fill_in_prog)) {
			/* Waiting for skb_copy_bits to finish... */
			cpu_relax();
		}
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pkc, pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			}
			/* Case 2. Queue was frozen, user-space caught up,
			 * now the link went idle and the timer fired.
			 * We don't have a block to close, so we open this
			 * block and restart the timer.
			 * Opening a block thaws the queue and restarts the
			 * timer; thawing/timer-refresh is a side effect.
			 */
			prb_open_block(pkc, pbd);
			goto out;
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}
static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));
#endif
}
/*
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose.
 *	 Because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	if (po->stats.tp_drops)
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time */
		struct timespec ts;
		getnstimeofday(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}
static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {

		/* We could have just memset this but we will lose the
		 * flexibility of making the priv area sticky
		 */
		BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
		BLOCK_NUM_PKTS(pbd1) = 0;
		BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		getnstimeofday(&ts);
		h1->ts_first_pkt.ts_sec = ts.tv_sec;
		h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
		pkc1->pkblk_start = (char *)pbd1;
		pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
		BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
		pbd1->version = pkc1->version;
		pkc1->prev = pkc1->nxt_offset;
		pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
		prb_thaw_queue(pkc1);
		_prb_refresh_rx_retire_blk_timer(pkc1);

		return;
	}

	WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
		pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
}
/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
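/*
 * Illustrative user-space counterpart of the freeze/thaw protocol above (a
 * sketch, not part of the original file; walk_packets() is a hypothetical
 * helper): the reader hands a block back to the kernel by clearing its
 * status word, which is what allows a frozen queue to thaw:
 *
 *	struct tpacket_block_desc *pbd;		// points into the mmap'ed ring
 *	if (pbd->hdr.bh1.block_status & TP_STATUS_USER) {
 *		walk_packets(pbd);		// consume all packets in it
 *		pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;	// release
 *	}
 */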
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats_u.stats3.tp_freeze_q_cnt++;
}

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3. Open this block and return the offset where the first packet
	 *    needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			while (atomic_read(&pkc->blk_fill_in_prog)) {
				/* Waiting for skb_copy_bits to finish... */
				cpu_relax();
			}
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}

	WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
}

static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
		struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}
static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	atomic_dec(&pkc->blk_fill_in_prog);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (vlan_tx_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
		ppd->tp_status = TP_STATUS_VLAN_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
			struct tpacket_kbdq_core *pkc,
			struct tpacket_block_desc *pbd,
			unsigned int len)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	atomic_inc(&pkc->blk_fill_in_prog);
	prb_run_all_ft_ops(pkc, ppd);
}
/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if that last block which caused the queue to freeze
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pkc, pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue;
			 * thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. user_space hasn't caught up yet.
	 * Queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}
static void *packet_current_rx_frame(struct packet_sock *po,
		struct sk_buff *skb,
		int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, status, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		return NULL;
	}
}

static void *prb_lookup_block(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int idx,
		int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
		struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	default:
		WARN(1, "TPACKET version not supported.\n");
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}
static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	struct sock *sk = &po->sk;
	bool has_room;

	if (po->prot_hook.func != tpacket_rcv)
		return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
			<= sk->sk_rcvbuf;

	spin_lock(&sk->sk_receive_queue.lock);
	if (po->tp_version == TPACKET_V3)
		has_room = prb_lookup_block(po, &po->rx_ring,
					po->rx_ring.prb_bdqc.kactive_blk_num,
					TP_STATUS_KERNEL);
	else
		has_room = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head,
					TP_STATUS_KERNEL);
	spin_unlock(&sk->sk_receive_queue.lock);

	return has_room;
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
{
	int x = atomic_read(&f->rr_cur) + 1;

	if (x >= num)
		x = 0;

	return x;
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return (((u64)skb->rxhash) * num) >> 32;
}
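/*
 * The multiply-shift above maps the 32-bit rxhash uniformly onto [0, num)
 * without a modulo: (hash * num) >> 32 scales the hash by num / 2^32.
 * E.g. hash 0x80000000 with num == 4 gives (0x80000000ULL * 4) >> 32 == 2.
 */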
static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	int cur, old;

	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)
		cur = old;
	return cur;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, unsigned int skip,
					  unsigned int num)
{
	unsigned int i, j;

	i = j = min_t(int, f->next[idx], num - 1);
	do {
		if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
			if (i != j)
				f->next[idx] = i;
			return i;
		}
		if (++i == num)
			i = 0;
	} while (i != j);

	return idx;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}
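/*
 * The fanout flags travel in the high byte of the u16 type_flags word that
 * user space passes in, and packet_fanout.flags stores that byte already
 * shifted down (see fanout_add() below); hence the (flag >> 8) above.
 * E.g., assuming the uapi value PACKET_FANOUT_FLAG_ROLLOVER == 0x1000, it
 * is stored internally as 0x10.
 */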
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;
		}
		skb_get_rxhash(skb);
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
		break;
	}

	po = pkt_sk(f->arr[idx]);
	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
	    unlikely(!packet_rcv_has_room(po, skb))) {
		idx = fanout_demux_rollover(f, skb, idx, idx, num);
		po = pkt_sk(f->arr[idx]);
	}

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
		return true;

	return false;
}
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->flags != flags)
		goto out;
	if (!match) {
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	return err;
}
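/*
 * Minimal user-space sketch of joining a fanout group (illustrative, not
 * part of the original file): the 32-bit PACKET_FANOUT option value packs
 * the 16-bit group id in the low word and type_flags in the high word,
 * matching the (id, type_flags) pair fanout_add() receives:
 *
 *	int id = 42;	// arbitrary group id
 *	int arg = id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */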
static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	mutex_lock(&fanout_mutex);
	po->fanout = NULL;

	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is a no-op.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */
static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */
	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */
	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */
	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_unlock;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_unlock;

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	skb_probe_transport_header(skb, 0);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
static unsigned int run_filter(const struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = SK_RUN_FILTER(filter, skb);
	rcu_read_unlock();

	return res;
}

/*
 * This function does lazy skb cloning in the hope that most packets
 * are discarded by BPF.
 *
 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return skb to the original state on exit,
 * we will not harm anyone.
 */
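/*
 * run_filter() above executes whatever classic BPF program user space
 * attached with SO_ATTACH_FILTER.  A minimal sketch of the user-space side
 * (illustrative, not part of the original file):
 *
 *	struct sock_filter code[] = {
 *		{ 0x06, 0, 0, 0xffffffff },	// BPF_RET|BPF_K: accept all
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * A filter return value of 0 drops the packet; a smaller non-zero value
 * caps snaplen, as packet_rcv() below shows.
 */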
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that the corresponding packet head is
		 * never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		consume_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_drops++;
	atomic_inc(&sk->sk_drops);
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	consume_skb(skb);
	return 0;
}
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		struct tpacket3_hdr *h3;
		void *raw;
	} h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct timeval tv;
	struct timespec ts;
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned int maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
			po->tp_reserve;
		macoff = netoff - maclen;
	}
	if (po->tp_version <= TPACKET_V2) {
		if (macoff + snaplen > po->rx_ring.frame_size) {
			if (po->copy_thresh &&
			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
				if (skb_shared(skb)) {
					copy_skb = skb_clone(skb, GFP_ATOMIC);
				} else {
					copy_skb = skb_get(skb);
					skb_head = skb->data;
				}
				if (copy_skb)
					skb_set_owner_r(copy_skb, sk);
			}
			snaplen = po->rx_ring.frame_size - macoff;
			if ((int)snaplen < 0)
				snaplen = 0;
		}
	}
	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_rx_frame(po, skb,
					TP_STATUS_KERNEL, (macoff+snaplen));
	if (!h.raw)
		goto ring_is_full;
	if (po->tp_version <= TPACKET_V2) {
		packet_increment_rx_head(po, &po->rx_ring);
	/*
	 * LOSING will be reported till you read the stats,
	 * because it's COR - Clear On Read.
	 * Anyways, moving it for V1/V2 only as V3 doesn't need this
	 */
		if (po->stats.tp_drops)
			status |= TP_STATUS_LOSING;
	}
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			tv = ktime_to_timeval(skb->tstamp);
		else
			do_gettimeofday(&tv);
		h.h1->tp_sec = tv.tv_sec;
		h.h1->tp_usec = tv.tv_usec;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		else
			getnstimeofday(&ts);
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (vlan_tx_tag_present(skb)) {
			h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
			status |= TP_STATUS_VLAN_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
		}
		h.h2->tp_padding = 0;
		hdrlen = sizeof(*h.h2);
		break;
	case TPACKET_V3:
		/* tp_nxt_offset and vlan are already populated above.
		 * So DON'T clear those fields here
		 */
		h.h3->tp_status |= status;
		h.h3->tp_len = skb->len;
		h.h3->tp_snaplen = snaplen;
		h.h3->tp_mac = macoff;
		h.h3->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		else
			getnstimeofday(&ts);
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		hdrlen = sizeof(*h.h3);
		break;
	default:
		BUG();
	}
	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	{
		u8 *start, *end;

		if (po->tp_version <= TPACKET_V2) {
			end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
				+ macoff + snaplen);
			for (start = h.raw; start < end; start += PAGE_SIZE)
				flush_dcache_page(pgv_to_page(start));
		}
	}
#endif
	if (po->tp_version <= TPACKET_V2)
		__packet_set_status(po, h.raw, status);
	else
		prb_clear_blk_fill_status(&po->rx_ring);

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}
static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);
	void *ph;

	if (likely(po->tx_ring.pg_vec)) {
		ph = skb_shinfo(skb)->destructor_arg;
		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
		atomic_dec(&po->tx_ring.pending);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
	}

	sock_wfree(skb);
}

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, int size_max,
		__be16 proto, unsigned char *addr, int hlen)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} ph;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	void *data;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = ph.raw;

	switch (po->tp_version) {
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
		return -EMSGSIZE;
	}

	skb_reserve(skb, hlen);
	skb_reset_network_header(skb);
	skb_probe_transport_header(skb, 0);

	if (po->tp_tx_has_off) {
		int off_min, off_max, off;
		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
		off_max = po->tx_ring.frame_size - tp_len;
		if (sock->type == SOCK_DGRAM) {
			switch (po->tp_version) {
			case TPACKET_V2:
				off = ph.h2->tp_net;
				break;
			default:
				off = ph.h1->tp_net;
				break;
			}
		} else {
			switch (po->tp_version) {
			case TPACKET_V2:
				off = ph.h2->tp_mac;
				break;
			default:
				off = ph.h1->tp_mac;
				break;
			}
		}
		if (unlikely((off < off_min) || (off_max < off)))
			return -EINVAL;
		data = ph.raw + off;
	} else {
		data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
	}
	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			pr_err("packet size is too short (%d < %d)\n",
			       tp_len, dev->hard_header_len);
			return -EINVAL;
		}

		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				dev->hard_header_len);
		if (unlikely(err))
			return err;

		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;
	}

	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceed the number of skb frags(%lu)\n",
			       MAX_SKB_FRAGS);
			return -EFAULT;
		}

		page = pgv_to_page(data);
		data += len;
		flush_dcache_page(page);
		get_page(page);
		skb_fill_page_desc(skb, nr_frags, page, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	return tp_len;
}
2037 struct sk_buff *skb;
2038 struct net_device *dev;
2040 bool need_rls_dev = false;
2041 int err, reserve = 0;
2043 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
2044 int tp_len, size_max;
2045 unsigned char *addr;
2047 int status = TP_STATUS_AVAILABLE;
2050 mutex_lock(&po->pg_vec_lock);
2052 if (saddr == NULL) {
2053 dev = po->prot_hook.dev;
2058 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2060 if (msg->msg_namelen < (saddr->sll_halen
2061 + offsetof(struct sockaddr_ll,
2064 proto = saddr->sll_protocol;
2065 addr = saddr->sll_addr;
2066 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2067 need_rls_dev = true;
2071 if (unlikely(dev == NULL))
2074 reserve = dev->hard_header_len;
2077 if (unlikely(!(dev->flags & IFF_UP)))
2080 size_max = po->tx_ring.frame_size
2081 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2083 if (size_max > dev->mtu + reserve)
2084 size_max = dev->mtu + reserve;
2087 ph = packet_current_frame(po, &po->tx_ring,
2088 TP_STATUS_SEND_REQUEST);
2090 if (unlikely(ph == NULL)) {
2095 status = TP_STATUS_SEND_REQUEST;
2096 hlen = LL_RESERVED_SPACE(dev);
2097 tlen = dev->needed_tailroom;
2098 skb = sock_alloc_send_skb(&po->sk,
2099 hlen + tlen + sizeof(struct sockaddr_ll),
2102 if (unlikely(skb == NULL))
2105 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2108 if (unlikely(tp_len < 0)) {
2110 __packet_set_status(po, ph,
2111 TP_STATUS_AVAILABLE);
2112 packet_increment_head(&po->tx_ring);
2116 status = TP_STATUS_WRONG_FORMAT;
2122 skb->destructor = tpacket_destruct_skb;
2123 __packet_set_status(po, ph, TP_STATUS_SENDING);
2124 atomic_inc(&po->tx_ring.pending);
2126 status = TP_STATUS_SEND_REQUEST;
2127 err = dev_queue_xmit(skb);
2128 if (unlikely(err > 0)) {
2129 err = net_xmit_errno(err);
2130 if (err && __packet_get_status(po, ph) ==
2131 TP_STATUS_AVAILABLE) {
2132 /* skb was destructed already */
2137 * skb was dropped but not destructed yet;
2138 * let's treat it like congestion or err < 0
2142 packet_increment_head(&po->tx_ring);
2144 } while (likely((ph != NULL) ||
2145 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
2146 (atomic_read(&po->tx_ring.pending))))
2153 __packet_set_status(po, ph, status);
2159 mutex_unlock(&po->pg_vec_lock);
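/*
 * User-space counterpart of tpacket_snd() above, as an illustrative sketch
 * (sizes arbitrary, error handling omitted; not part of the original file):
 * the sender maps a PACKET_TX_RING, marks frames TP_STATUS_SEND_REQUEST and
 * kicks the kernel with send():
 *
 *	struct tpacket_req req = { .tp_block_size = 4096, .tp_block_nr = 64,
 *				   .tp_frame_size = 4096, .tp_frame_nr = 64 };
 *	setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
 *	char *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct tpacket_hdr *hdr = (struct tpacket_hdr *)ring; // first frame
 *	// copy the frame data in after the header, set hdr->tp_len, then:
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);	// flush all queued frames
 */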
static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					size_t reserve, size_t len,
					size_t linear, int noblock,
					int *err)
{
	struct sk_buff *skb;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
static int packet_snd(struct socket *sock,
		      struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	bool need_rls_dev = false;
	unsigned char *addr;
	int err, reserve = 0;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int offset = 0;
	int vnet_hdr_len;
	struct packet_sock *po = pkt_sk(sk);
	unsigned short gso_type = 0;
	int hlen, tlen;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr == NULL) {
		dev = po->prot_hook.dev;
		proto = po->num;
		addr = NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		proto = saddr->sll_protocol;
		addr = saddr->sll_addr;
		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
		need_rls_dev = true;
	}

	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	if (po->has_vnet_hdr) {
		vnet_hdr_len = sizeof(vnet_hdr);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto out_unlock;

		len -= vnet_hdr_len;

		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
				       vnet_hdr_len);
		if (err < 0)
			goto out_unlock;

		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
		      vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						 vnet_hdr.csum_offset + 2;

		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto out_unlock;

		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
				gso_type = SKB_GSO_TCPV4;
				break;
			case VIRTIO_NET_HDR_GSO_TCPV6:
				gso_type = SKB_GSO_TCPV6;
				break;
			case VIRTIO_NET_HDR_GSO_UDP:
				gso_type = SKB_GSO_UDP;
				break;
			default:
				goto out_unlock;
			}

			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
				gso_type |= SKB_GSO_TCP_ECN;

			if (vnet_hdr.gso_size == 0)
				goto out_unlock;
		}
	}

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
		goto out_unlock;

	err = -ENOBUFS;
	hlen = LL_RESERVED_SPACE(dev);
	tlen = dev->needed_tailroom;
	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
			       msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_set_network_header(skb, reserve);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM &&
	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
		goto out_free;

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
	if (err)
		goto out_free;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_free;

	if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_free;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	if (po->has_vnet_hdr) {
		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
						  vnet_hdr.csum_offset)) {
				err = -EINVAL;
				goto out_free;
			}
		}

		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

		len += vnet_hdr_len;
	}

	skb_probe_transport_header(skb, reserve);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	if (need_rls_dev)
		dev_put(dev);

	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev && need_rls_dev)
		dev_put(dev);
out:
	return err;
}

static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	else
		return packet_snd(sock, msg, len);
}
/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct net *net;
	union tpacket_req_u req_u;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	mutex_lock(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->packet.sklist_lock);

	sock_prot_inuse_add(net, sk->sk_prot, -1);

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, false);
	if (po->prot_hook.dev) {
		dev_put(po->prot_hook.dev);
		po->prot_hook.dev = NULL;
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	if (po->rx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 0);
	}

	if (po->tx_ring.pg_vec) {
		memset(&req_u, 0, sizeof(req_u));
		packet_set_ring(sk, &req_u, 1, 1);
	}

	/*
	 *	Now the socket is dead. No more input will appear.
	 */

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);
	sk_refcnt_debug_release(sk);

	return 0;
}

/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, true);
	po->num = protocol;
	po->prot_hook.type = protocol;
	if (po->prot_hook.dev)
		dev_put(po->prot_hook.dev);
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (!dev || (dev->flags & IFF_UP)) {
		register_prot_hook(sk);
	} else {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

	spin_unlock(&po->bind_lock);
	return 0;
}

/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(sock_net(sk), name);
	if (dev)
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
	return err;
}

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);

out:
	return err;
}

static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};
/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		register_prot_hook(sk);
	}

	mutex_lock(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	mutex_unlock(&net->packet.sklist_lock);

	preempt_disable();
	sock_prot_inuse_add(net, &packet_proto, 1);
	preempt_enable();

	return 0;
out:
	return err;
}

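/*
 * Illustrative user-space sketch (not part of the kernel build): the
 * socket() call that lands in packet_create(). ETH_P_ALL asks for every
 * protocol; CAP_NET_RAW is required, so this normally runs as root.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static int open_packet_socket(void)
{
	/* Fails with EPERM without CAP_NET_RAW, and with
	 * ESOCKTNOSUPPORT for any type other than SOCK_RAW,
	 * SOCK_DGRAM or SOCK_PACKET. */
	return socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
}
#endif
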
static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	int copied, err;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
		 sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else
		spin_unlock_bh(&sk->sk_error_queue.lock);

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}

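/*
 * Illustrative user-space sketch (not part of the kernel build): draining
 * a TX timestamp from the error queue, matching the PACKET_TX_TIMESTAMP
 * cmsg emitted above. It assumes the socket was configured to generate
 * transmit timestamps (e.g. via SO_TIMESTAMPING).
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/errqueue.h>

static void drain_tx_timestamp(int fd)
{
	char data[2048], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_TX_TIMESTAMP) {
			struct sock_extended_err *serr =
				(void *)CMSG_DATA(cmsg);
			/* serr->ee_errno is ENOMSG for timestamps */
		}
}
#endif
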
/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	struct sockaddr_ll *sll;
	int vnet_hdr_len = 0;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	if (flags & MSG_ERRQUEUE) {
		err = packet_recv_error(sk, msg, len);
		goto out;
	}

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN, if device have just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't see and worry about blocking
	 *	retries.
	 */

	if (skb == NULL)
		goto out;

	if (pkt_sk(sk)->has_vnet_hdr) {
		struct virtio_net_hdr vnet_hdr = { 0 };

		err = -EINVAL;
		vnet_hdr_len = sizeof(vnet_hdr);
		if (len < vnet_hdr_len)
			goto out_free;

		len -= vnet_hdr_len;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			vnet_hdr.hdr_len = skb_headlen(skb);
			vnet_hdr.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else if (sinfo->gso_type & SKB_GSO_FCOE)
				goto out_free;
			else
				BUG();
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			vnet_hdr.csum_start = skb_checksum_start_offset(skb);
			vnet_hdr.csum_offset = skb->csum_offset;
		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
		} /* else everything is zero */

		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
				     vnet_hdr_len);
		if (err < 0)
			goto out_free;
	}

	/*
	 *	If the address length field is there to be filled in, we fill
	 *	it in now.
	 */

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 *	You lose any data beyond the buffer you gave. If it worries a
	 *	user program they can ask the device for its MTU anyway.
	 */

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		if (vlan_tx_tag_present(skb)) {
			aux.tp_vlan_tci = vlan_tx_tag_get(skb);
			aux.tp_status |= TP_STATUS_VLAN_VALID;
		} else {
			aux.tp_vlan_tci = 0;
		}
		aux.tp_padding = 0;
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}

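/*
 * Illustrative user-space sketch (not part of the kernel build): a plain
 * recvmsg() on a packet socket, plus parsing of the PACKET_AUXDATA control
 * message filled in above when that option is enabled.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>

static void read_one_frame(int fd)
{
	char frame[65536], ctrl[256];
	struct sockaddr_ll from;
	struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
	struct msghdr msg = {
		.msg_name = &from, .msg_namelen = sizeof(from),
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;
	ssize_t n = recvmsg(fd, &msg, 0);

	if (n < 0)
		return;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA) {
			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
			/* aux->tp_len is the original wire length; n may be
			 * smaller if the frame was truncated (MSG_TRUNC). */
		}
}
#endif
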
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev)
		strncpy(uaddr->sa_data, dev->name, 14);
	else
		memset(uaddr->sa_data, 0, 14);
	rcu_read_unlock();
	*uaddr_len = sizeof(*uaddr);

	return 0;
}

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	rcu_read_unlock();
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}

static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_mc_add(dev, i->addr);
		else
			return dev_mc_del(dev, i->addr);
		break;
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
		break;
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
		break;
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_uc_add(dev, i->addr);
		else
			return dev_uc_del(dev, i->addr);
		break;
	default:
		break;
	}
	return 0;
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}

static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}

done:
	rtnl_unlock();
	return err;
}

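/*
 * Illustrative user-space sketch (not part of the kernel build):
 * PACKET_ADD_MEMBERSHIP with PACKET_MR_PROMISC, which reaches
 * packet_mc_add() above and is the usual way to enable promiscuous mode
 * on an interface without touching SIOCSIFFLAGS directly.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <string.h>

static int enable_promisc(int fd, const char *ifname)
{
	struct packet_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = if_nametoindex(ifname);
	mreq.mr_type = PACKET_MR_PROMISC;
	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}
#endif
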
static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
				if (dev)
					packet_dev_mc(dev, ml, -1);
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
		if (dev != NULL)
			packet_dev_mc(dev, ml, -1);
		kfree(ml);
	}
	rtnl_unlock();
}

static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}
	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		union tpacket_req_u req_u;
		int len;

		switch (po->tp_version) {
		case TPACKET_V1:
		case TPACKET_V2:
			len = sizeof(req_u.req);
			break;
		case TPACKET_V3:
		default:
			len = sizeof(req_u.req3);
			break;
		}
		if (optlen < len)
			return -EINVAL;
		if (pkt_sk(sk)->has_vnet_hdr)
			return -EINVAL;
		if (copy_from_user(&req_u.req, optval, len))
			return -EFAULT;
		return packet_set_ring(sk, &req_u, 0,
			optname == PACKET_TX_RING);
	}
	case PACKET_COPY_THRESH:
	{
		int val;
		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
	case PACKET_VERSION:
	{
		int val;
		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
		case TPACKET_V2:
		case TPACKET_V3:
			po->tp_version = val;
			return 0;
		default:
			return -EINVAL;
		}
	}
	case PACKET_RESERVE:
	{
		unsigned int val;
		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_reserve = val;
		return 0;
	}
	case PACKET_LOSS:
	{
		unsigned int val;
		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_loss = !!val;
		return 0;
	}
	case PACKET_AUXDATA:
	{
		int val;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->origdev = !!val;
		return 0;
	}
	case PACKET_VNET_HDR:
	{
		int val;
		if (sock->type != SOCK_RAW)
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->has_vnet_hdr = !!val;
		return 0;
	}
	case PACKET_TIMESTAMP:
	{
		int val;
		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_tstamp = val;
		return 0;
	}
	case PACKET_FANOUT:
	{
		int val;
		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		return fanout_add(sk, val & 0xffff, val >> 16);
	}
	case PACKET_TX_HAS_OFF:
	{
		unsigned int val;
		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_tx_has_off = !!val;
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}
}

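/*
 * Illustrative user-space sketch (not part of the kernel build): selecting
 * TPACKET_V2 and requesting an RX ring. PACKET_VERSION must be set before
 * a ring exists (note the -EBUSY checks above); the block/frame geometry
 * here is an arbitrary example and is subject to the sanity tests in
 * packet_set_ring().
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <string.h>

static int setup_rx_ring(int fd)
{
	int version = TPACKET_V2;
	struct tpacket_req req;

	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
		       &version, sizeof(version)) < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.tp_block_size = 1 << 16;	/* must be a multiple of PAGE_SIZE */
	req.tp_frame_size = 1 << 11;	/* must be TPACKET_ALIGNMENT aligned */
	req.tp_block_nr   = 64;
	req.tp_frame_nr   = (req.tp_block_size / req.tp_frame_size)
			    * req.tp_block_nr;
	return setsockopt(fd, SOL_PACKET, PACKET_RX_RING,
			  &req, sizeof(req));
}
#endif
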
static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val, lv = sizeof(val);
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data = &val;
	struct tpacket_stats st;
	union tpacket_stats_u st_u;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (po->tp_version == TPACKET_V3) {
			lv = sizeof(struct tpacket_stats_v3);
			memcpy(&st_u.stats3, &po->stats,
			       sizeof(struct tpacket_stats));
			st_u.stats3.tp_freeze_q_cnt =
					po->stats_u.stats3.tp_freeze_q_cnt;
			st_u.stats3.tp_packets += po->stats.tp_drops;
			data = &st_u.stats3;
		} else {
			lv = sizeof(struct tpacket_stats);
			st = po->stats;
			st.tp_packets += st.tp_drops;
			data = &st;
		}
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		break;
	case PACKET_AUXDATA:
		val = po->auxdata;
		break;
	case PACKET_ORIGDEV:
		val = po->origdev;
		break;
	case PACKET_VNET_HDR:
		val = po->has_vnet_hdr;
		break;
	case PACKET_VERSION:
		val = po->tp_version;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		case TPACKET_V3:
			val = sizeof(struct tpacket3_hdr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case PACKET_RESERVE:
		val = po->tp_reserve;
		break;
	case PACKET_LOSS:
		val = po->tp_loss;
		break;
	case PACKET_TIMESTAMP:
		val = po->tp_tstamp;
		break;
	case PACKET_FANOUT:
		val = (po->fanout ?
		       ((u32)po->fanout->id |
			((u32)po->fanout->type << 16) |
			((u32)po->fanout->flags << 24)) :
		       0);
		break;
	case PACKET_TX_HAS_OFF:
		val = po->tp_tx_has_off;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}

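/*
 * Illustrative user-space sketch (not part of the kernel build): reading
 * PACKET_STATISTICS. Note the read-and-reset semantics above (the memset
 * clears the counters), so each call returns deltas since the last one.
 */
#if 0
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <stdio.h>

static void print_stats(int fd)
{
	struct tpacket_stats st;
	socklen_t len = sizeof(st);

	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
		printf("packets %u, drops %u\n", st.tp_packets, st.tp_drops);
}
#endif
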
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
	struct sock *sk;
	struct net_device *dev = data;
	struct net *net = dev_net(dev);

	rcu_read_lock();
	sk_for_each_rcu(sk, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist(dev, po->mclist, -1);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__unregister_prot_hook(sk, false);
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					po->ifindex = -1;
					if (po->prot_hook.dev)
						dev_put(po->prot_hook.dev);
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num)
					register_prot_hook(sk);
				spin_unlock(&po->bind_lock);
			}
			break;
		}
	}
	rcu_read_unlock();
	return NOTIFY_DONE;
}

static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = sk_wmem_alloc_get(sk);

		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

#ifdef CONFIG_INET
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}

static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_rx_frame(po, &po->rx_ring,
			TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);

	return mask;
}

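/*
 * Illustrative user-space sketch (not part of the kernel build): waiting
 * for ring frames with poll(). POLLIN here is raised by packet_poll()
 * above once an RX frame is no longer owned by the kernel.
 */
#if 0
#include <poll.h>

static int wait_for_frames(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLERR };

	return poll(&pfd, 1, -1);	/* block until a frame is ready */
}
#endif
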
/* Dirty? Well, I still did not learn better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open	=	packet_mm_open,
	.close	=	packet_mm_close,
};

static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

static char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer = NULL;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);

	if (buffer)
		return buffer;

	/*
	 * __get_free_pages failed, fall back to vmalloc
	 */
	buffer = vzalloc((1 << order) * PAGE_SIZE);

	if (buffer)
		return buffer;

	/*
	 * vmalloc failed, lets dig into swap here
	 */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *)__get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/*
	 * complete and utter failure
	 */
	return NULL;
}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err = -EINVAL;
	/* Added to avoid minimal code churn */
	struct tpacket_req *req = &req_u->req;

	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
		WARN(1, "Tx-ring is not supported.\n");
		goto out;
	}

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (atomic_read(&rb->pending))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		case TPACKET_V3:
			po->tp_hdrlen = TPACKET3_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
		if (unlikely(rb->frames_per_block <= 0))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
		switch (po->tp_version) {
		case TPACKET_V3:
			/* Transmit path is not supported. We checked
			 * it above but just being paranoid
			 */
			if (!tx_ring)
				init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
			break;
		default:
			break;
		}
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	lock_sock(sk);

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		po->num = 0;
		__unregister_prot_hook(sk, false);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		po->num = num;
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (closing && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
	}
	release_sock(sk);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}

static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}

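/*
 * Illustrative user-space sketch (not part of the kernel build): mapping
 * the ring and walking TPACKET_V2 frames. The mapping must cover the exact
 * combined size of the configured rings, matching the expected_size check
 * above; the geometry parameters correspond to the setup_rx_ring() sketch
 * earlier. Frames tile the blocks exactly when block_size is a multiple of
 * frame_size, so simple linear indexing works.
 */
#if 0
#include <sys/mman.h>
#include <linux/if_packet.h>

static void rx_loop(int fd, unsigned int block_size, unsigned int block_nr,
		    unsigned int frame_size)
{
	size_t map_len = (size_t)block_size * block_nr;
	unsigned int frame_nr = map_len / frame_size, i = 0;
	char *ring = mmap(NULL, map_len, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);

	if (ring == MAP_FAILED)
		return;
	for (;;) {
		struct tpacket2_hdr *hdr =
			(void *)(ring + (size_t)i * frame_size);

		/* A real reader should poll() (see earlier sketch) or use a
		 * memory barrier instead of spinning on tp_status. */
		while (!(hdr->tp_status & TP_STATUS_USER))
			;
		/* frame data starts at (char *)hdr + hdr->tp_mac */
		hdr->tp_status = TP_STATUS_KERNEL;	/* hand back */
		i = (i + 1) % frame_nr;
	}
}
#endif
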
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call =	packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};

static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);