#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <linux/virtio_net.h>
/*
 * A macvtap queue is the central object of this driver, it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	unsigned int flags;
	u16 queue_index;
	struct list_head next;
};
static struct proto macvtap_proto = {
	.obj_size = sizeof (struct macvtap_queue),
};
/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);
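/*
 * Default number of bytes copied into the skb's linear area on the
 * zerocopy transmit path when the virtio header does not supply a
 * hdr_len hint; see the copylen computation in macvtap_get_user().
 */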
#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
		      NETIF_F_TSO6)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
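/*
 * How the masks above are used in this file: TUN_OFFLOADS is the initial
 * tap_features value set in macvtap_newlink(), RX_OFFLOADS is toggled in
 * set_offload() depending on whether userspace accepts TSO frames, and
 * TAP_FEATURES is the baseline used by macvtap_handle_frame() when it
 * decides whether to segment or checksum a frame before queueing it.
 */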
static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}
/*
 * The macvtap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
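/*
 * Illustrative read-side pattern implied by the rule above (a sketch,
 * mirroring what macvtap_get_user() does when it needs the device):
 *
 *	rcu_read_lock();
 *	vlan = rcu_dereference(q->vlan);
 *	if (vlan)
 *		skb->dev = vlan->dev;
 *	rcu_read_unlock();
 *
 * Writers (the attach/detach paths below) instead run under rtnl and use
 * rtnl_dereference()/rcu_assign_pointer().
 */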
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
	struct macvlan_dev *vlan = netdev_priv(dev);

	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);

	q->queue_index = vlan->numvtaps;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);
static int macvtap_disable_queue(struct macvtap_queue *q)
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	vlan = rtnl_dereference(q->vlan);
	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
	}
/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
	struct macvlan_dev *vlan;

	vlan = rtnl_dereference(q->vlan);
	if (vlan) {
		BUG_ON(macvtap_disable_queue(q));
		RCU_INIT_POINTER(q->vlan, NULL);
		list_del_init(&q->next);
	}
/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	tap = rcu_dereference(vlan->taps[rxq % numvtaps]);

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
	}

	tap = rcu_dereference(vlan->taps[0]);
/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];

	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->vlan, NULL);
	}

	for (i = 0; i < vlan->numvtaps; i++)
		RCU_INIT_POINTER(vlan->taps[i], NULL);
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;

	for (--j; j >= 0; j--)
		sock_put(&qlist[j]->sk);
static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(dev, skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		skb_queue_tail(&q->sk.sk_receive_queue, skb);

		struct sk_buff *nskb = segs->next;
		skb_queue_tail(&q->sk.sk_receive_queue, segs);
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_ALL_CSUM) &&
		    skb_checksum_help(skb))
			goto drop;
		skb_queue_tail(&q->sk.sk_receive_queue, skb);
	}

	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	return RX_HANDLER_CONSUMED;
static int macvtap_get_minor(struct macvlan_dev *vlan)
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		printk(KERN_ERR "too many macvtap devices\n");
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
static void macvtap_free_minor(struct macvlan_dev *vlan)
	mutex_lock(&minor_lock);
		idr_remove(&minor_idr, vlan->minor);
	mutex_unlock(&minor_lock);

static struct net_device *dev_get_by_macvtap_minor(int minor)
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	mutex_unlock(&minor_lock);
static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
	struct macvlan_dev *vlan = netdev_priv(dev);

	INIT_LIST_HEAD(&vlan->queue_list);

	/* Since macvlan supports all offloads by default, make
	 * tap support all offloads also.
	 */
	vlan->tap_features = TUN_OFFLOADS;

	err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data);
static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	netdev_rx_handler_unregister(dev);
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}
static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}
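/*
 * Note: besides its usual role, tx_queue_len also bounds the per-queue
 * receive backlog here; macvtap_handle_frame() stops queueing frames to a
 * tap once skb_queue_len() on its sk_receive_queue reaches this value.
 */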
static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.setup		= macvtap_setup,
	.newlink	= macvtap_newlink,
	.dellink	= macvtap_dellink,
static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void macvtap_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
}
static int macvtap_open(struct inode *inode, struct file *file)
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct macvtap_queue *q;

	dev = dev_get_by_macvtap_minor(iminor(inode));

	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto);

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * so far only KVM virtio_net uses macvtap, enable zero copy between
	 * guest kernel and host kernel when lower device supports zerocopy
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = macvtap_set_queue(dev, file, q);
static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
	return 0;
}
static unsigned int macvtap_poll(struct file *file, poll_table *wait)
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	poll_wait(file, &q->wq.wait, wait);

	if (!skb_queue_empty(&q->sk.sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;
/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
				     struct virtio_net_hdr *vnet_hdr)
	unsigned short gso_type = 0;

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
				     current->comm);
			gso_type = SKB_GSO_UDP;
			if (skb->protocol == htons(ETH_P_IPV6))
				ipv6_proxy_select_ident(skb);
			break;
		}

		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			gso_type |= SKB_GSO_TCP_ECN;

		if (vnet_hdr->gso_size == 0)
			return -EINVAL;
	}

	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
					  vnet_hdr->csum_offset))
			return -EINVAL;
	}

	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}
static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
				    struct virtio_net_hdr *vnet_hdr)
	memset(vnet_hdr, 0, sizeof(*vnet_hdr));

	if (skb_is_gso(skb)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		/* This is a hint as to how much should be linear. */
		vnet_hdr->hdr_len = skb_headlen(skb);
		vnet_hdr->gso_size = sinfo->gso_size;
		if (sinfo->gso_type & SKB_GSO_TCPV4)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (sinfo->gso_type & SKB_GSO_TCPV6)
			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else
		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		vnet_hdr->csum_start = skb_checksum_start_offset(skb);
		if (vlan_tx_tag_present(skb))
			vnet_hdr->csum_start += VLAN_HLEN;
		vnet_hdr->csum_offset = skb->csum_offset;
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
	} /* else everything is zero */
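/*
 * What a reader of the character device sees when IFF_VNET_HDR is set
 * (a sketch based on macvtap_put_user() below): a struct virtio_net_hdr
 * built by macvtap_skb_to_vnet_hdr(), padded out to q->vnet_hdr_sz,
 * immediately followed by the Ethernet frame:
 *
 *	+-----------------------+--------------------+-----------------+
 *	| struct virtio_net_hdr | pad to vnet_hdr_sz | ethernet frame  |
 *	+-----------------------+--------------------+-----------------+
 *
 * Writers are expected to provide the same layout, which is parsed by
 * macvtap_skb_from_vnet_hdr() above.
 */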
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				struct iov_iter *from, int noblock)
	int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
	struct macvlan_dev *vlan;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	bool zerocopy = false;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		if (len < vnet_hdr_len)
			goto err;

		n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
		if (n != sizeof(vnet_hdr))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		     vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
							vnet_hdr.hdr_len)
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						vnet_hdr.csum_offset + 2;
		if (vnet_hdr.hdr_len > len)
			goto err;
	}

	if (unlikely(len < ETH_HLEN))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}
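	/*
	 * Zerocopy decision, as implemented above: copy at most copylen
	 * bytes (the virtio header's hdr_len hint, or GOODCOPY_LEN, capped
	 * at good_linear) into the linear part, and only pin the remaining
	 * user pages if they fit within MAX_SKB_FRAGS; otherwise fall back
	 * to copying the whole frame below.
	 */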
	if (vnet_hdr.hdr_len > good_linear)
		linear = good_linear;
	else
		linear = vnet_hdr.hdr_len;

	skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
				linear, noblock, &err);

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else {
		err = skb_copy_datagram_from_iter(skb, 0, from, len);
		if (!err && m && m->msg_control) {
			struct ubuf_info *uarg = m->msg_control;
			uarg->callback(uarg, false);
		}
	}

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);

	skb_probe_transport_header(skb, ETH_HLEN);

	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}
	skb->dev = vlan->dev;

	return total_len;

err:
	vlan = rcu_dereference(q->vlan);
	this_cpu_inc(vlan->pcpu_stats->tx_dropped);
static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;

	return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}
/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				struct iov_iter *iter)
	int vnet_hdr_len = 0;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
		vnet_hdr_len = q->vnet_hdr_sz;
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;

	if (vlan_tx_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
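		/*
		 * For a tagged frame the VLAN header is rebuilt inline:
		 * copy the first vlan_offset bytes (the destination and
		 * source MAC addresses), then the 4-byte proto/TCI pair
		 * built in veth above, then the rest of the frame starting
		 * at vlan_offset, as done by the copies below.
		 */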
		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
static ssize_t macvtap_do_read(struct macvtap_queue *q,
			       struct iov_iter *to,
			       int noblock)
	if (!iov_iter_count(to))
		return 0;

	prepare_to_wait(sk_sleep(&q->sk), &wait,
			TASK_INTERRUPTIBLE);

	/* Read frames from the queue */
	skb = skb_dequeue(&q->sk.sk_receive_queue);

	if (signal_pending(current)) {
		ret = -ERESTARTSYS;
	}
	/* Nothing to read, let's sleep */

	ret = macvtap_put_user(q, skb, to);
	if (unlikely(ret < 0))
		kfree_skb(skb);

	finish_wait(sk_sleep(&q->sk), &wait);
static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
	struct macvlan_dev *vlan;

	vlan = rtnl_dereference(q->vlan);

static void macvtap_put_vlan(struct macvlan_dev *vlan)

static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;

	vlan = macvtap_get_vlan(q);

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);

	macvtap_put_vlan(vlan);
static int set_offload(struct macvtap_queue *q, unsigned long arg)
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For macvtap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;
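	/*
	 * Worked example of the mapping above: an argument of
	 * TUN_F_CSUM | TUN_F_TSO4 (as passed to set_offload() via the
	 * TUNSETOFFLOAD ioctl) yields a feature_mask of
	 * NETIF_F_HW_CSUM | NETIF_F_TSO, so GRO/LRO stay enabled on the
	 * lower device and userspace keeps receiving large TSO frames;
	 * passing TUN_F_CSUM alone clears RX_OFFLOADS instead.
	 */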
	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);
/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned int u;
	int __user *sp = argp;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
		    (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;

	case TUNGETIFF:
		vlan = macvtap_get_vlan(q);
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(q->flags, &ifr->ifr_flags))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		ret = macvtap_ioctl_set_queue(file, u);

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
			     IFF_MULTI_QUEUE, up))
			return -EFAULT;

	case TUNSETSNDBUF:
		if (get_user(u, up))
			return -EFAULT;
		q->sk.sk_sndbuf = u;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;
		q->vnet_hdr_sz = s;

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN))
			return -EINVAL;
		ret = set_offload(q, arg);
	}
#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations macvtap_fops = {
	.owner		= THIS_MODULE,
	.open		= macvtap_open,
	.release	= macvtap_release,
	.read		= new_sync_read,
	.write		= new_sync_write,
	.read_iter	= macvtap_read_iter,
	.write_iter	= macvtap_write_iter,
	.poll		= macvtap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= macvtap_compat_ioctl,
#endif
};
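/*
 * Rough userspace usage sketch for the character device backed by
 * macvtap_fops (a sketch only; the /dev path assumes a udev rule for the
 * "tap%d" class device created in macvtap_device_event() below):
 *
 *	int fd = open("/dev/tap<ifindex>", O_RDWR);
 *	struct ifreq ifr;
 *	ioctl(fd, TUNGETIFF, &ifr);        /\* name and IFF_* flags of the tap *\/
 *	int hdrsz = sizeof(struct virtio_net_hdr);
 *	ioctl(fd, TUNSETVNETHDRSZ, &hdrsz);
 *
 * read() and write() then carry one frame per call, each prefixed by a
 * struct virtio_net_hdr, because IFF_VNET_HDR is set by default in
 * macvtap_open().
 */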
static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	struct iov_iter from;
	iov_iter_init(&from, WRITE, m->msg_iov, m->msg_iovlen, total_len);
	return macvtap_get_user(q, m, &from, m->msg_flags & MSG_DONTWAIT);
}

static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *m, size_t total_len,
			   int flags)
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	iov_iter_init(&to, READ, m->msg_iov, m->msg_iovlen, total_len);
	ret = macvtap_do_read(q, &to, flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg = macvtap_sendmsg,
	.recvmsg = macvtap_recvmsg,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
	struct macvtap_queue *q;
	if (file->f_op != &macvtap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);
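/*
 * Typical in-kernel usage sketch for the export above (this mirrors how a
 * backend such as vhost-net consumes a tap; the exact caller is outside
 * this file and only assumed here):
 *
 *	struct socket *sock = macvtap_get_socket(file);
 *	if (!IS_ERR(sock))
 *		sock_sendmsg(sock, &msg, total_len);
 *
 * Per the comment above, the caller must keep its reference to the file
 * for as long as it uses the returned socket.
 */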
static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvlan_dev *vlan;
	struct device *classdev;

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

	vlan = netdev_priv(dev);

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * returned to the caller.
		 */
		err = macvtap_get_minor(vlan);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		classdev = device_create(macvtap_class, &dev->dev, devt,
					 dev, "tap%d", dev->ifindex);
		if (IS_ERR(classdev)) {
			macvtap_free_minor(vlan);
			return notifier_from_errno(PTR_ERR(classdev));
		}
		break;
	case NETDEV_UNREGISTER:
		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		device_destroy(macvtap_class, devt);
		macvtap_free_minor(vlan);
	}
static struct notifier_block macvtap_notifier_block __read_mostly = {
	.notifier_call	= macvtap_device_event,
};
static int macvtap_init(void)
	err = alloc_chrdev_region(&macvtap_major, 0,
				  MACVTAP_NUM_DEVS, "macvtap");

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);

	macvtap_class = class_create(THIS_MODULE, "macvtap");
	if (IS_ERR(macvtap_class)) {
		err = PTR_ERR(macvtap_class);
	}

	err = register_netdevice_notifier(&macvtap_notifier_block);

	err = macvlan_link_register(&macvtap_link_ops);

	return 0;

	/* error unwind */
	unregister_netdevice_notifier(&macvtap_notifier_block);
	class_unregister(macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);

module_init(macvtap_init);
static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
	unregister_netdevice_notifier(&macvtap_notifier_block);
	class_unregister(macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);

MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");