#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
/*
 * A macvtap queue is the central object of this driver, it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
        struct sock sk;
        struct socket sock;
        struct socket_wq wq;
        int vnet_hdr_sz;
        struct macvlan_dev __rcu *vlan;
        struct file *file;
        unsigned int flags;
        u16 queue_index;
        bool enabled;
        struct list_head next;
};
static struct proto macvtap_proto = {
        .name = "macvtap",
        .owner = THIS_MODULE,
        .obj_size = sizeof (struct macvtap_queue),
};
/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
static struct class *macvtap_class;
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
                      NETIF_F_TSO6)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
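/*
 * How these masks are used below: TUN_OFFLOADS is the set a user may
 * request through TUNSETOFFLOAD, RX_OFFLOADS is the receive-side
 * aggregation (GRO/LRO) toggled together with TSO in set_offload(),
 * and TAP_FEATURES is the baseline used when software-segmenting
 * packets in macvtap_handle_frame() before queueing them to a reader.
 */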
static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
        return rcu_dereference(dev->rx_handler_data);
}
/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled, the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
static int macvtap_enable_queue(struct net_device *dev, struct file *file,
                                struct macvtap_queue *q)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err = -EINVAL;

        ASSERT_RTNL();

        if (q->enabled)
                goto out;

        err = 0;
        rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
        q->queue_index = vlan->numvtaps;
        q->enabled = true;

        vlan->numvtaps++;
out:
        return err;
}
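/*
 * macvtap_set_queue() below attaches a freshly opened file as a new
 * queue. It is called from macvtap_open() with the RTNL lock held,
 * which is what serializes all writers of the taps[] array and of the
 * numvtaps/numqueues counters.
 */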
static int macvtap_set_queue(struct net_device *dev, struct file *file,
                             struct macvtap_queue *q)
{
        struct macvlan_dev *vlan = netdev_priv(dev);

        if (vlan->numqueues == MAX_MACVTAP_QUEUES)
                return -EBUSY;

        rcu_assign_pointer(q->vlan, vlan);
        rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
        sock_hold(&q->sk);

        q->file = file;
        q->queue_index = vlan->numvtaps;
        q->enabled = true;
        file->private_data = q;
        list_add_tail(&q->next, &vlan->queue_list);

        vlan->numvtaps++;
        vlan->numqueues++;

        return 0;
}
static int macvtap_disable_queue(struct macvtap_queue *q)
{
        struct macvlan_dev *vlan;
        struct macvtap_queue *nq;

        ASSERT_RTNL();
        if (!q->enabled)
                return -EINVAL;

        vlan = rtnl_dereference(q->vlan);

        if (vlan) {
                int index = q->queue_index;
                BUG_ON(index >= vlan->numvtaps);
                nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
                nq->queue_index = index;

                rcu_assign_pointer(vlan->taps[index], nq);
                RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
                q->enabled = false;

                vlan->numvtaps--;
        }

        return 0;
}
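/*
 * Note the compaction trick above: the last entry of taps[] is moved
 * into the slot being vacated, so the enabled queues always form a
 * dense prefix of the array and macvtap_get_queue() can pick one with
 * a simple modulo.
 */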
/*
 * The file owning the queue got closed, give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the RTNL lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
        struct macvlan_dev *vlan;

        rtnl_lock();
        vlan = rtnl_dereference(q->vlan);

        if (vlan) {
                if (q->enabled)
                        BUG_ON(macvtap_disable_queue(q));

                vlan->numqueues--;
                RCU_INIT_POINTER(q->vlan, NULL);
                sock_put(&q->sk);
                list_del_init(&q->next);
        }

        rtnl_unlock();

        synchronize_rcu();
        sock_put(&q->sk);
}
/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
                                               struct sk_buff *skb)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvtap_queue *tap = NULL;
        /* Access to taps array is protected by rcu, but access to numvtaps
         * isn't. Below we use it to lookup a queue, but treat it as a hint
         * and validate that the result isn't NULL - in case we are
         * racing against queue removal.
         */
        int numvtaps = ACCESS_ONCE(vlan->numvtaps);
        __u32 rxq;

        if (!numvtaps)
                goto out;

        /* Check if we can use flow to select a queue */
        rxq = skb_get_hash(skb);
        if (rxq) {
                tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
                goto out;
        }

        if (likely(skb_rx_queue_recorded(skb))) {
                rxq = skb_get_rx_queue(skb);

                while (unlikely(rxq >= numvtaps))
                        rxq -= numvtaps;

                tap = rcu_dereference(vlan->taps[rxq]);
                goto out;
        }

        tap = rcu_dereference(vlan->taps[0]);
out:
        return tap;
}
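/*
 * macvtap_get_queue() runs in the receive path under rcu_read_lock
 * (rx handlers are called inside an RCU read-side section), which is
 * what makes the rcu_dereference() calls above legal.
 */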
/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        struct macvtap_queue *q, *tmp, *qlist[MAX_MACVTAP_QUEUES];
        int i, j = 0;

        ASSERT_RTNL();
        list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
                list_del_init(&q->next);
                qlist[j++] = q;
                RCU_INIT_POINTER(q->vlan, NULL);
                if (q->enabled)
                        vlan->numvtaps--;
                vlan->numqueues--;
        }
        for (i = 0; i < vlan->numvtaps; i++)
                RCU_INIT_POINTER(vlan->taps[i], NULL);
        BUG_ON(vlan->numvtaps);
        BUG_ON(vlan->numqueues);
        /* guarantee that any future macvtap_set_queue will fail */
        vlan->numvtaps = MAX_MACVTAP_QUEUES;

        for (--j; j >= 0; j--)
                sock_put(&qlist[j]->sk);
}
static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct net_device *dev = skb->dev;
        struct macvlan_dev *vlan;
        struct macvtap_queue *q;
        netdev_features_t features = TAP_FEATURES;

        vlan = macvtap_get_vlan_rcu(dev);
        if (!vlan)
                return RX_HANDLER_PASS;

        q = macvtap_get_queue(dev, skb);
        if (!q)
                return RX_HANDLER_PASS;

        if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len)
                goto drop;

        skb_push(skb, ETH_HLEN);

        /* Apply the forward feature mask so that we perform segmentation
         * according to the user's wishes. This only works if VNET_HDR is
         * enabled.
         */
        if (q->flags & IFF_VNET_HDR)
                features |= vlan->tap_features;
        if (netif_needs_gso(dev, skb, features)) {
                struct sk_buff *segs = __skb_gso_segment(skb, features, false);

                if (IS_ERR(segs))
                        goto drop;

                if (!segs) {
                        skb_queue_tail(&q->sk.sk_receive_queue, skb);
                        goto wake_up;
                }

                kfree_skb(skb);
                while (segs) {
                        struct sk_buff *nskb = segs->next;

                        segs->next = NULL;
                        skb_queue_tail(&q->sk.sk_receive_queue, segs);
                        segs = nskb;
                }
        } else {
                /* If we receive a partial checksum and the tap side
                 * doesn't support checksum offload, compute the checksum.
                 * Note: it doesn't matter which checksum feature to
                 * check, we either support them all or none.
                 */
                if (skb->ip_summed == CHECKSUM_PARTIAL &&
                    !(features & NETIF_F_ALL_CSUM) &&
                    skb_checksum_help(skb))
                        goto drop;
                skb_queue_tail(&q->sk.sk_receive_queue, skb);
        }

wake_up:
        wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
        return RX_HANDLER_CONSUMED;

drop:
        /* Count errors/drops only here, thus don't care about args. */
        macvlan_count_rx(vlan, 0, 0, 0);
        kfree_skb(skb);
        return RX_HANDLER_CONSUMED;
}
static int macvtap_get_minor(struct macvlan_dev *vlan)
{
        int retval;

        mutex_lock(&minor_lock);
        retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
        if (retval >= 0) {
                vlan->minor = retval;
        } else if (retval == -ENOSPC) {
                printk(KERN_ERR "too many macvtap devices\n");
                retval = -EINVAL;
        }
        mutex_unlock(&minor_lock);
        return retval < 0 ? retval : 0;
}
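/*
 * idr_alloc() above starts at 1, so minor 0 is never handed out and
 * vlan->minor == 0 can safely mean "no device node allocated" when
 * freeing the minor below.
 */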
static void macvtap_free_minor(struct macvlan_dev *vlan)
{
        mutex_lock(&minor_lock);
        if (vlan->minor) {
                idr_remove(&minor_idr, vlan->minor);
                vlan->minor = 0;
        }
        mutex_unlock(&minor_lock);
}
static struct net_device *dev_get_by_macvtap_minor(int minor)
{
        struct net_device *dev = NULL;
        struct macvlan_dev *vlan;

        mutex_lock(&minor_lock);
        vlan = idr_find(&minor_idr, minor);
        if (vlan) {
                dev = vlan->dev;
                dev_hold(dev);
        }
        mutex_unlock(&minor_lock);
        return dev;
}
static int macvtap_newlink(struct net *src_net,
                           struct net_device *dev,
                           struct nlattr *tb[],
                           struct nlattr *data[])
{
        struct macvlan_dev *vlan = netdev_priv(dev);
        int err;

        INIT_LIST_HEAD(&vlan->queue_list);

        /* Since macvlan supports all offloads by default, make
         * tap support all offloads also.
         */
        vlan->tap_features = TUN_OFFLOADS;

        err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
        if (err)
                return err;

        /* Don't put anything that may fail after macvlan_common_newlink
         * because we can't undo what it does.
         */
        return macvlan_common_newlink(src_net, dev, tb, data);
}
static void macvtap_dellink(struct net_device *dev,
                            struct list_head *head)
{
        netdev_rx_handler_unregister(dev);
        macvtap_del_queues(dev);
        macvlan_dellink(dev, head);
}
static void macvtap_setup(struct net_device *dev)
{
        macvlan_common_setup(dev);
        dev->tx_queue_len = TUN_READQ_SIZE;
}
static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
        .kind           = "macvtap",
        .setup          = macvtap_setup,
        .newlink        = macvtap_newlink,
        .dellink        = macvtap_dellink,
};
static void macvtap_sock_write_space(struct sock *sk)
{
        wait_queue_head_t *wqueue;

        if (!sock_writeable(sk) ||
            !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
                return;

        wqueue = sk_sleep(sk);
        if (wqueue && waitqueue_active(wqueue))
                wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}
static void macvtap_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
}
static int macvtap_open(struct inode *inode, struct file *file)
{
        struct net *net = current->nsproxy->net_ns;
        struct net_device *dev;
        struct macvtap_queue *q;
        int err = -ENODEV;

        rtnl_lock();
        dev = dev_get_by_macvtap_minor(iminor(inode));
        if (!dev)
                goto out;

        err = -ENOMEM;
        q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
                                             &macvtap_proto);
        if (!q)
                goto out;

        RCU_INIT_POINTER(q->sock.wq, &q->wq);
        init_waitqueue_head(&q->wq.wait);
        q->sock.type = SOCK_RAW;
        q->sock.state = SS_CONNECTED;
        q->sock.file = file;
        q->sock.ops = &macvtap_socket_ops;
        sock_init_data(&q->sock, &q->sk);
        q->sk.sk_write_space = macvtap_sock_write_space;
        q->sk.sk_destruct = macvtap_sock_destruct;
        q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
        q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

        /*
         * So far only KVM's virtio_net uses macvtap; enable zero copy
         * between guest kernel and host kernel when the lower device
         * supports zerocopy.
         *
         * The macvlan supports zerocopy iff the lower device supports zero
         * copy, so we don't have to look at the lower device directly.
         */
        if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
                sock_set_flag(&q->sk, SOCK_ZEROCOPY);

        err = macvtap_set_queue(dev, file, q);
        if (err)
                sock_put(&q->sk);

out:
        if (dev)
                dev_put(dev);

        rtnl_unlock();
        return err;
}
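/*
 * Usage sketch (userspace, not part of this driver): the device node
 * for a macvtap link is named after its ifindex (see "tap%d" in
 * macvtap_device_event() below), so a reader might do:
 *
 *      int fd = open("/dev/tap42", O_RDWR);       // 42 == ifindex
 *      struct ifreq ifr = { .ifr_flags = IFF_TAP | IFF_NO_PI };
 *      ioctl(fd, TUNSETIFF, &ifr);                // drops IFF_VNET_HDR
 *      char buf[2048];
 *      ssize_t n = read(fd, buf, sizeof(buf));    // one ethernet frame
 *
 * Without the TUNSETIFF call, each frame read from or written to the
 * fd is prefixed by a struct virtio_net_hdr, because macvtap_open()
 * sets IFF_VNET_HDR by default.
 */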
static int macvtap_release(struct inode *inode, struct file *file)
{
        struct macvtap_queue *q = file->private_data;

        macvtap_put_queue(q);
        return 0;
}
static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
        struct macvtap_queue *q = file->private_data;
        unsigned int mask = POLLERR;

        if (!q)
                goto out;

        mask = 0;
        poll_wait(file, &q->wq.wait, wait);

        if (!skb_queue_empty(&q->sk.sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;

        if (sock_writeable(&q->sk) ||
            (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
             sock_writeable(&q->sk)))
                mask |= POLLOUT | POLLWRNORM;

out:
        return mask;
}
static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
                                                size_t len, size_t linear,
                                                int noblock, int *err)
{
        struct sk_buff *skb;

        /* Under a page? Don't bother with paged skb. */
        if (prepad + len < PAGE_SIZE || !linear)
                linear = len;

        skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
                                   err, 0);
        if (!skb)
                return NULL;

        skb_reserve(skb, prepad);
        skb_put(skb, linear);
        skb->data_len = len - linear;
        skb->len += len - linear;

        return skb;
}
/*
 * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
 * be shared with the tun/tap driver.
 */
static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
                                     struct virtio_net_hdr *vnet_hdr)
{
        unsigned short gso_type = 0;

        if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        gso_type = SKB_GSO_TCPV6;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
                                     current->comm);
                        gso_type = SKB_GSO_UDP;
                        if (skb->protocol == htons(ETH_P_IPV6))
                                ipv6_proxy_select_ident(skb);
                        break;
                default:
                        return -EINVAL;
                }

                if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        gso_type |= SKB_GSO_TCP_ECN;

                if (vnet_hdr->gso_size == 0)
                        return -EINVAL;
        }

        if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
                                          vnet_hdr->csum_offset))
                        return -EINVAL;
        }

        if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
                skb_shinfo(skb)->gso_type = gso_type;

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }
        return 0;
}
static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
                                    struct virtio_net_hdr *vnet_hdr)
{
        memset(vnet_hdr, 0, sizeof(*vnet_hdr));

        if (skb_is_gso(skb)) {
                struct skb_shared_info *sinfo = skb_shinfo(skb);

                /* This is a hint as to how much should be linear. */
                vnet_hdr->hdr_len = skb_headlen(skb);
                vnet_hdr->gso_size = sinfo->gso_size;
                if (sinfo->gso_type & SKB_GSO_TCPV4)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else if (sinfo->gso_type & SKB_GSO_TCPV6)
                        vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                else
                        BUG();
                if (sinfo->gso_type & SKB_GSO_TCP_ECN)
                        vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        } else
                vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                vnet_hdr->csum_start = skb_checksum_start_offset(skb);
                if (vlan_tx_tag_present(skb))
                        vnet_hdr->csum_start += VLAN_HLEN;
                vnet_hdr->csum_offset = skb->csum_offset;
        } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
        } /* else everything is zero */
}
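/*
 * Together, macvtap_skb_from_vnet_hdr() and macvtap_skb_to_vnet_hdr()
 * translate between the skb's GSO/checksum state and the
 * struct virtio_net_hdr that prefixes every frame on the character
 * device while IFF_VNET_HDR is enabled.
 */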
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                                const struct iovec *iv, unsigned long total_len,
                                size_t count, int noblock)
{
        int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
        struct sk_buff *skb;
        struct macvlan_dev *vlan;
        unsigned long len = total_len;
        int err;
        struct virtio_net_hdr vnet_hdr = { 0 };
        int vnet_hdr_len = 0;
        int copylen = 0;
        bool zerocopy = false;
        size_t linear;

        if (q->flags & IFF_VNET_HDR) {
                vnet_hdr_len = q->vnet_hdr_sz;

                err = -EINVAL;
                if (len < vnet_hdr_len)
                        goto err;
                len -= vnet_hdr_len;

                err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
                                          sizeof(vnet_hdr));
                if (err < 0)
                        goto err;
                if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
                    vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
                                                        vnet_hdr.hdr_len)
                        vnet_hdr.hdr_len = vnet_hdr.csum_start +
                                                vnet_hdr.csum_offset + 2;
                err = -EINVAL;
                if (vnet_hdr.hdr_len > len)
                        goto err;
        }

        err = -EINVAL;
        if (unlikely(len < ETH_HLEN))
                goto err;

        err = -EMSGSIZE;
        if (unlikely(count > UIO_MAXIOV))
                goto err;

        if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
                copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
                if (copylen > good_linear)
                        copylen = good_linear;
                linear = copylen;
                if (iov_pages(iv, vnet_hdr_len + copylen, count)
                    <= MAX_SKB_FRAGS)
                        zerocopy = true;
        }

        if (!zerocopy) {
                copylen = len;
                if (vnet_hdr.hdr_len > good_linear)
                        linear = good_linear;
                else
                        linear = vnet_hdr.hdr_len;
        }

        skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
                                linear, noblock, &err);
        if (!skb)
                goto err;

        if (zerocopy)
                err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
        else {
                err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
                                                   len);
                if (!err && m && m->msg_control) {
                        struct ubuf_info *uarg = m->msg_control;
                        uarg->callback(uarg, false);
                }
        }

        if (err)
                goto err_kfree;

        skb_set_network_header(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb->protocol = eth_hdr(skb)->h_proto;

        if (vnet_hdr_len) {
                err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
                if (err)
                        goto err_kfree;
        }

        skb_probe_transport_header(skb, ETH_HLEN);

        rcu_read_lock();
        vlan = rcu_dereference(q->vlan);
        /* copy skb_ubuf_info for callback when skb has no error */
        if (zerocopy) {
                skb_shinfo(skb)->destructor_arg = m->msg_control;
                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
                skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
        }
        if (vlan) {
                skb->dev = vlan->dev;
                dev_queue_xmit(skb);
        } else {
                kfree_skb(skb);
        }
        rcu_read_unlock();

        return total_len;

err_kfree:
        kfree_skb(skb);

err:
        rcu_read_lock();
        vlan = rcu_dereference(q->vlan);
        if (vlan)
                this_cpu_inc(vlan->pcpu_stats->tx_dropped);
        rcu_read_unlock();

        return err;
}
static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
                                 unsigned long count, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct macvtap_queue *q = file->private_data;
        ssize_t result;

        result = macvtap_get_user(q, NULL, iv, iov_length(iv, count), count,
                                  file->f_flags & O_NONBLOCK);
        return result;
}
/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
                                const struct sk_buff *skb,
                                const struct iovec *iv, int len)
{
        int ret;
        int vnet_hdr_len = 0;
        int vlan_offset = 0;
        int copied, total;

        if (q->flags & IFF_VNET_HDR) {
                struct virtio_net_hdr vnet_hdr;
                vnet_hdr_len = q->vnet_hdr_sz;
                if ((len -= vnet_hdr_len) < 0)
                        return -EINVAL;

                macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);

                if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
                        return -EFAULT;
        }
        total = copied = vnet_hdr_len;
        total += skb->len;

        if (!vlan_tx_tag_present(skb))
                len = min_t(int, skb->len, len);
        else {
                int copy;
                struct {
                        __be16 h_vlan_proto;
                        __be16 h_vlan_TCI;
                } veth;
                veth.h_vlan_proto = skb->vlan_proto;
                veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));

                vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
                len = min_t(int, skb->len + VLAN_HLEN, len);
                total += VLAN_HLEN;

                copy = min_t(int, vlan_offset, len);
                ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
                len -= copy;
                copied += copy;
                if (ret || !len)
                        goto done;

                copy = min_t(int, sizeof(veth), len);
                ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
                len -= copy;
                copied += copy;
                if (ret || !len)
                        goto done;
        }

        ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);

done:
        return ret ? ret : total;
}
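/*
 * Note that the value returned above is the full frame length (plus
 * vnet header and VLAN header, if any) even when only part of the
 * frame fit into the iovec; macvtap_recvmsg() relies on this to
 * detect truncation and set MSG_TRUNC.
 */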
static ssize_t macvtap_do_read(struct macvtap_queue *q,
                               const struct iovec *iv, unsigned long len,
                               int noblock)
{
        DEFINE_WAIT(wait);
        struct sk_buff *skb;
        ssize_t ret = 0;

        while (len) {
                if (!noblock)
                        prepare_to_wait(sk_sleep(&q->sk), &wait,
                                        TASK_INTERRUPTIBLE);

                /* Read frames from the queue */
                skb = skb_dequeue(&q->sk.sk_receive_queue);
                if (!skb) {
                        if (noblock) {
                                ret = -EAGAIN;
                                break;
                        }
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                        /* Nothing to read, let's sleep */
                        schedule();
                        continue;
                }
                ret = macvtap_put_user(q, skb, iv, len);
                kfree_skb(skb);
                break;
        }

        if (!noblock)
                finish_wait(sk_sleep(&q->sk), &wait);
        return ret;
}
static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
                                unsigned long count, loff_t pos)
{
        struct file *file = iocb->ki_filp;
        struct macvtap_queue *q = file->private_data;
        ssize_t len, ret = 0;

        len = iov_length(iv, count);
        if (len < 0) {
                ret = -EINVAL;
                goto out;
        }

        ret = macvtap_do_read(q, iv, len, file->f_flags & O_NONBLOCK);
        ret = min_t(ssize_t, ret, len);
        if (ret > 0)
                iocb->ki_pos = ret;
out:
        return ret;
}
static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
        struct macvlan_dev *vlan;

        ASSERT_RTNL();
        vlan = rtnl_dereference(q->vlan);
        if (vlan)
                dev_hold(vlan->dev);

        return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
        dev_put(vlan->dev);
}
static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
        struct macvtap_queue *q = file->private_data;
        struct macvlan_dev *vlan;
        int ret;

        vlan = macvtap_get_vlan(q);
        if (!vlan)
                return -EINVAL;

        if (flags & IFF_ATTACH_QUEUE)
                ret = macvtap_enable_queue(vlan->dev, file, q);
        else if (flags & IFF_DETACH_QUEUE)
                ret = macvtap_disable_queue(q);
        else
                ret = -EINVAL;

        macvtap_put_vlan(vlan);
        return ret;
}
static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
        struct macvlan_dev *vlan;
        netdev_features_t features;
        netdev_features_t feature_mask = 0;

        vlan = rtnl_dereference(q->vlan);
        if (!vlan)
                return -ENOLINK;

        features = vlan->dev->features;

        if (arg & TUN_F_CSUM) {
                feature_mask = NETIF_F_HW_CSUM;

                if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
                        if (arg & TUN_F_TSO_ECN)
                                feature_mask |= NETIF_F_TSO_ECN;
                        if (arg & TUN_F_TSO4)
                                feature_mask |= NETIF_F_TSO;
                        if (arg & TUN_F_TSO6)
                                feature_mask |= NETIF_F_TSO6;
                }
        }

        /* The tun/tap driver inverts the usage for TSO offloads, where
         * setting the TSO bit means that the userspace wants to
         * accept TSO frames and turning it off means that user space
         * does not support TSO.
         * For macvtap, we have to invert it to mean the same thing.
         * When user space turns off TSO, we turn off GSO/LRO so that
         * user-space will not receive TSO frames.
         */
        if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
                features |= RX_OFFLOADS;
        else
                features &= ~RX_OFFLOADS;

        /* tap_features are the same as features on tun/tap and
         * reflect user expectations.
         */
        vlan->tap_features = feature_mask;
        vlan->set_features = features;
        netdev_update_features(vlan->dev);

        return 0;
}
/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg)
{
        struct macvtap_queue *q = file->private_data;
        struct macvlan_dev *vlan;
        void __user *argp = (void __user *)arg;
        struct ifreq __user *ifr = argp;
        unsigned int __user *up = argp;
        unsigned int u;
        int __user *sp = argp;
        int s;
        int ret;

        switch (cmd) {
        case TUNSETIFF:
                /* ignore the name, just look at flags */
                if (get_user(u, &ifr->ifr_flags))
                        return -EFAULT;

                ret = 0;
                if ((u & ~(IFF_VNET_HDR | IFF_MULTI_QUEUE)) !=
                    (IFF_NO_PI | IFF_TAP))
                        ret = -EINVAL;
                else
                        q->flags = u;

                return ret;

        case TUNGETIFF:
                rtnl_lock();
                vlan = macvtap_get_vlan(q);
                if (!vlan) {
                        rtnl_unlock();
                        return -ENOLINK;
                }

                ret = 0;
                if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
                    put_user(q->flags, &ifr->ifr_flags))
                        ret = -EFAULT;
                macvtap_put_vlan(vlan);
                rtnl_unlock();
                return ret;

        case TUNSETQUEUE:
                if (get_user(u, &ifr->ifr_flags))
                        return -EFAULT;
                rtnl_lock();
                ret = macvtap_ioctl_set_queue(file, u);
                rtnl_unlock();
                return ret;

        case TUNGETFEATURES:
                if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
                             IFF_MULTI_QUEUE, up))
                        return -EFAULT;
                return 0;

        case TUNSETSNDBUF:
                if (get_user(u, up))
                        return -EFAULT;

                q->sk.sk_sndbuf = u;
                return 0;

        case TUNGETVNETHDRSZ:
                s = q->vnet_hdr_sz;
                if (put_user(s, sp))
                        return -EFAULT;
                return 0;

        case TUNSETVNETHDRSZ:
                if (get_user(s, sp))
                        return -EFAULT;
                if (s < (int)sizeof(struct virtio_net_hdr))
                        return -EINVAL;

                q->vnet_hdr_sz = s;
                return 0;

        case TUNSETOFFLOAD:
                /* let the user check for future flags */
                if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
                            TUN_F_TSO_ECN))
                        return -EINVAL;

                rtnl_lock();
                ret = set_offload(q, arg);
                rtnl_unlock();
                return ret;

        default:
                return -EINVAL;
        }
}
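/*
 * Usage sketch (userspace): TUNGETIFF reports which macvlan interface
 * a queue file descriptor belongs to:
 *
 *      struct ifreq ifr;
 *      if (ioctl(fd, TUNGETIFF, &ifr) == 0)
 *              printf("%s flags %#x\n", ifr.ifr_name, ifr.ifr_flags);
 */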
#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
                                 unsigned long arg)
{
        return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static const struct file_operations macvtap_fops = {
        .owner          = THIS_MODULE,
        .open           = macvtap_open,
        .release        = macvtap_release,
        .aio_read       = macvtap_aio_read,
        .aio_write      = macvtap_aio_write,
        .poll           = macvtap_poll,
        .llseek         = no_llseek,
        .unlocked_ioctl = macvtap_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = macvtap_compat_ioctl,
#endif
};
static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
                           struct msghdr *m, size_t total_len)
{
        struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
        return macvtap_get_user(q, m, m->msg_iov, total_len, m->msg_iovlen,
                                m->msg_flags & MSG_DONTWAIT);
}
static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
                           struct msghdr *m, size_t total_len,
                           int flags)
{
        struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
        int ret;

        if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
                return -EINVAL;

        ret = macvtap_do_read(q, m->msg_iov, total_len,
                              flags & MSG_DONTWAIT);
        if (ret > total_len) {
                m->msg_flags |= MSG_TRUNC;
                ret = flags & MSG_TRUNC ? ret : total_len;
        }
        return ret;
}
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
        .sendmsg = macvtap_sendmsg,
        .recvmsg = macvtap_recvmsg,
};
/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
        struct macvtap_queue *q;

        if (file->f_op != &macvtap_fops)
                return ERR_PTR(-EINVAL);
        q = file->private_data;
        if (!q)
                return ERR_PTR(-EBADFD);
        return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);
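/*
 * In-tree, vhost-net is the expected caller of macvtap_get_socket():
 * it drives the returned socket with sock_sendmsg()/sock_recvmsg() to
 * move frames between a virtio ring and this queue. (This describes
 * usage elsewhere in the tree, not anything enforced by this file.)
 */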
static int macvtap_device_event(struct notifier_block *unused,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct macvlan_dev *vlan;
        struct device *classdev;
        dev_t devt;
        int err;

        if (dev->rtnl_link_ops != &macvtap_link_ops)
                return NOTIFY_DONE;

        vlan = netdev_priv(dev);

        switch (event) {
        case NETDEV_REGISTER:
                /* Create the device node here after the network device has
                 * been registered but before register_netdevice has
                 * finished running.
                 */
                err = macvtap_get_minor(vlan);
                if (err)
                        return notifier_from_errno(err);

                devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
                classdev = device_create(macvtap_class, &dev->dev, devt,
                                         dev, "tap%d", dev->ifindex);
                if (IS_ERR(classdev)) {
                        macvtap_free_minor(vlan);
                        return notifier_from_errno(PTR_ERR(classdev));
                }
                break;
        case NETDEV_UNREGISTER:
                devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
                device_destroy(macvtap_class, devt);
                macvtap_free_minor(vlan);
                break;
        }

        return NOTIFY_DONE;
}
static struct notifier_block macvtap_notifier_block __read_mostly = {
        .notifier_call  = macvtap_device_event,
};
static int macvtap_init(void)
{
        int err;

        err = alloc_chrdev_region(&macvtap_major, 0,
                                  MACVTAP_NUM_DEVS, "macvtap");
        if (err)
                goto out1;

        cdev_init(&macvtap_cdev, &macvtap_fops);
        err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
        if (err)
                goto out2;

        macvtap_class = class_create(THIS_MODULE, "macvtap");
        if (IS_ERR(macvtap_class)) {
                err = PTR_ERR(macvtap_class);
                goto out3;
        }

        err = register_netdevice_notifier(&macvtap_notifier_block);
        if (err)
                goto out4;

        err = macvlan_link_register(&macvtap_link_ops);
        if (err)
                goto out5;

        return 0;

out5:
        unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
        class_unregister(macvtap_class);
out3:
        cdev_del(&macvtap_cdev);
out2:
        unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
        return err;
}
module_init(macvtap_init);
static void macvtap_exit(void)
{
        rtnl_link_unregister(&macvtap_link_ops);
        unregister_netdevice_notifier(&macvtap_notifier_block);
        class_unregister(macvtap_class);
        cdev_del(&macvtap_cdev);
        unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
}
module_exit(macvtap_exit);
MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");