Drivers: net: hyperv: Enable scatter gather I/O
drivers/net/hyperv/netvsc_drv.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

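/*
 * Per-netdev private state, reachable via netdev_priv(). It links the
 * net_device back to its VMBus device and carries the deferred work
 * items used for link-change handling and rx-mode updates.
 */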
struct net_device_context {
        /* point back to our device context */
        struct hv_device *device_ctx;
        struct delayed_work dwork;
        struct work_struct work;
};

#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

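/*
 * Deferred rx-mode update: ndo_set_rx_mode runs in atomic context, so the
 * actual RNDIS packet-filter change, which talks to the host, is done
 * from this work item instead.
 */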
static void do_set_multicast(struct work_struct *w)
{
        struct net_device_context *ndevctx =
                container_of(w, struct net_device_context, work);
        struct netvsc_device *nvdev;
        struct rndis_device *rdev;

        nvdev = hv_get_drvdata(ndevctx->device_ctx);
        if (nvdev == NULL || nvdev->ndev == NULL)
                return;

        rdev = nvdev->extension;
        if (rdev == NULL)
                return;

        if (nvdev->ndev->flags & IFF_PROMISC)
                rndis_filter_set_packet_filter(rdev,
                        NDIS_PACKET_TYPE_PROMISCUOUS);
        else
                rndis_filter_set_packet_filter(rdev,
                        NDIS_PACKET_TYPE_BROADCAST |
                        NDIS_PACKET_TYPE_ALL_MULTICAST |
                        NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);

        schedule_work(&net_device_ctx->work);
}

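/*
 * ndo_open: open the RNDIS data path, start the tx queue, and reflect the
 * host-reported link state in the carrier flag.
 */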
static int netvsc_open(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *device_obj = net_device_ctx->device_ctx;
        struct netvsc_device *nvdev;
        struct rndis_device *rdev;
        int ret = 0;

        netif_carrier_off(net);

        /* Open up the device */
        ret = rndis_filter_open(device_obj);
        if (ret != 0) {
                netdev_err(net, "unable to open device (ret %d).\n", ret);
                return ret;
        }

        netif_start_queue(net);

        nvdev = hv_get_drvdata(device_obj);
        rdev = nvdev->extension;
        if (!rdev->link_state)
                netif_carrier_on(net);

        return ret;
}

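/*
 * ndo_stop: disable tx, flush any pending rx-mode work so it cannot
 * re-enable the packet filter, then close the RNDIS data path.
 */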
static int netvsc_close(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *device_obj = net_device_ctx->device_ctx;
        int ret;

        netif_tx_disable(net);

        /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
        cancel_work_sync(&net_device_ctx->work);
        ret = rndis_filter_close(device_obj);
        if (ret != 0)
                netdev_err(net, "unable to close device (ret %d).\n", ret);

        return ret;
}

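/*
 * Send-completion callback: the originating skb was stashed in the
 * completion tid when the packet was posted, so free both the packet
 * and the skb here.
 */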
static void netvsc_xmit_completion(void *context)
{
        struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
        struct sk_buff *skb = (struct sk_buff *)
                (unsigned long)packet->completion.send.send_completion_tid;

        kfree(packet);

        if (skb)
                dev_kfree_skb_any(skb);
}

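/*
 * Fill hv_page_buffer entries (pfn/offset/len) describing one virtually
 * contiguous buffer. Returns the number of slots used.
 */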
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
                        struct hv_page_buffer *pb)
{
        int j = 0;

        /* Deal with compound pages by ignoring the unused part
         * of the page.
         */
        page += (offset >> PAGE_SHIFT);
        offset &= ~PAGE_MASK;

        while (len > 0) {
                unsigned long bytes;

                bytes = PAGE_SIZE - offset;
                if (bytes > len)
                        bytes = len;
                pb[j].pfn = page_to_pfn(page);
                pb[j].offset = offset;
                pb[j].len = bytes;

                offset += bytes;
                len -= bytes;

                if (offset == PAGE_SIZE && len) {
                        page++;
                        offset = 0;
                        j++;
                }
        }

        return j + 1;
}

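/*
 * Build the page-buffer array for an outgoing packet: the optional
 * header first, then the skb linear data, then each page fragment.
 */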
static void init_page_array(void *hdr, u32 len, struct sk_buff *skb,
                            struct hv_page_buffer *pb)
{
        u32 slots_used = 0;
        char *data = skb->data;
        int frags = skb_shinfo(skb)->nr_frags;
        int i;

        /* The packet is laid out thus:
         * 1. hdr
         * 2. skb linear data
         * 3. skb fragment data
         */
        if (hdr != NULL)
                slots_used += fill_pg_buf(virt_to_page(hdr),
                                        offset_in_page(hdr),
                                        len, &pb[slots_used]);

        slots_used += fill_pg_buf(virt_to_page(data),
                                offset_in_page(data),
                                skb_headlen(skb), &pb[slots_used]);

        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;

                slots_used += fill_pg_buf(skb_frag_page(frag),
                                        frag->page_offset,
                                        skb_frag_size(frag), &pb[slots_used]);
        }
}

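/*
 * Worst-case number of page-buffer slots needed for the skb's page
 * fragments: each fragment may start at a nonzero in-page offset.
 */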
static int count_skb_frag_slots(struct sk_buff *skb)
{
        int i, frags = skb_shinfo(skb)->nr_frags;
        int pages = 0;

        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
                unsigned long size = skb_frag_size(frag);
                unsigned long offset = frag->page_offset;

                /* Skip unused pages at the start of a compound page */
                offset &= ~PAGE_MASK;
                pages += PFN_UP(offset + size);
        }
        return pages;
}

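/*
 * Total page-buffer slots needed for the skb: pages spanned by the
 * linear data plus the worst case for the fragments.
 */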
static int netvsc_get_slots(struct sk_buff *skb)
{
        char *data = skb->data;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);
        int slots;
        int frag_slots;

        slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
        frag_slots = count_skb_frag_slots(skb);
        return slots + frag_slots;
}

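/*
 * ndo_start_xmit: describe the skb as a page-buffer array and hand it to
 * the RNDIS filter. Oversized packets are dropped, and -EAGAIN (ring
 * full) is reported as NETDEV_TX_BUSY so the stack will retry.
 */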
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_netvsc_packet *packet;
        int ret;
        unsigned int num_data_pages;

        /* We need at most two additional pages to describe the RNDIS
         * header. We can transmit at most MAX_PAGE_BUFFER_COUNT pages
         * in a single packet.
         */
        num_data_pages = netvsc_get_slots(skb) + 2;
        if (num_data_pages > MAX_PAGE_BUFFER_COUNT) {
                netdev_err(net, "Packet too big: %u\n", skb->len);
                dev_kfree_skb(skb);
                net->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        /* Allocate a netvsc packet based on # of frags. */
        packet = kzalloc(sizeof(struct hv_netvsc_packet) +
                         (num_data_pages * sizeof(struct hv_page_buffer)) +
                         sizeof(struct rndis_message) +
                         NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
        if (!packet) {
                /* out of memory, drop packet */
                netdev_err(net, "unable to allocate hv_netvsc_packet\n");

                dev_kfree_skb(skb);
                net->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        packet->vlan_tci = skb->vlan_tci;

        packet->extension = (void *)(unsigned long)packet +
                        sizeof(struct hv_netvsc_packet) +
                        (num_data_pages * sizeof(struct hv_page_buffer));

        /* If the rndis msg goes beyond 1 page, we will add 1 later */
        packet->page_buf_cnt = num_data_pages - 1;

        /* Initialize it from the skb */
        packet->total_data_buflen = skb->len;

        /* Start filling in the page buffers starting after RNDIS buffer. */
        init_page_array(NULL, 0, skb, &packet->page_buf[1]);

        /* Set the completion routine */
        packet->completion.send.send_completion = netvsc_xmit_completion;
        packet->completion.send.send_completion_ctx = packet;
        packet->completion.send.send_completion_tid = (unsigned long)skb;

        ret = rndis_filter_send(net_device_ctx->device_ctx,
                                  packet);
        if (ret == 0) {
                net->stats.tx_bytes += skb->len;
                net->stats.tx_packets++;
        } else {
                kfree(packet);
                if (ret != -EAGAIN) {
                        dev_kfree_skb_any(skb);
                        net->stats.tx_dropped++;
                }
        }

        return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
                                       unsigned int status)
{
        struct net_device *net;
        struct net_device_context *ndev_ctx;
        struct netvsc_device *net_device;
        struct rndis_device *rdev;

        net_device = hv_get_drvdata(device_obj);
        rdev = net_device->extension;

        rdev->link_state = status != 1;

        net = net_device->ndev;

        if (!net || net->reg_state != NETREG_REGISTERED)
                return;

        ndev_ctx = netdev_priv(net);
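        /*
         * On link up, run the link-change work twice: once right away and
         * again after a short delay, so the delayed peer notification
         * described in the comment above netvsc_link_change() still goes
         * out if the network was not yet operational on the first pass.
         */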
        if (status == 1) {
                schedule_delayed_work(&ndev_ctx->dwork, 0);
                schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
        } else {
                schedule_delayed_work(&ndev_ctx->dwork, 0);
        }
}

/*
 * netvsc_recv_callback -  Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
                                struct hv_netvsc_packet *packet)
{
        struct net_device *net;
        struct sk_buff *skb;

        net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
        if (!net || net->reg_state != NETREG_REGISTERED) {
                packet->status = NVSP_STAT_FAIL;
                return 0;
        }

        /* Allocate a skb - TODO direct I/O to pages? */
        skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
        if (unlikely(!skb)) {
                ++net->stats.rx_dropped;
                packet->status = NVSP_STAT_FAIL;
                return 0;
        }

        /*
         * Copy to skb. This copy is needed here since the memory pointed
         * to by hv_netvsc_packet is reclaimed once this callback returns.
         */
        memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
                packet->total_data_buflen);

        skb->protocol = eth_type_trans(skb, net);
        skb->ip_summed = CHECKSUM_NONE;
        if (packet->vlan_tci & VLAN_TAG_PRESENT)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       packet->vlan_tci);

        net->stats.rx_packets++;
        net->stats.rx_bytes += packet->total_data_buflen;

        /*
         * Pass the skb back up. Network stack will deallocate the skb when it
         * is done.
         * TODO - use NAPI?
         */
        netif_rx(skb);

        return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
                               struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

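/*
 * ndo_change_mtu: the RNDIS device must be removed and re-added for a
 * new MTU to take effect, so quiesce traffic, tear the device down,
 * update the MTU, and bring it back with the same ring size.
 */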
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
        struct net_device_context *ndevctx = netdev_priv(ndev);
        struct hv_device *hdev = ndevctx->device_ctx;
        struct netvsc_device *nvdev = hv_get_drvdata(hdev);
        struct netvsc_device_info device_info;
        int limit = ETH_DATA_LEN;

        if (nvdev == NULL || nvdev->destroy)
                return -ENODEV;

        if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
                limit = NETVSC_MTU;

        if (mtu < 68 || mtu > limit)
                return -EINVAL;

        nvdev->start_remove = true;
        cancel_work_sync(&ndevctx->work);
        netif_tx_disable(ndev);
        rndis_filter_device_remove(hdev);

        ndev->mtu = mtu;

        ndevctx->device_ctx = hdev;
        hv_set_drvdata(hdev, ndev);
        device_info.ring_size = ring_size;
        rndis_filter_device_add(hdev, &device_info);
        netif_wake_queue(ndev);

        return 0;
}

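/*
 * ndo_set_mac_address: apply the new MAC locally first, then push it to
 * the host over RNDIS; roll back to the saved address on failure so the
 * netdev and the host never disagree.
 */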
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
        struct net_device_context *ndevctx = netdev_priv(ndev);
        struct hv_device *hdev = ndevctx->device_ctx;
        struct sockaddr *addr = p;
        char save_adr[ETH_ALEN];
        unsigned char save_aatype;
        int err;

        memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
        save_aatype = ndev->addr_assign_type;

        err = eth_mac_addr(ndev, p);
        if (err != 0)
                return err;

        err = rndis_filter_set_device_mac(hdev, addr->sa_data);
        if (err != 0) {
                /* roll back to saved MAC */
                memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
                ndev->addr_assign_type = save_aatype;
        }

        return err;
}

static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo    = netvsc_get_drvinfo,
        .get_link       = ethtool_op_get_link,
};

static const struct net_device_ops device_ops = {
        .ndo_open =                     netvsc_open,
        .ndo_stop =                     netvsc_close,
        .ndo_start_xmit =               netvsc_start_xmit,
        .ndo_set_rx_mode =              netvsc_set_multicast_list,
        .ndo_change_mtu =               netvsc_change_mtu,
        .ndo_validate_addr =            eth_validate_addr,
        .ndo_set_mac_address =          netvsc_set_mac_addr,
};

/*
 * Send GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
 * another netdev_notify_peers() into a delayed work, otherwise GARP packet
 * will not be sent after quick migration, which causes network disconnection.
 * Also, we update the carrier status here.
 */
static void netvsc_link_change(struct work_struct *w)
{
        struct net_device_context *ndev_ctx;
        struct net_device *net;
        struct netvsc_device *net_device;
        struct rndis_device *rdev;
        bool notify;

        rtnl_lock();

        ndev_ctx = container_of(w, struct net_device_context, dwork.work);
        net_device = hv_get_drvdata(ndev_ctx->device_ctx);
        rdev = net_device->extension;
        net = net_device->ndev;

        if (rdev->link_state) {
                netif_carrier_off(net);
                notify = false;
        } else {
                netif_carrier_on(net);
                notify = true;
        }

        rtnl_unlock();

        if (notify)
                netdev_notify_peers(net);
}

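/*
 * Probe: allocate the netdev and its context, bring up the RNDIS device
 * to learn the host-assigned MAC address, then register the netdev.
 */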
static int netvsc_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
{
        struct net_device *net = NULL;
        struct net_device_context *net_device_ctx;
        struct netvsc_device_info device_info;
        int ret;

        net = alloc_etherdev(sizeof(struct net_device_context));
        if (!net)
                return -ENOMEM;

        net_device_ctx = netdev_priv(net);
        net_device_ctx->device_ctx = dev;
        hv_set_drvdata(dev, net);
        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
        INIT_WORK(&net_device_ctx->work, do_set_multicast);

        net->netdev_ops = &device_ops;

        /* TODO: Add GSO and Checksum offload */
        net->hw_features = NETIF_F_SG;
        net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG;

        SET_ETHTOOL_OPS(net, &ethtool_ops);
        SET_NETDEV_DEV(net, &dev->device);

        /* Notify the netvsc driver of the new device */
        device_info.ring_size = ring_size;
        ret = rndis_filter_device_add(dev, &device_info);
        if (ret != 0) {
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
                free_netdev(net);
                hv_set_drvdata(dev, NULL);
                return ret;
        }
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

        ret = register_netdev(net);
        if (ret != 0) {
                pr_err("Unable to register netdev.\n");
                rndis_filter_device_remove(dev);
                free_netdev(net);
        }

        return ret;
}

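/*
 * Remove: flush the deferred work, stop outbound traffic, unregister the
 * netdev, and only then tear down the RNDIS device underneath it.
 */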
static int netvsc_remove(struct hv_device *dev)
{
        struct net_device *net;
        struct net_device_context *ndev_ctx;
        struct netvsc_device *net_device;

        net_device = hv_get_drvdata(dev);
        net = net_device->ndev;

        if (net == NULL) {
                dev_err(&dev->device, "No net device to remove\n");
                return 0;
        }

        net_device->start_remove = true;

        ndev_ctx = netdev_priv(net);
        cancel_delayed_work_sync(&ndev_ctx->dwork);
        cancel_work_sync(&ndev_ctx->work);

        /* Stop outbound asap */
        netif_tx_disable(net);

        unregister_netdev(net);

        /*
         * Call to the vsc driver to let it know that the device is being
         * removed
         */
        rndis_filter_device_remove(dev);

        free_netdev(net);
        return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
        /* Network guid */
        { HV_NIC_GUID, },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
        .name = KBUILD_MODNAME,
        .id_table = id_table,
        .probe = netvsc_probe,
        .remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
        vmbus_driver_unregister(&netvsc_drv);
}

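/* Clamp the ring_size module parameter to a sane minimum. */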
static int __init netvsc_drv_init(void)
{
        if (ring_size < RING_SIZE_MIN) {
                ring_size = RING_SIZE_MIN;
                pr_info("Increased ring_size to %d (min allowed)\n",
                        ring_size);
        }
        return vmbus_driver_register(&netvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);