xen-netfront: recreate queues correctly when reconnecting
author David Vrabel <david.vrabel@citrix.com>
Wed, 18 Jun 2014 09:47:28 +0000 (10:47 +0100)
committer David S. Miller <davem@davemloft.net>
Sat, 21 Jun 2014 23:14:26 +0000 (16:14 -0700)
When reconnecting to the backend (after a resume/migration, for example),
a different number of queues may be required (since the guest may have
moved to a different host with different capabilities).  During the
reconnection the old queues are torn down and new ones created.
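
The reconnect path after this patch reduces to the following shape (a
condensed sketch based on the diff below, not the full function; the
xenstore negotiation and ring setup are elided):

    static int talk_to_netback(struct xenbus_device *dev,
                               struct netfront_info *info)
    {
            unsigned int num_queues; /* negotiated via xenstore, elided */
            int err;

            /* Tear down queues left over from before the resume/migration. */
            if (info->queues)
                    xennet_destroy_queues(info);

            /* Allocate and initialise queues for the new backend. */
            err = xennet_create_queues(info, num_queues);
            if (err < 0)
                    return err; /* the real code jumps to cleanup instead */

            /* ... create shared rings and event channels for each queue ... */
            return 0;
    }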

Introduce xennet_create_queues() and xennet_destroy_queues() to fix
three bugs in the reconnection path (the old behaviour is sketched
after this list):

- The old info->queues was leaked.
- The old queues' napi instances were not deleted.
- The new queues' napi instances were left disabled (which meant no
  packets could be received).
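
For reference, the pre-patch reconnect path effectively did the
following (a simplified reconstruction from the removed lines in the
diff below):

    /* Bug 1: the old info->queues is overwritten without kfree(). */
    info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
                           GFP_KERNEL);

    for (i = 0; i < num_queues; i++) {
            /* xennet_init_queue() called netif_napi_add() itself, so
             * the old napi instances were never netif_napi_del()'d
             * (bug 2), and nothing re-enabled napi on a device that
             * was already running (bug 3), so received packets were
             * never polled.
             */
            err = xennet_init_queue(&info->queues[i]);
            /* ... */
    }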

The xennet_destroy_queues() call is deferred until the reconnection,
rather than being done at disconnection time (in
xennet_disconnect_backend()), because napi_disable() might sleep (see
the sketch below).
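
For reference, napi_disable() in kernels of this vintage is roughly
the following inline from include/linux/netdevice.h; the msleep()
while waiting for an in-flight poll to finish is what makes it unsafe
to call from a context that cannot sleep:

    static inline void napi_disable(struct napi_struct *n)
    {
            might_sleep();
            set_bit(NAPI_STATE_DISABLE, &n->state);

            /* Wait for any in-progress poll of this napi to complete. */
            while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
                    msleep(1);

            clear_bit(NAPI_STATE_DISABLE, &n->state);
    }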

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/xen-netfront.c

index daaf1e56e41e30cf44f7d8fdb8a162ddfe668693..2ccb4a02368b9fab04b799f3ea1f8de72d17cbe6 100644 (file)
@@ -1699,8 +1699,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
                goto exit_free_tx;
        }
 
-       netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
-
        return 0;
 
  exit_free_tx:
@@ -1791,6 +1789,70 @@ error:
        return err;
 }
 
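+/* Tear down all queues: disable (if the device is running) and delete
+ * each queue's napi instance, then free the queue array.  Must be
+ * called from a context that may sleep, since napi_disable() can
+ * sleep.
+ */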
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+       unsigned int i;
+
+       rtnl_lock();
+
+       for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+               struct netfront_queue *queue = &info->queues[i];
+
+               if (netif_running(info->netdev))
+                       napi_disable(&queue->napi);
+               netif_napi_del(&queue->napi);
+       }
+
+       rtnl_unlock();
+
+       kfree(info->queues);
+       info->queues = NULL;
+}
+
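+/* Allocate and initialise num_queues queues, enabling napi on each
+ * one if the device is already running.  On partial failure the
+ * number of queues is reduced to those successfully initialised.
+ */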
+static int xennet_create_queues(struct netfront_info *info,
+                               unsigned int num_queues)
+{
+       unsigned int i;
+       int ret;
+
+       info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+                              GFP_KERNEL);
+       if (!info->queues)
+               return -ENOMEM;
+
+       rtnl_lock();
+
+       for (i = 0; i < num_queues; i++) {
+               struct netfront_queue *queue = &info->queues[i];
+
+               queue->id = i;
+               queue->info = info;
+
+               ret = xennet_init_queue(queue);
+               if (ret < 0) {
+                       dev_warn(&info->netdev->dev, "only created %d queues\n",
+                                i);
+                       num_queues = i;
+                       break;
+               }
+
+               netif_napi_add(queue->info->netdev, &queue->napi,
+                              xennet_poll, 64);
+               if (netif_running(info->netdev))
+                       napi_enable(&queue->napi);
+       }
+
+       netif_set_real_num_tx_queues(info->netdev, num_queues);
+
+       rtnl_unlock();
+
+       if (num_queues == 0) {
+               dev_err(&info->netdev->dev, "no queues\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_netback(struct xenbus_device *dev,
                           struct netfront_info *info)
@@ -1827,42 +1889,20 @@ static int talk_to_netback(struct xenbus_device *dev,
                goto out;
        }
 
-       /* Allocate array of queues */
-       info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL);
-       if (!info->queues) {
-               err = -ENOMEM;
-               goto out;
-       }
-       rtnl_lock();
-       netif_set_real_num_tx_queues(info->netdev, num_queues);
-       rtnl_unlock();
+       if (info->queues)
+               xennet_destroy_queues(info);
+
+       err = xennet_create_queues(info, num_queues);
+       if (err < 0)
+               goto destroy_ring;
 
        /* Create shared ring, alloc event channel -- for each queue */
        for (i = 0; i < num_queues; ++i) {
                queue = &info->queues[i];
-               queue->id = i;
-               queue->info = info;
-               err = xennet_init_queue(queue);
-               if (err) {
-                       /* xennet_init_queue() cleans up after itself on failure,
-                        * but we still have to clean up any previously initialised
-                        * queues. If i > 0, set num_queues to i, then goto
-                        * destroy_ring, which calls xennet_disconnect_backend()
-                        * to tidy up.
-                        */
-                       if (i > 0) {
-                               rtnl_lock();
-                               netif_set_real_num_tx_queues(info->netdev, i);
-                               rtnl_unlock();
-                               goto destroy_ring;
-                       } else {
-                               goto out;
-                       }
-               }
                err = setup_netfront(dev, queue, feature_split_evtchn);
                if (err) {
-                       /* As for xennet_init_queue(), setup_netfront() will tidy
-                        * up the current queue on error, but we need to clean up
+                       /* setup_netfront() will tidy up the current
+                        * queue on error, but we need to clean up
                         * those already allocated.
                         */
                        if (i > 0) {