ixgbe: Make FCoE allocation and configuration closer to how rings work
authorAlexander Duyck <alexander.h.duyck@intel.com>
Sat, 5 May 2012 05:32:47 +0000 (05:32 +0000)
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>
Fri, 20 Jul 2012 01:18:49 +0000 (18:18 -0700)
This patch changes the behavior of the FCoE configuration so that it is
much closer to how the main body of the ixgbe driver works for ring
allocation.

The first piece is the ixgbe_fcoe_ddp_enable/disable calls.  These allocate
the percpu values and if successful set the fcoe_ddp_xid value indicating
that we can support DDP.

The next piece is the ixgbe_setup/free_ddp_resources calls.  These are
called on open/close and will allocate and free the DMA pools.

Finally ixgbe_configure_fcoe is now just register configuration.  It can go
through and enable the registers for the FCoE redirection offload, and FIP
configuration without any interference from the DDP pool allocation.

The net result of all this is twofold.  First it adds a certain amount of
exception handling.  So for example if ixgbe_setup_fcoe_ddp_resources fails
we will actually generate an error in open and refuse to bring up the
interface.

Secondly it provides a much more graceful failure case than the previous
model which would skip setting up the registers for FCoE on failure to
allocate DDP resources leaving no Rx functionality enabled instead of just
disabling DDP.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c

index c2365005b54515f9973e71638b5b253b65cbed78..5a286adc65c0241518d39d2a80c4a9cf0670654f 100644 (file)
@@ -691,7 +691,6 @@ extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
                     struct ixgbe_tx_buffer *first,
                     u8 *hdr_len);
-extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                          union ixgbe_adv_rx_desc *rx_desc,
                          struct sk_buff *skb);
@@ -700,6 +699,8 @@ extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
 extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
                                 struct scatterlist *sgl, unsigned int sgc);
 extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_enable(struct net_device *netdev);
 extern int ixgbe_fcoe_disable(struct net_device *netdev);
 #ifdef CONFIG_IXGBE_DCB
index e7c463c7b6a171abdbca9948b896984bdbe00e0d..e79ba3927344fba105e5b494427aa3d7fd6a6a3b 100644 (file)
@@ -578,17 +578,6 @@ static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
        ddp_pool->pool = NULL;
 }
 
-static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
-{
-       unsigned int cpu;
-
-       for_each_possible_cpu(cpu)
-               ixgbe_fcoe_dma_pool_free(fcoe, cpu);
-
-       free_percpu(fcoe->ddp_pool);
-       fcoe->ddp_pool = NULL;
-}
-
 static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
                                     struct device *dev,
                                     unsigned int cpu)
@@ -612,21 +601,6 @@ static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
        return 0;
 }
 
-static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
-{
-       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-       struct device *dev = &adapter->pdev->dev;
-       unsigned int cpu;
-
-       fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
-       if (!fcoe->ddp_pool)
-               return;
-
-       /* allocate pci pool for each cpu */
-       for_each_possible_cpu(cpu)
-               ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
-}
-
 /**
  * ixgbe_configure_fcoe - configures registers for fcoe at start
  * @adapter: ptr to ixgbe adapter
@@ -637,39 +611,14 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
  */
 void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 {
-       int i, fcoe_q, fcoe_i;
+       struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
        struct ixgbe_hw *hw = &adapter->hw;
-       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
-       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+       int i, fcoe_q, fcoe_i;
        u32 etqf;
 
-       if (!fcoe->ddp_pool) {
-               spin_lock_init(&fcoe->lock);
-
-               ixgbe_fcoe_ddp_pools_alloc(adapter);
-               if (!fcoe->ddp_pool) {
-                       e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
-                       return;
-               }
-
-               /* Extra buffer to be shared by all DDPs for HW work around */
-               fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
-               if (fcoe->extra_ddp_buffer == NULL) {
-                       e_err(drv, "failed to allocated extra DDP buffer\n");
-                       goto out_ddp_pools;
-               }
-
-               fcoe->extra_ddp_buffer_dma =
-                       dma_map_single(&adapter->pdev->dev,
-                                      fcoe->extra_ddp_buffer,
-                                      IXGBE_FCBUFF_MIN,
-                                      DMA_FROM_DEVICE);
-               if (dma_mapping_error(&adapter->pdev->dev,
-                                     fcoe->extra_ddp_buffer_dma)) {
-                       e_err(drv, "failed to map extra DDP buffer\n");
-                       goto out_extra_ddp_buffer;
-               }
-       }
+       /* leave registers unconfigured if FCoE is disabled */
+       if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+               return;
 
        /* Enable L2 EtherType filter for FCoE, necessary for FCoE Rx CRC */
        etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
@@ -682,7 +631,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
 
        /* Use one or more Rx queues for FCoE by redirection table */
        for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
-               fcoe_i = f->offset + (i % f->indices);
+               fcoe_i = fcoe->offset + (i % fcoe->indices);
                fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
                fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
                IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
@@ -698,7 +647,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);
 
        /* Send FIP frames to the first FCoE queue */
-       fcoe_q = adapter->rx_ring[f->offset]->reg_idx;
+       fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
        IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
                        IXGBE_ETQS_QUEUE_EN |
                        (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
@@ -707,40 +656,122 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
                        IXGBE_FCRXCTRL_FCCRCBO |
                        (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
-
-       return;
-out_extra_ddp_buffer:
-       kfree(fcoe->extra_ddp_buffer);
-out_ddp_pools:
-       ixgbe_fcoe_ddp_pools_free(fcoe);
 }
 
 /**
- * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
+ * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
  * @adapter : ixgbe adapter
  *
  * Cleans up outstanding ddp context resources
  *
  * Returns : none
  */
-void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
 {
-       int i;
        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+       int cpu, i;
 
+       /* do nothing if no DDP pools were allocated */
        if (!fcoe->ddp_pool)
                return;
 
        for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
                ixgbe_fcoe_ddp_put(adapter->netdev, i);
 
+       for_each_possible_cpu(cpu)
+               ixgbe_fcoe_dma_pool_free(fcoe, cpu);
+
        dma_unmap_single(&adapter->pdev->dev,
                         fcoe->extra_ddp_buffer_dma,
                         IXGBE_FCBUFF_MIN,
                         DMA_FROM_DEVICE);
        kfree(fcoe->extra_ddp_buffer);
 
-       ixgbe_fcoe_ddp_pools_free(fcoe);
+       fcoe->extra_ddp_buffer = NULL;
+       fcoe->extra_ddp_buffer_dma = 0;
+}
+
+/**
+ * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
+ * @adapter: ixgbe adapter
+ *
+ * Sets up ddp context resources
+ *
+ * Returns : 0 indicates success or -EINVAL on failure
+ */
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+       struct device *dev = &adapter->pdev->dev;
+       void *buffer;
+       dma_addr_t dma;
+       unsigned int cpu;
+
+       /* do nothing if no DDP pools were allocated */
+       if (!fcoe->ddp_pool)
+               return 0;
+
+       /* Extra buffer to be shared by all DDPs for HW work around */
+       buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+       if (!buffer) {
+               e_err(drv, "failed to allocate extra DDP buffer\n");
+               return -ENOMEM;
+       }
+
+       dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
+       if (dma_mapping_error(dev, dma)) {
+               e_err(drv, "failed to map extra DDP buffer\n");
+               kfree(buffer);
+               return -ENOMEM;
+       }
+
+       fcoe->extra_ddp_buffer = buffer;
+       fcoe->extra_ddp_buffer_dma = dma;
+
+       /* allocate pci pool for each cpu */
+       for_each_possible_cpu(cpu) {
+               int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
+               if (!err)
+                       continue;
+
+               e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
+               ixgbe_free_fcoe_ddp_resources(adapter);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+       if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
+               return -EINVAL;
+
+       fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
+
+       if (!fcoe->ddp_pool) {
+               e_err(drv, "failed to allocate percpu DDP resources\n");
+               return -ENOMEM;
+       }
+
+       adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+
+       return 0;
+}
+
+static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+       adapter->netdev->fcoe_ddp_xid = 0;
+
+       if (!fcoe->ddp_pool)
+               return;
+
+       free_percpu(fcoe->ddp_pool);
+       fcoe->ddp_pool = NULL;
 }
 
 /**
@@ -753,40 +784,37 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
  */
 int ixgbe_fcoe_enable(struct net_device *netdev)
 {
-       int rc = -EINVAL;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
+       atomic_inc(&fcoe->refcnt);
 
        if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
-               goto out_enable;
+               return -EINVAL;
 
-       atomic_inc(&fcoe->refcnt);
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-               goto out_enable;
+               return -EINVAL;
 
        e_info(drv, "Enabling FCoE offload features.\n");
        if (netif_running(netdev))
                netdev->netdev_ops->ndo_stop(netdev);
 
-       ixgbe_clear_interrupt_scheme(adapter);
+       /* Allocate per CPU memory to track DDP pools */
+       ixgbe_fcoe_ddp_enable(adapter);
 
+       /* enable FCoE and notify stack */
        adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
-       adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
-       netdev->features |= NETIF_F_FCOE_CRC;
-       netdev->features |= NETIF_F_FSO;
-       netdev->features |= NETIF_F_FCOE_MTU;
-       netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+       netdev->features |= NETIF_F_FSO | NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU;
+       netdev_features_change(netdev);
 
+       /* release existing queues and reallocate them */
+       ixgbe_clear_interrupt_scheme(adapter);
        ixgbe_init_interrupt_scheme(adapter);
-       netdev_features_change(netdev);
 
        if (netif_running(netdev))
                netdev->netdev_ops->ndo_open(netdev);
-       rc = 0;
 
-out_enable:
-       return rc;
+       return 0;
 }
 
 /**
@@ -799,41 +827,37 @@ out_enable:
  */
 int ixgbe_fcoe_disable(struct net_device *netdev)
 {
-       int rc = -EINVAL;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
-       if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
-               goto out_disable;
+       if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
+               return -EINVAL;
 
        if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
-               goto out_disable;
-
-       if (!atomic_dec_and_test(&fcoe->refcnt))
-               goto out_disable;
+               return -EINVAL;
 
        e_info(drv, "Disabling FCoE offload features.\n");
-       netdev->features &= ~NETIF_F_FCOE_CRC;
-       netdev->features &= ~NETIF_F_FSO;
-       netdev->features &= ~NETIF_F_FCOE_MTU;
-       netdev->fcoe_ddp_xid = 0;
-       netdev_features_change(netdev);
-
        if (netif_running(netdev))
                netdev->netdev_ops->ndo_stop(netdev);
 
-       ixgbe_clear_interrupt_scheme(adapter);
+       /* Free per CPU memory to track DDP pools */
+       ixgbe_fcoe_ddp_disable(adapter);
+
+       /* disable FCoE and notify stack */
        adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-       adapter->ring_feature[RING_F_FCOE].indices = 0;
-       ixgbe_cleanup_fcoe(adapter);
+       netdev->features &= ~(NETIF_F_FCOE_CRC |
+                             NETIF_F_FSO |
+                             NETIF_F_FCOE_MTU);
+
+       netdev_features_change(netdev);
+
+       /* release existing queues and reallocate them */
+       ixgbe_clear_interrupt_scheme(adapter);
        ixgbe_init_interrupt_scheme(adapter);
 
        if (netif_running(netdev))
                netdev->netdev_ops->ndo_open(netdev);
-       rc = 0;
 
-out_disable:
-       return rc;
+       return 0;
 }
 
 /**
index 5d028739fe3f10ecbdce59bca896328f9fc96eb1..bf724da99375bfb57e558280609f338d7de6ad7a 100644 (file)
@@ -77,7 +77,7 @@ struct ixgbe_fcoe {
        atomic_t refcnt;
        spinlock_t lock;
        struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
-       unsigned char *extra_ddp_buffer;
+       void *extra_ddp_buffer;
        dma_addr_t extra_ddp_buffer_dma;
        unsigned long mode;
 #ifdef CONFIG_IXGBE_DCB
index c66625945534abc5d075dddb1d715ef261f368b2..e006c05580ec7c6833c621da686dff1a7c271eb0 100644 (file)
@@ -3807,12 +3807,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
        ixgbe_set_rx_mode(adapter->netdev);
        ixgbe_restore_vlan(adapter);
 
-#ifdef IXGBE_FCOE
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-               ixgbe_configure_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
@@ -3842,6 +3836,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
 
        ixgbe_configure_virtualization(adapter);
 
+#ifdef IXGBE_FCOE
+       /* configure FCoE L2 filters, redirection table, and Rx control */
+       ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
        ixgbe_configure_tx(adapter);
        ixgbe_configure_rx(adapter);
 }
@@ -4434,6 +4433,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                break;
        }
 
+#ifdef IXGBE_FCOE
+       /* FCoE support exists, always init the FCoE lock */
+       spin_lock_init(&adapter->fcoe.lock);
+
+#endif
        /* n-tuple support exists, always init our spinlock */
        spin_lock_init(&adapter->fdir_perfect_lock);
 
@@ -4662,7 +4666,11 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
                goto err_setup_rx;
        }
 
-       return 0;
+#ifdef IXGBE_FCOE
+       err = ixgbe_setup_fcoe_ddp_resources(adapter);
+       if (!err)
+#endif
+               return 0;
 err_setup_rx:
        /* rewind the index freeing the rings as we go */
        while (i--)
@@ -4741,6 +4749,10 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 {
        int i;
 
+#ifdef IXGBE_FCOE
+       ixgbe_free_fcoe_ddp_resources(adapter);
+
+#endif
        for (i = 0; i < adapter->num_rx_queues; i++)
                if (adapter->rx_ring[i]->desc)
                        ixgbe_free_rx_resources(adapter->rx_ring[i]);
@@ -7235,11 +7247,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                        if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
                                adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
                }
-       }
-       if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
-               netdev->vlan_features |= NETIF_F_FCOE_CRC;
-               netdev->vlan_features |= NETIF_F_FSO;
-               netdev->vlan_features |= NETIF_F_FCOE_MTU;
+
+               adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+               netdev->vlan_features |= NETIF_F_FSO |
+                                        NETIF_F_FCOE_CRC |
+                                        NETIF_F_FCOE_MTU;
        }
 #endif /* IXGBE_FCOE */
        if (pci_using_dac) {
@@ -7436,12 +7449,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        ixgbe_sysfs_exit(adapter);
 #endif /* CONFIG_IXGBE_HWMON */
 
-#ifdef IXGBE_FCOE
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-               ixgbe_cleanup_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
        /* remove the added san mac */
        ixgbe_del_sanmac_netdev(netdev);