Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index bb579a6128c8ce883ac7156ea24ac541d4901e38..e73e30bac10e268a6f031d717a3551cda1ddeeae 100644
@@ -16,6 +16,7 @@
 #include <linux/udp.h>
 #include <linux/prefetch.h>
 #include <linux/moduleparam.h>
+#include <linux/iommu.h>
 #include <net/ip.h>
 #include <net/checksum.h>
 #include "net_driver.h"
 #include "selftest.h"
 #include "workarounds.h"
 
-/* Number of RX descriptors pushed at once. */
-#define EFX_RX_BATCH  8
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
 
-/* Maximum size of a buffer sharing a page */
-#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
+/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
+#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
 
 /* Size of buffer allocated for skb header area. */
 #define EFX_SKB_HEADERS  64u
 
-/*
- * rx_alloc_method - RX buffer allocation method
- *
- * This driver supports two methods for allocating and using RX buffers:
- * each RX buffer may be backed by an skb or by an order-n page.
- *
- * When GRO is in use then the second method has a lower overhead,
- * since we don't have to allocate then free skbs on reassembled frames.
- *
- * Values:
- *   - RX_ALLOC_METHOD_AUTO = 0
- *   - RX_ALLOC_METHOD_SKB  = 1
- *   - RX_ALLOC_METHOD_PAGE = 2
- *
- * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
- * controlled by the parameters below.
- *
- *   - Since pushing and popping descriptors are separated by the rx_queue
- *     size, so the watermarks should be ~rxd_size.
- *   - The performance win by using page-based allocation for GRO is less
- *     than the performance hit of using page-based allocation of non-GRO,
- *     so the watermarks should reflect this.
- *
- * Per channel we maintain a single variable, updated by each channel:
- *
- *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
- *                      RX_ALLOC_FACTOR_SKB)
- * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
- * limits the hysteresis), and update the allocation strategy:
- *
- *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
- *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
- */
-static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
-
-#define RX_ALLOC_LEVEL_GRO 0x2000
-#define RX_ALLOC_LEVEL_MAX 0x3000
-#define RX_ALLOC_FACTOR_GRO 1
-#define RX_ALLOC_FACTOR_SKB (-2)
-
 /* This is the percentage fill level below which new RX descriptors
  * will be added to the RX descriptor ring.
  */
 static unsigned int rx_refill_threshold;
 
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+                                     EFX_RX_USR_BUF_SIZE)
+
 /*
  * RX maximum head room required.
  *
- * This must be at least 1 to prevent overflow and at least 2 to allow
- * pipelined receives.
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
  */
-#define EFX_RXD_HEAD_ROOM 2
+#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
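
As a quick illustration of the head-room sizing above, here is a standalone
userspace sketch; the framing overhead and usable buffer size are assumed
example values, not constants taken from this driver.

/* head_room_sketch.c - the EFX_RXD_HEAD_ROOM sizing worked through with
 * assumed example values: 18 bytes of Ethernet framing overhead and an
 * 1824-byte usable RX buffer size.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)   (((n) + (d) - 1) / (d))

#define EXAMPLE_OVERHEAD     18u     /* assumed framing overhead */
#define EXAMPLE_USR_BUF_SIZE 1824u   /* assumed usable buffer size */

static void show(unsigned int mtu)
{
        unsigned int frame_len = mtu + EXAMPLE_OVERHEAD;
        /* Each packet can consume up to ceil(frame_len / buf_size) buffers */
        unsigned int max_frags = DIV_ROUND_UP(frame_len, EXAMPLE_USR_BUF_SIZE);
        /* 1 to prevent overflow, plus one packet-worth of descriptors */
        unsigned int head_room = 1 + max_frags;

        printf("MTU %5u: max frags %u, head room %u\n",
               mtu, max_frags, head_room);
}

int main(void)
{
        show(1500);
        show(9000);
        return 0;
}
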
 
-/* Offset of ethernet header within page */
-static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
-                                            struct efx_rx_buffer *buf)
+static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
 {
-       return buf->page_offset + efx->type->rx_buffer_hash_size;
-}
-static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
-{
-       return PAGE_SIZE << efx->rx_buffer_order;
-}
-
-static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
-{
-       if (buf->flags & EFX_RX_BUF_PAGE)
-               return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
-       else
-               return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
+       return page_address(buf->page) + buf->page_offset;
 }
 
 static inline u32 efx_rx_buf_hash(const u8 *eh)
@@ -119,66 +74,81 @@ static inline u32 efx_rx_buf_hash(const u8 *eh)
 #endif
 }
 
-/**
- * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
- *
- * @rx_queue:          Efx RX queue
- *
- * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
- * struct efx_rx_buffer for each one. Return a negative error code or 0
- * on success. May fail having only inserted fewer than EFX_RX_BATCH
- * buffers.
- */
-static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
+static inline struct efx_rx_buffer *
+efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
+{
+       if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+               return efx_rx_buffer(rx_queue, 0);
+       else
+               return rx_buf + 1;
+}
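
Below is a minimal standalone sketch of how a next-buffer walk over a
power-of-two ring of this shape wraps from the final entry back to entry 0;
the ring size and buffer type are illustrative only.

/* ring_walk_sketch.c - stepping through a power-of-two ring the way
 * efx_rx_buf_next() does, wrapping explicitly at the last entry.
 */
#include <stdio.h>

#define EXAMPLE_RING_SIZE 8u                    /* must be a power of two */
#define EXAMPLE_PTR_MASK  (EXAMPLE_RING_SIZE - 1)

struct example_buf {
        unsigned int id;
};

static struct example_buf ring[EXAMPLE_RING_SIZE];

static struct example_buf *buf_next(struct example_buf *buf)
{
        /* Wrap back to entry 0 once we reach the final entry */
        if (buf == &ring[EXAMPLE_PTR_MASK])
                return &ring[0];
        return buf + 1;
}

int main(void)
{
        struct example_buf *buf = &ring[6];
        unsigned int i;

        for (i = 0; i < EXAMPLE_RING_SIZE; i++)
                ring[i].id = i;

        /* Walk four entries starting near the end: prints 6 7 0 1 */
        for (i = 0; i < 4; i++) {
                printf("%u ", buf->id);
                buf = buf_next(buf);
        }
        printf("\n");
        return 0;
}
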
+
+static inline void efx_sync_rx_buffer(struct efx_nic *efx,
+                                     struct efx_rx_buffer *rx_buf,
+                                     unsigned int len)
+{
+       dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
+                               DMA_FROM_DEVICE);
+}
+
+void efx_rx_config_page_split(struct efx_nic *efx)
+{
+       efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN,
+                                     L1_CACHE_BYTES);
+       efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+               ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+                efx->rx_page_buf_step);
+       efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+               efx->rx_bufs_per_page;
+       efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
+                                              efx->rx_bufs_per_page);
+}
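
The following standalone sketch works through the page-split arithmetic
above; the page size, cache-line size, IP alignment, page-state header size
and DMA length are assumed example values rather than the driver's actual
constants, and only order-0 pages are considered.

/* page_split_sketch.c - how rx_page_buf_step, rx_bufs_per_page, the buffer
 * truesize and the pages-per-batch count fall out of the calculation in
 * efx_rx_config_page_split(), using assumed example values.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)        (((x) + (a) - 1) & ~((a) - 1))

#define EXAMPLE_PAGE_SIZE     4096u
#define EXAMPLE_CACHE_LINE    64u
#define EXAMPLE_IP_ALIGN      2u
#define EXAMPLE_STATE_SIZE    16u   /* stand-in for struct efx_rx_page_state */
#define EXAMPLE_RX_DMA_LEN    1742u
#define EXAMPLE_BATCH         8u    /* EFX_RX_PREFERRED_BATCH */

int main(void)
{
        unsigned int buf_step = ALIGN_UP(EXAMPLE_RX_DMA_LEN + EXAMPLE_IP_ALIGN,
                                         EXAMPLE_CACHE_LINE);
        unsigned int bufs_per_page =
                (EXAMPLE_PAGE_SIZE - EXAMPLE_STATE_SIZE) / buf_step;
        unsigned int truesize = EXAMPLE_PAGE_SIZE / bufs_per_page;
        unsigned int pages_per_batch =
                (EXAMPLE_BATCH + bufs_per_page - 1) / bufs_per_page;

        printf("buffer step:     %u bytes\n", buf_step);
        printf("buffers/page:    %u\n", bufs_per_page);
        printf("truesize/buffer: %u bytes\n", truesize);
        printf("pages/batch:     %u\n", pages_per_batch);
        return 0;
}
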
+
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
 {
        struct efx_nic *efx = rx_queue->efx;
-       struct net_device *net_dev = efx->net_dev;
-       struct efx_rx_buffer *rx_buf;
-       struct sk_buff *skb;
-       int skb_len = efx->rx_buffer_len;
-       unsigned index, count;
+       struct page *page;
+       struct efx_rx_page_state *state;
+       unsigned index;
 
-       for (count = 0; count < EFX_RX_BATCH; ++count) {
-               index = rx_queue->added_count & rx_queue->ptr_mask;
-               rx_buf = efx_rx_buffer(rx_queue, index);
-
-               rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
-               if (unlikely(!skb))
-                       return -ENOMEM;
-
-               /* Adjust the SKB for padding */
-               skb_reserve(skb, NET_IP_ALIGN);
-               rx_buf->len = skb_len - NET_IP_ALIGN;
-               rx_buf->flags = 0;
-
-               rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
-                                                 skb->data, rx_buf->len,
-                                                 DMA_FROM_DEVICE);
-               if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
-                                              rx_buf->dma_addr))) {
-                       dev_kfree_skb_any(skb);
-                       rx_buf->u.skb = NULL;
-                       return -EIO;
-               }
+       index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+       page = rx_queue->page_ring[index];
+       if (page == NULL)
+               return NULL;
+
+       rx_queue->page_ring[index] = NULL;
+       /* page_remove cannot exceed page_add. */
+       if (rx_queue->page_remove != rx_queue->page_add)
+               ++rx_queue->page_remove;
 
-               ++rx_queue->added_count;
-               ++rx_queue->alloc_skb_count;
+       /* If page_count is 1 then we hold the only reference to this page. */
+       if (page_count(page) == 1) {
+               ++rx_queue->page_recycle_count;
+               return page;
+       } else {
+               state = page_address(page);
+               dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+                              PAGE_SIZE << efx->rx_buffer_order,
+                              DMA_FROM_DEVICE);
+               put_page(page);
+               ++rx_queue->page_recycle_failed;
        }
 
-       return 0;
+       return NULL;
 }
 
 /**
- * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
+ * efx_init_rx_buffers - create a batch of page-based RX buffers
  *
  * @rx_queue:          Efx RX queue
  *
- * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
- * and populates struct efx_rx_buffers for each one. Return a negative error
- * code or 0 on success. If a single page can be split between two buffers,
- * then the page will either be inserted fully, or not at at all.
+ * This allocates a batch of pages, maps them for DMA, and populates
+ * struct efx_rx_buffers for each one. Return a negative error code or
+ * 0 on success. If a single page can be used for multiple buffers,
+ * then the page will either be inserted fully, or not at all.
  */
-static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
 {
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
@@ -188,150 +158,140 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
        dma_addr_t dma_addr;
        unsigned index, count;
 
-       /* We can split a page between two buffers */
-       BUILD_BUG_ON(EFX_RX_BATCH & 1);
-
-       for (count = 0; count < EFX_RX_BATCH; ++count) {
-               page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
-                                  efx->rx_buffer_order);
-               if (unlikely(page == NULL))
-                       return -ENOMEM;
-               dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
-                                       efx_rx_buf_size(efx),
-                                       DMA_FROM_DEVICE);
-               if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
-                       __free_pages(page, efx->rx_buffer_order);
-                       return -EIO;
+       count = 0;
+       do {
+               page = efx_reuse_page(rx_queue);
+               if (page == NULL) {
+                       page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+                                          efx->rx_buffer_order);
+                       if (unlikely(page == NULL))
+                               return -ENOMEM;
+                       dma_addr =
+                               dma_map_page(&efx->pci_dev->dev, page, 0,
+                                            PAGE_SIZE << efx->rx_buffer_order,
+                                            DMA_FROM_DEVICE);
+                       if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+                                                      dma_addr))) {
+                               __free_pages(page, efx->rx_buffer_order);
+                               return -EIO;
+                       }
+                       state = page_address(page);
+                       state->dma_addr = dma_addr;
+               } else {
+                       state = page_address(page);
+                       dma_addr = state->dma_addr;
                }
-               state = page_address(page);
-               state->refcnt = 0;
-               state->dma_addr = dma_addr;
 
                dma_addr += sizeof(struct efx_rx_page_state);
                page_offset = sizeof(struct efx_rx_page_state);
 
-       split:
-               index = rx_queue->added_count & rx_queue->ptr_mask;
-               rx_buf = efx_rx_buffer(rx_queue, index);
-               rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
-               rx_buf->u.page = page;
-               rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
-               rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
-               rx_buf->flags = EFX_RX_BUF_PAGE;
-               ++rx_queue->added_count;
-               ++rx_queue->alloc_page_count;
-               ++state->refcnt;
-
-               if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
-                       /* Use the second half of the page */
+               do {
+                       index = rx_queue->added_count & rx_queue->ptr_mask;
+                       rx_buf = efx_rx_buffer(rx_queue, index);
+                       rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+                       rx_buf->page = page;
+                       rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
+                       rx_buf->len = efx->rx_dma_len;
+                       rx_buf->flags = 0;
+                       ++rx_queue->added_count;
                        get_page(page);
-                       dma_addr += (PAGE_SIZE >> 1);
-                       page_offset += (PAGE_SIZE >> 1);
-                       ++count;
-                       goto split;
-               }
-       }
+                       dma_addr += efx->rx_page_buf_step;
+                       page_offset += efx->rx_page_buf_step;
+               } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
+
+               rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
+       } while (++count < efx->rx_pages_per_batch);
 
        return 0;
 }
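
For illustration, here is a standalone sketch of the inner carving loop
above: buffers are laid out at fixed steps within one page and only the
final one is tagged last-in-page. All sizes are assumed example values.

/* page_carve_sketch.c - carving one page into RX buffers the way the inner
 * do/while loop of efx_init_rx_buffers() does, then marking the final
 * buffer in the page.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE  4096u
#define EXAMPLE_STATE_SIZE 16u      /* assumed page-state header size */
#define EXAMPLE_IP_ALIGN   2u
#define EXAMPLE_BUF_STEP   1792u    /* assumed rx_page_buf_step */

#define FLAG_LAST_IN_PAGE  0x1u

struct example_buf {
        unsigned int page_offset;
        unsigned int flags;
};

int main(void)
{
        struct example_buf bufs[8];
        unsigned int page_offset = EXAMPLE_STATE_SIZE;
        unsigned int n = 0, i;

        /* Carve buffers until the next one would run off the page */
        do {
                bufs[n].page_offset = page_offset + EXAMPLE_IP_ALIGN;
                bufs[n].flags = 0;
                page_offset += EXAMPLE_BUF_STEP;
                n++;
        } while (page_offset + EXAMPLE_BUF_STEP <= EXAMPLE_PAGE_SIZE);

        /* Only the final buffer carries the last-in-page marker */
        bufs[n - 1].flags = FLAG_LAST_IN_PAGE;

        for (i = 0; i < n; i++)
                printf("buf %u: offset %u%s\n", i, bufs[i].page_offset,
                       (bufs[i].flags & FLAG_LAST_IN_PAGE) ? " (last)" : "");
        return 0;
}
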
 
+/* Unmap a DMA-mapped page.  This function is only called for the final RX
+ * buffer in a page.
+ */
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-                               struct efx_rx_buffer *rx_buf,
-                               unsigned int used_len)
+                               struct efx_rx_buffer *rx_buf)
 {
-       if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
-               struct efx_rx_page_state *state;
-
-               state = page_address(rx_buf->u.page);
-               if (--state->refcnt == 0) {
-                       dma_unmap_page(&efx->pci_dev->dev,
-                                      state->dma_addr,
-                                      efx_rx_buf_size(efx),
-                                      DMA_FROM_DEVICE);
-               } else if (used_len) {
-                       dma_sync_single_for_cpu(&efx->pci_dev->dev,
-                                               rx_buf->dma_addr, used_len,
-                                               DMA_FROM_DEVICE);
-               }
-       } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
-               dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
-                                rx_buf->len, DMA_FROM_DEVICE);
+       struct page *page = rx_buf->page;
+
+       if (page) {
+               struct efx_rx_page_state *state = page_address(page);
+               dma_unmap_page(&efx->pci_dev->dev,
+                              state->dma_addr,
+                              PAGE_SIZE << efx->rx_buffer_order,
+                              DMA_FROM_DEVICE);
        }
 }
 
-static void efx_free_rx_buffer(struct efx_nic *efx,
-                              struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
 {
-       if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
-               __free_pages(rx_buf->u.page, efx->rx_buffer_order);
-               rx_buf->u.page = NULL;
-       } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
-               dev_kfree_skb_any(rx_buf->u.skb);
-               rx_buf->u.skb = NULL;
+       if (rx_buf->page) {
+               put_page(rx_buf->page);
+               rx_buf->page = NULL;
        }
 }
 
-static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
-                              struct efx_rx_buffer *rx_buf)
+/* Attempt to recycle the page if there is an RX recycle ring; the page can
+ * only be added if this is the final RX buffer, to prevent pages being used in
+ * the descriptor ring and appearing in the recycle ring simultaneously.
+ */
+static void efx_recycle_rx_page(struct efx_channel *channel,
+                               struct efx_rx_buffer *rx_buf)
 {
-       efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
-       efx_free_rx_buffer(rx_queue->efx, rx_buf);
-}
+       struct page *page = rx_buf->page;
+       struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+       struct efx_nic *efx = rx_queue->efx;
+       unsigned index;
 
-/* Attempt to resurrect the other receive buffer that used to share this page,
- * which had previously been passed up to the kernel and freed. */
-static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
-                                   struct efx_rx_buffer *rx_buf)
-{
-       struct efx_rx_page_state *state = page_address(rx_buf->u.page);
-       struct efx_rx_buffer *new_buf;
-       unsigned fill_level, index;
-
-       /* +1 because efx_rx_packet() incremented removed_count. +1 because
-        * we'd like to insert an additional descriptor whilst leaving
-        * EFX_RXD_HEAD_ROOM for the non-recycle path */
-       fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
-       if (unlikely(fill_level > rx_queue->max_fill)) {
-               /* We could place "state" on a list, and drain the list in
-                * efx_fast_push_rx_descriptors(). For now, this will do. */
+       /* Only recycle the page after processing the final buffer. */
+       if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
                return;
-       }
 
-       ++state->refcnt;
-       get_page(rx_buf->u.page);
+       index = rx_queue->page_add & rx_queue->page_ptr_mask;
+       if (rx_queue->page_ring[index] == NULL) {
+               unsigned read_index = rx_queue->page_remove &
+                       rx_queue->page_ptr_mask;
 
-       index = rx_queue->added_count & rx_queue->ptr_mask;
-       new_buf = efx_rx_buffer(rx_queue, index);
-       new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
-       new_buf->u.page = rx_buf->u.page;
-       new_buf->len = rx_buf->len;
-       new_buf->flags = EFX_RX_BUF_PAGE;
-       ++rx_queue->added_count;
+               /* The next slot in the recycle ring is available, but
+                * increment page_remove if the read pointer currently
+                * points here.
+                */
+               if (read_index == index)
+                       ++rx_queue->page_remove;
+               rx_queue->page_ring[index] = page;
+               ++rx_queue->page_add;
+               return;
+       }
+       ++rx_queue->page_recycle_full;
+       efx_unmap_rx_buffer(efx, rx_buf);
+       put_page(rx_buf->page);
 }
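
A standalone sketch of the put side of the recycle ring shown above: a page
lands in the next slot only if that slot is currently empty, otherwise it
counts as a recycle-full event and would be unmapped and freed. Ring size
and payload type are illustrative only.

/* recycle_put_sketch.c - the slot-claiming logic of efx_recycle_rx_page(),
 * reduced to a plain array of pointers and free-running counters.
 */
#include <stddef.h>
#include <stdio.h>

#define EXAMPLE_RING_SIZE 4u
#define EXAMPLE_PTR_MASK  (EXAMPLE_RING_SIZE - 1)

static int *page_ring[EXAMPLE_RING_SIZE];
static unsigned int page_add;
static unsigned int page_remove;
static unsigned int page_recycle_full;

static void recycle_page(int *page)
{
        unsigned int index = page_add & EXAMPLE_PTR_MASK;

        if (page_ring[index] == NULL) {
                unsigned int read_index = page_remove & EXAMPLE_PTR_MASK;

                /* As in the driver: step the read pointer past a slot it
                 * currently points at before filling that slot.
                 */
                if (read_index == index)
                        ++page_remove;
                page_ring[index] = page;
                ++page_add;
                return;
        }
        ++page_recycle_full;    /* ring full: the caller frees the page */
}

int main(void)
{
        int pages[6] = { 0, 1, 2, 3, 4, 5 };
        unsigned int i;

        for (i = 0; i < 6; i++)
                recycle_page(&pages[i]);

        printf("recycled %u, dropped %u\n", page_add, page_recycle_full);
        return 0;
}
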
 
-/* Recycle the given rx buffer directly back into the rx_queue. There is
- * always room to add this buffer, because we've just popped a buffer. */
-static void efx_recycle_rx_buffer(struct efx_channel *channel,
-                                 struct efx_rx_buffer *rx_buf)
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+                              struct efx_rx_buffer *rx_buf)
 {
-       struct efx_nic *efx = channel->efx;
-       struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
-       struct efx_rx_buffer *new_buf;
-       unsigned index;
-
-       rx_buf->flags &= EFX_RX_BUF_PAGE;
-
-       if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
-           efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
-           page_count(rx_buf->u.page) == 1)
-               efx_resurrect_rx_buffer(rx_queue, rx_buf);
+       /* Release the page reference we hold for the buffer. */
+       if (rx_buf->page)
+               put_page(rx_buf->page);
+
+       /* If this is the last buffer in a page, unmap and free it. */
+       if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
+               efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+               efx_free_rx_buffer(rx_buf);
+       }
+       rx_buf->page = NULL;
+}
 
-       index = rx_queue->added_count & rx_queue->ptr_mask;
-       new_buf = efx_rx_buffer(rx_queue, index);
+/* Recycle the pages that are used by buffers that have just been received. */
+static void efx_recycle_rx_buffers(struct efx_channel *channel,
+                                  struct efx_rx_buffer *rx_buf,
+                                  unsigned int n_frags)
+{
+       struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
 
-       memcpy(new_buf, rx_buf, sizeof(*new_buf));
-       rx_buf->u.page = NULL;
-       ++rx_queue->added_count;
+       do {
+               efx_recycle_rx_page(channel, rx_buf);
+               rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+       } while (--n_frags);
 }
 
 /**
@@ -348,8 +308,8 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
  */
 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 {
-       struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
-       unsigned fill_level;
+       struct efx_nic *efx = rx_queue->efx;
+       unsigned int fill_level, batch_size;
        int space, rc = 0;
 
        /* Calculate current fill level, and exit if we don't need to fill */
@@ -364,28 +324,26 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
                        rx_queue->min_fill = fill_level;
        }
 
+       batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        space = rx_queue->max_fill - fill_level;
-       EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);
+       EFX_BUG_ON_PARANOID(space < batch_size);
 
        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
-                  " level %d to level %d using %s allocation\n",
+                  " level %d to level %d\n",
                   efx_rx_queue_index(rx_queue), fill_level,
-                  rx_queue->max_fill,
-                  channel->rx_alloc_push_pages ? "page" : "skb");
+                  rx_queue->max_fill);
+
 
        do {
-               if (channel->rx_alloc_push_pages)
-                       rc = efx_init_rx_buffers_page(rx_queue);
-               else
-                       rc = efx_init_rx_buffers_skb(rx_queue);
+               rc = efx_init_rx_buffers(rx_queue);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
                                efx_schedule_slow_fill(rx_queue);
                        goto out;
                }
-       } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
+       } while ((space -= batch_size) >= batch_size);
 
        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
@@ -408,7 +366,7 @@ void efx_rx_slow_fill(unsigned long context)
 
 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
-                                    int len, bool *leak_packet)
+                                    int len)
 {
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -428,11 +386,6 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                                  efx_rx_queue_index(rx_queue), len, max_len,
                                  efx->type->rx_buffer_padding);
-               /* If this buffer was skb-allocated, then the meta
-                * data at the end of the skb will be trashed. So
-                * we have no choice but to leak the fragment.
-                */
-               *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
@@ -448,212 +401,238 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 /* Pass a received packet up through GRO.  GRO can handle pages
  * regardless of checksum state and skbs with a good checksum.
  */
-static void efx_rx_packet_gro(struct efx_channel *channel,
-                             struct efx_rx_buffer *rx_buf,
-                             const u8 *eh)
+static void
+efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
+                 unsigned int n_frags, u8 *eh)
 {
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;
+       struct efx_nic *efx = channel->efx;
+       struct sk_buff *skb;
 
-       if (rx_buf->flags & EFX_RX_BUF_PAGE) {
-               struct efx_nic *efx = channel->efx;
-               struct page *page = rx_buf->u.page;
-               struct sk_buff *skb;
+       skb = napi_get_frags(napi);
+       if (unlikely(!skb)) {
+               while (n_frags--) {
+                       put_page(rx_buf->page);
+                       rx_buf->page = NULL;
+                       rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+               }
+               return;
+       }
 
-               rx_buf->u.page = NULL;
+       if (efx->net_dev->features & NETIF_F_RXHASH)
+               skb->rxhash = efx_rx_buf_hash(eh);
+       skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+                         CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+
+       for (;;) {
+               skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+                                  rx_buf->page, rx_buf->page_offset,
+                                  rx_buf->len);
+               rx_buf->page = NULL;
+               skb->len += rx_buf->len;
+               if (skb_shinfo(skb)->nr_frags == n_frags)
+                       break;
+
+               rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+       }
 
-               skb = napi_get_frags(napi);
-               if (!skb) {
-                       put_page(page);
-                       return;
-               }
+       skb->data_len = skb->len;
+       skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+       skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+       gro_result = napi_gro_frags(napi);
+       if (gro_result != GRO_DROP)
+               channel->irq_mod_score += 2;
+}
 
-               if (efx->net_dev->features & NETIF_F_RXHASH)
-                       skb->rxhash = efx_rx_buf_hash(eh);
+/* Allocate and construct an SKB around page fragments */
+static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
+                                    struct efx_rx_buffer *rx_buf,
+                                    unsigned int n_frags,
+                                    u8 *eh, int hdr_len)
+{
+       struct efx_nic *efx = channel->efx;
+       struct sk_buff *skb;
 
-               skb_fill_page_desc(skb, 0, page,
-                                  efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
+       /* Allocate an SKB to store the headers */
+       skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
+       if (unlikely(skb == NULL))
+               return NULL;
 
-               skb->len = rx_buf->len;
-               skb->data_len = rx_buf->len;
-               skb->truesize += rx_buf->len;
-               skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
-                                 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+       EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
 
-               skb_record_rx_queue(skb, channel->rx_queue.core_index);
+       skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
+       memcpy(__skb_put(skb, hdr_len), eh, hdr_len);
 
-               gro_result = napi_gro_frags(napi);
-       } else {
-               struct sk_buff *skb = rx_buf->u.skb;
+       /* Append the remaining page(s) as skb page fragments */
+       if (rx_buf->len > hdr_len) {
+               rx_buf->page_offset += hdr_len;
+               rx_buf->len -= hdr_len;
 
-               EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
-               rx_buf->u.skb = NULL;
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               for (;;) {
+                       skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+                                          rx_buf->page, rx_buf->page_offset,
+                                          rx_buf->len);
+                       rx_buf->page = NULL;
+                       skb->len += rx_buf->len;
+                       skb->data_len += rx_buf->len;
+                       if (skb_shinfo(skb)->nr_frags == n_frags)
+                               break;
 
-               gro_result = napi_gro_receive(napi, skb);
+                       rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+               }
+       } else {
+               __free_pages(rx_buf->page, efx->rx_buffer_order);
+               rx_buf->page = NULL;
+               n_frags = 0;
        }
 
-       if (gro_result == GRO_NORMAL) {
-               channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
-       } else if (gro_result != GRO_DROP) {
-               channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
-               channel->irq_mod_score += 2;
-       }
+       skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+       /* Move past the ethernet header */
+       skb->protocol = eth_type_trans(skb, efx->net_dev);
+
+       return skb;
 }
 
 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
-                  unsigned int len, u16 flags)
+                  unsigned int n_frags, unsigned int len, u16 flags)
 {
        struct efx_nic *efx = rx_queue->efx;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_rx_buffer *rx_buf;
-       bool leak_packet = false;
 
        rx_buf = efx_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;
 
-       /* This allows the refill path to post another buffer.
-        * EFX_RXD_HEAD_ROOM ensures that the slot we are using
-        * isn't overwritten yet.
-        */
-       rx_queue->removed_count++;
-
-       /* Validate the length encoded in the event vs the descriptor pushed */
-       efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);
+       /* Validate the number of fragments and completed length */
+       if (n_frags == 1) {
+               efx_rx_packet__check_len(rx_queue, rx_buf, len);
+       } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
+                  unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
+                  unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
+                  unlikely(!efx->rx_scatter)) {
+               /* If this isn't an explicit discard request, either
+                * the hardware or the driver is broken.
+                */
+               WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
+               rx_buf->flags |= EFX_RX_PKT_DISCARD;
+       }
 
        netif_vdbg(efx, rx_status, efx->net_dev,
-                  "RX queue %d received id %x at %llx+%x %s%s\n",
+                  "RX queue %d received ids %x-%x len %d %s%s\n",
                   efx_rx_queue_index(rx_queue), index,
-                  (unsigned long long)rx_buf->dma_addr, len,
+                  (index + n_frags - 1) & rx_queue->ptr_mask, len,
                   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
                   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
 
-       /* Discard packet, if instructed to do so */
+       /* Discard packet, if instructed to do so.  Process the
+        * previous receive first.
+        */
        if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
-               if (unlikely(leak_packet))
-                       channel->n_skbuff_leaks++;
-               else
-                       efx_recycle_rx_buffer(channel, rx_buf);
-
-               /* Don't hold off the previous receive */
-               rx_buf = NULL;
-               goto out;
+               efx_rx_flush_packet(channel);
+               put_page(rx_buf->page);
+               efx_recycle_rx_buffers(channel, rx_buf, n_frags);
+               return;
        }
 
-       /* Release and/or sync DMA mapping - assumes all RX buffers
-        * consumed in-order per RX queue
+       if (n_frags == 1)
+               rx_buf->len = len;
+
+       /* Synchronise the DMA mapping - assumes all RX buffers
+        * consumed in-order per RX queue.
         */
-       efx_unmap_rx_buffer(efx, rx_buf, len);
+       efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
 
        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
-       prefetch(efx_rx_buf_eh(efx, rx_buf));
+       prefetch(efx_rx_buf_va(rx_buf));
+
+       rx_buf->page_offset += efx->type->rx_buffer_hash_size;
+       rx_buf->len -= efx->type->rx_buffer_hash_size;
+
+       if (n_frags > 1) {
+               /* Synchronise the DMA mappings for the additional
+                * fragments and fix the length of the last fragment.
+                */
+               unsigned int tail_frags = n_frags - 1;
+
+               for (;;) {
+                       rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+                       if (--tail_frags == 0)
+                               break;
+                       efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
+               }
+               rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
+               efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
+       }
+
+       /* All fragments have been DMA-synced, so recycle buffers and pages. */
+       rx_buf = efx_rx_buffer(rx_queue, index);
+       efx_recycle_rx_buffers(channel, rx_buf, n_frags);
 
        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
-       rx_buf->len = len - efx->type->rx_buffer_hash_size;
-out:
-       if (channel->rx_pkt)
-               __efx_rx_packet(channel, channel->rx_pkt);
-       channel->rx_pkt = rx_buf;
+       efx_rx_flush_packet(channel);
+       channel->rx_pkt_n_frags = n_frags;
+       channel->rx_pkt_index = index;
 }
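
To make the fragment/length validation above concrete, here is a standalone
sketch of the same bounds check, reduced to plain arithmetic; the usable
buffer size, fragment limit and test lengths are assumed example values.

/* scatter_check_sketch.c - a completed length is plausible only if it needs
 * exactly n_frags buffers: more than (n_frags - 1) buffers' worth, and no
 * more than n_frags buffers' worth.
 */
#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_USR_BUF_SIZE 1824u   /* assumed usable buffer size */
#define EXAMPLE_MAX_FRAGS    2u      /* assumed fragment limit */

static bool scatter_len_ok(unsigned int n_frags, unsigned int len)
{
        if (n_frags == 0 || n_frags > EXAMPLE_MAX_FRAGS)
                return false;
        /* len must not fit in fewer buffers ... */
        if (len <= (n_frags - 1) * EXAMPLE_USR_BUF_SIZE)
                return false;
        /* ... and must not need more buffers than were completed */
        if (len > n_frags * EXAMPLE_USR_BUF_SIZE)
                return false;
        return true;
}

int main(void)
{
        printf("1 frag,  1500 bytes: %s\n",
               scatter_len_ok(1, 1500) ? "ok" : "discard");
        printf("2 frags, 1500 bytes: %s\n",
               scatter_len_ok(2, 1500) ? "ok" : "discard");
        printf("2 frags, 3000 bytes: %s\n",
               scatter_len_ok(2, 3000) ? "ok" : "discard");
        return 0;
}
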
 
-static void efx_rx_deliver(struct efx_channel *channel,
-                          struct efx_rx_buffer *rx_buf)
+static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
+                          struct efx_rx_buffer *rx_buf,
+                          unsigned int n_frags)
 {
        struct sk_buff *skb;
+       u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
 
-       /* We now own the SKB */
-       skb = rx_buf->u.skb;
-       rx_buf->u.skb = NULL;
+       skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
+       if (unlikely(skb == NULL)) {
+               efx_free_rx_buffer(rx_buf);
+               return;
+       }
+       skb_record_rx_queue(skb, channel->rx_queue.core_index);
 
        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
 
-       /* Record the rx_queue */
-       skb_record_rx_queue(skb, channel->rx_queue.core_index);
-
-       /* Pass the packet up */
        if (channel->type->receive_skb)
-               channel->type->receive_skb(channel, skb);
-       else
-               netif_receive_skb(skb);
+               if (channel->type->receive_skb(channel, skb))
+                       return;
 
-       /* Update allocation strategy method */
-       channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+       /* Pass the packet up */
+       netif_receive_skb(skb);
 }
 
 /* Handle a received packet.  Second half: Touches packet payload. */
-void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
+void __efx_rx_packet(struct efx_channel *channel)
 {
        struct efx_nic *efx = channel->efx;
-       u8 *eh = efx_rx_buf_eh(efx, rx_buf);
+       struct efx_rx_buffer *rx_buf =
+               efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+       u8 *eh = efx_rx_buf_va(rx_buf);
 
        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
-               efx_free_rx_buffer(efx, rx_buf);
-               return;
-       }
-
-       if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
-               struct sk_buff *skb = rx_buf->u.skb;
-
-               prefetch(skb_shinfo(skb));
-
-               skb_reserve(skb, efx->type->rx_buffer_hash_size);
-               skb_put(skb, rx_buf->len);
-
-               if (efx->net_dev->features & NETIF_F_RXHASH)
-                       skb->rxhash = efx_rx_buf_hash(eh);
-
-               /* Move past the ethernet header. rx_buf->data still points
-                * at the ethernet header */
-               skb->protocol = eth_type_trans(skb, efx->net_dev);
-
-               skb_record_rx_queue(skb, channel->rx_queue.core_index);
+               efx_free_rx_buffer(rx_buf);
+               goto out;
        }
 
        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
 
-       if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
-           !channel->type->receive_skb)
-               efx_rx_packet_gro(channel, rx_buf, eh);
+       if (!channel->type->receive_skb)
+               efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
        else
-               efx_rx_deliver(channel, rx_buf);
-}
-
-void efx_rx_strategy(struct efx_channel *channel)
-{
-       enum efx_rx_alloc_method method = rx_alloc_method;
-
-       if (channel->type->receive_skb) {
-               channel->rx_alloc_push_pages = false;
-               return;
-       }
-
-       /* Only makes sense to use page based allocation if GRO is enabled */
-       if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
-               method = RX_ALLOC_METHOD_SKB;
-       } else if (method == RX_ALLOC_METHOD_AUTO) {
-               /* Constrain the rx_alloc_level */
-               if (channel->rx_alloc_level < 0)
-                       channel->rx_alloc_level = 0;
-               else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
-                       channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
-
-               /* Decide on the allocation method */
-               method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
-                         RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
-       }
-
-       /* Push the option */
-       channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
+               efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
+out:
+       channel->rx_pkt_n_frags = 0;
 }
 
 int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
@@ -683,9 +662,32 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }
+
        return rc;
 }
 
+static void efx_init_rx_recycle_ring(struct efx_nic *efx,
+                                    struct efx_rx_queue *rx_queue)
+{
+       unsigned int bufs_in_recycle_ring, page_ring_size;
+
+       /* Set the RX recycle ring size */
+#ifdef CONFIG_PPC64
+       bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+#else
+       if (efx->pci_dev->dev.iommu_group)
+               bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
+       else
+               bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
+#endif /* CONFIG_PPC64 */
+
+       page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
+                                           efx->rx_bufs_per_page);
+       rx_queue->page_ring = kcalloc(page_ring_size,
+                                     sizeof(*rx_queue->page_ring), GFP_KERNEL);
+       rx_queue->page_ptr_mask = page_ring_size - 1;
+}
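
A standalone sketch of the recycle-ring sizing above, reusing the two buffer
budgets defined earlier in this patch and an assumed buffers-per-page count
of 2.

/* recycle_ring_sketch.c - a buffer budget (large behind an IOMMU, small
 * without one) divided by buffers-per-page and rounded up to a power of
 * two, as in efx_init_rx_recycle_ring().
 */
#include <stdio.h>

#define RECYCLE_RING_SIZE_IOMMU   4096u
#define RECYCLE_RING_SIZE_NOIOMMU (2u * 8u)   /* 2 * preferred batch */

static unsigned int round_up_pow2(unsigned int x)
{
        unsigned int r = 1;

        while (r < x)
                r <<= 1;
        return r;
}

static unsigned int page_ring_size(unsigned int bufs_in_ring,
                                   unsigned int bufs_per_page)
{
        return round_up_pow2(bufs_in_ring / bufs_per_page);
}

int main(void)
{
        unsigned int bufs_per_page = 2;   /* assumed example value */

        printf("IOMMU ring:    %u pages\n",
               page_ring_size(RECYCLE_RING_SIZE_IOMMU, bufs_per_page));
        printf("no-IOMMU ring: %u pages\n",
               page_ring_size(RECYCLE_RING_SIZE_NOIOMMU, bufs_per_page));
        return 0;
}
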
+
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
        struct efx_nic *efx = rx_queue->efx;
@@ -699,10 +701,18 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;
+       efx_init_rx_recycle_ring(efx, rx_queue);
+
+       rx_queue->page_remove = 0;
+       rx_queue->page_add = rx_queue->page_ptr_mask + 1;
+       rx_queue->page_recycle_count = 0;
+       rx_queue->page_recycle_failed = 0;
+       rx_queue->page_recycle_full = 0;
 
        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
-       max_trigger = max_fill - EFX_RX_BATCH;
+       max_trigger =
+               max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        if (rx_refill_threshold != 0) {
                trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
                if (trigger > max_trigger)
@@ -722,6 +732,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 {
        int i;
+       struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
 
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
@@ -733,13 +744,32 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
        del_timer_sync(&rx_queue->slow_fill);
        efx_nic_fini_rx(rx_queue);
 
-       /* Release RX buffers NB start at index 0 not current HW ptr */
+       /* Release RX buffers from the current read ptr to the write ptr */
        if (rx_queue->buffer) {
-               for (i = 0; i <= rx_queue->ptr_mask; i++) {
-                       rx_buf = efx_rx_buffer(rx_queue, i);
+               for (i = rx_queue->removed_count; i < rx_queue->added_count;
+                    i++) {
+                       unsigned index = i & rx_queue->ptr_mask;
+                       rx_buf = efx_rx_buffer(rx_queue, index);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }
+
+       /* Unmap and release the pages in the recycle ring. Remove the ring. */
+       for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+               struct page *page = rx_queue->page_ring[i];
+               struct efx_rx_page_state *state;
+
+               if (page == NULL)
+                       continue;
+
+               state = page_address(page);
+               dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+                              PAGE_SIZE << efx->rx_buffer_order,
+                              DMA_FROM_DEVICE);
+               put_page(page);
+       }
+       kfree(rx_queue->page_ring);
+       rx_queue->page_ring = NULL;
 }
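
Finally, a standalone sketch of the teardown walk above: only the entries
between the removed and added counters are still outstanding, and masking
maps the free-running counters back onto ring slots. The ring size and
counter values are illustrative only.

/* fini_walk_sketch.c - releasing only the outstanding ring entries, i.e.
 * those between removed_count and added_count, as efx_fini_rx_queue()
 * now does.
 */
#include <stdio.h>

#define EXAMPLE_RING_SIZE 8u
#define EXAMPLE_PTR_MASK  (EXAMPLE_RING_SIZE - 1)

int main(void)
{
        /* Free-running counters; they wrap only through the mask */
        unsigned int removed_count = 13;
        unsigned int added_count = 17;
        unsigned int i;

        for (i = removed_count; i < added_count; i++) {
                unsigned int index = i & EXAMPLE_PTR_MASK;

                printf("release slot %u (counter %u)\n", index, i);
        }
        return 0;
}
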
 
 void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
@@ -754,9 +784,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
 }
 
 
-module_param(rx_alloc_method, int, 0644);
-MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
-
 module_param(rx_refill_threshold, uint, 0444);
 MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring refill threshold (%)");