drivers/net/ethernet/sfc/rx.c

/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

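/* Illustrative sizing only (not part of the driver): with an IOMMU present
 * and, say, 2 buffers per page (an assumption for this example, not a fixed
 * property of the hardware), efx_init_rx_recycle_ring() below would size the
 * recycle ring at roundup_pow_of_two(4096 / 2) = 2048 pages per RX queue;
 * without an IOMMU it shrinks to roundup_pow_of_two(16 / 2) = 8 pages.
 */
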
/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
                                      EFX_RX_USR_BUF_SIZE)

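/* Worked example with purely illustrative numbers (the real values come from
 * EFX_MAX_FRAME_LEN() and EFX_RX_USR_BUF_SIZE in the driver headers): if the
 * maximum frame length came to ~9234 bytes for a 9000-byte MTU and the
 * usable buffer size were 1824 bytes, then
 * EFX_RX_MAX_FRAGS = DIV_ROUND_UP(9234, 1824) = 6, i.e. one jumbo frame may
 * be scattered across up to six RX buffers/descriptors.
 */
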
/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
        return page_address(buf->page) + buf->page_offset;
}

static inline u32 efx_rx_buf_hash(const u8 *eh)
{
        /* The ethernet header is always directly after any hash. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
        return __le32_to_cpup((const __le32 *)(eh - 4));
#else
        const u8 *data = eh - 4;
        return (u32)data[0]       |
               (u32)data[1] << 8  |
               (u32)data[2] << 16 |
               (u32)data[3] << 24;
#endif
}

static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
        if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
                return efx_rx_buffer(rx_queue, 0);
        else
                return rx_buf + 1;
}

static inline void efx_sync_rx_buffer(struct efx_nic *efx,
                                      struct efx_rx_buffer *rx_buf,
                                      unsigned int len)
{
        dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
                                DMA_FROM_DEVICE);
}

void efx_rx_config_page_split(struct efx_nic *efx)
{
        efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
                                      EFX_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
                 efx->rx_page_buf_step);
        efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
                efx->rx_bufs_per_page;
        efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
                                               efx->rx_bufs_per_page);
}

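/* Worked example of the split above, under illustrative assumptions (4KiB
 * pages, order-0 allocation, rx_dma_len of ~1536 bytes, NET_IP_ALIGN of 2
 * and a hypothetical 2048-byte EFX_RX_BUF_ALIGNMENT; the real values depend
 * on the MTU and the architecture):
 *   rx_page_buf_step   = ALIGN(1536 + 2, 2048)                  = 2048
 *   rx_bufs_per_page   = (4096 - sizeof(page_state)) / 2048     = 1
 *   rx_buffer_truesize = 4096 / 1                               = 4096
 *   rx_pages_per_batch = DIV_ROUND_UP(8, 1)                     = 8
 * With a smaller alignment (e.g. 64 bytes) two buffers would fit per page
 * and only four pages would be needed per refill batch.
 */
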
/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct page *page;
        struct efx_rx_page_state *state;
        unsigned index;

        index = rx_queue->page_remove & rx_queue->page_ptr_mask;
        page = rx_queue->page_ring[index];
        if (page == NULL)
                return NULL;

        rx_queue->page_ring[index] = NULL;
        /* page_remove cannot exceed page_add. */
        if (rx_queue->page_remove != rx_queue->page_add)
                ++rx_queue->page_remove;

        /* If page_count is 1 then we hold the only reference to this page. */
        if (page_count(page) == 1) {
                ++rx_queue->page_recycle_count;
                return page;
        } else {
                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
                ++rx_queue->page_recycle_failed;
        }

        return NULL;
}

/**
 * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 *
 * @rx_queue:           Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one.  It returns a negative error code
 * on failure, or 0 on success.  If a single page can be used for multiple
 * buffers, then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        struct page *page;
        unsigned int page_offset;
        struct efx_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;

        count = 0;
        do {
                page = efx_reuse_page(rx_queue);
                if (page == NULL) {
                        page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                           efx->rx_buffer_order);
                        if (unlikely(page == NULL))
                                return -ENOMEM;
                        dma_addr =
                                dma_map_page(&efx->pci_dev->dev, page, 0,
                                             PAGE_SIZE << efx->rx_buffer_order,
                                             DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
                                                       dma_addr))) {
                                __free_pages(page, efx->rx_buffer_order);
                                return -EIO;
                        }
                        state = page_address(page);
                        state->dma_addr = dma_addr;
                } else {
                        state = page_address(page);
                        dma_addr = state->dma_addr;
                }

                dma_addr += sizeof(struct efx_rx_page_state);
                page_offset = sizeof(struct efx_rx_page_state);

                do {
                        index = rx_queue->added_count & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
                        rx_buf->page = page;
                        rx_buf->page_offset = page_offset + NET_IP_ALIGN;
                        rx_buf->len = efx->rx_dma_len;
                        rx_buf->flags = 0;
                        ++rx_queue->added_count;
                        get_page(page);
                        dma_addr += efx->rx_page_buf_step;
                        page_offset += efx->rx_page_buf_step;
                } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

                rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
        } while (++count < efx->rx_pages_per_batch);

        return 0;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf)
{
        struct page *page = rx_buf->page;

        if (page) {
                struct efx_rx_page_state *state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev,
                               state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
        }
}

static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                put_page(rx_buf->page);
                rx_buf->page = NULL;
        }
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
                                struct efx_rx_buffer *rx_buf)
{
        struct page *page = rx_buf->page;
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
        struct efx_nic *efx = rx_queue->efx;
        unsigned index;

        /* Only recycle the page after processing the final buffer. */
        if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
                return;

        index = rx_queue->page_add & rx_queue->page_ptr_mask;
        if (rx_queue->page_ring[index] == NULL) {
                unsigned read_index = rx_queue->page_remove &
                        rx_queue->page_ptr_mask;

                /* The next slot in the recycle ring is available, but
                 * increment page_remove if the read pointer currently
                 * points here.
                 */
                if (read_index == index)
                        ++rx_queue->page_remove;
                rx_queue->page_ring[index] = page;
                ++rx_queue->page_add;
                return;
        }
        ++rx_queue->page_recycle_full;
        efx_unmap_rx_buffer(efx, rx_buf);
        put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
{
        /* Release the page reference we hold for the buffer. */
        if (rx_buf->page)
                put_page(rx_buf->page);

        /* If this is the last buffer in a page, unmap and free it. */
        if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
                efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
                efx_free_rx_buffer(rx_buf);
        }
        rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
                                 struct efx_rx_buffer *rx_buf,
                                 unsigned int n_frags)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

        do {
                efx_recycle_rx_page(channel, rx_buf);
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}

static void efx_discard_rx_packet(struct efx_channel *channel,
                                  struct efx_rx_buffer *rx_buf,
                                  unsigned int n_frags)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

        efx_recycle_rx_pages(channel, rx_buf, n_frags);

        do {
                efx_free_rx_buffer(rx_buf);
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:           RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int fill_level, batch_size;
        int space, rc = 0;

        if (!rx_queue->refill_enabled)
                return;

        /* Calculate current fill level, and exit if we don't need to fill */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
        if (fill_level >= rx_queue->fast_fill_trigger)
                goto out;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        space = rx_queue->max_fill - fill_level;
        EFX_BUG_ON_PARANOID(space < batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d\n",
                   efx_rx_queue_index(rx_queue), fill_level,
                   rx_queue->max_fill);

        do {
                rc = efx_init_rx_buffers(rx_queue);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
                                efx_schedule_slow_fill(rx_queue);
                        goto out;
                }
        } while ((space -= batch_size) >= batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
                   "to level %d\n", efx_rx_queue_index(rx_queue),
                   rx_queue->added_count - rx_queue->removed_count);

 out:
        if (rx_queue->notified_count != rx_queue->added_count)
                efx_nic_notify_rx_desc(rx_queue);
}

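/* Refill arithmetic, purely as an illustration (values assumed, not taken
 * from this file): with max_fill = 4080, fill_level = 2000 and a batch of
 * 8 buffers, space starts at 2080 and efx_init_rx_buffers() is called while
 * at least one further batch still fits, i.e. 2080 / 8 = 260 times, leaving
 * the queue at or just below max_fill without ever overfilling it.
 */
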
void efx_rx_slow_fill(unsigned long context)
{
        struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

        /* Post an event to cause NAPI to run and refill the queue */
        efx_nic_generate_fill_event(rx_queue);
        ++rx_queue->slow_fill_count;
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
                                     int len)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded, but this is only a fatal error
         * (requiring a reset) if the length also exceeds the buffer itself,
         * since the hardware may then have overrun it.
         */
        rx_buf->flags |= EFX_RX_PKT_DISCARD;

        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d seriously overlength "
                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                                  efx_rx_queue_index(rx_queue), len, max_len,
                                  efx->type->rx_buffer_padding);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d overlength RX event "
                                  "(0x%x > 0x%x)\n",
                                  efx_rx_queue_index(rx_queue), len, max_len);
        }

        efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
                  unsigned int n_frags, u8 *eh)
{
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        skb = napi_get_frags(napi);
        if (unlikely(!skb)) {
                while (n_frags--) {
                        put_page(rx_buf->page);
                        rx_buf->page = NULL;
                        rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
                }
                return;
        }

        if (efx->net_dev->features & NETIF_F_RXHASH)
                skb->rxhash = efx_rx_buf_hash(eh);
        skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
                          CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

        for (;;) {
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                   rx_buf->page, rx_buf->page_offset,
                                   rx_buf->len);
                rx_buf->page = NULL;
                skb->len += rx_buf->len;
                if (skb_shinfo(skb)->nr_frags == n_frags)
                        break;

                rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
        }

        skb->data_len = skb->len;
        skb->truesize += n_frags * efx->rx_buffer_truesize;

        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        gro_result = napi_gro_frags(napi);
        if (gro_result != GRO_DROP)
                channel->irq_mod_score += 2;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
                                     struct efx_rx_buffer *rx_buf,
                                     unsigned int n_frags,
                                     u8 *eh, int hdr_len)
{
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        /* Allocate an SKB to store the headers */
        skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
        if (unlikely(skb == NULL))
                return NULL;

        EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

        skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
        memcpy(__skb_put(skb, hdr_len), eh, hdr_len);

        /* Append the remaining page(s) onto the frag list */
        if (rx_buf->len > hdr_len) {
                rx_buf->page_offset += hdr_len;
                rx_buf->len -= hdr_len;

                for (;;) {
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buf->page, rx_buf->page_offset,
                                           rx_buf->len);
                        rx_buf->page = NULL;
                        skb->len += rx_buf->len;
                        skb->data_len += rx_buf->len;
                        if (skb_shinfo(skb)->nr_frags == n_frags)
                                break;

                        rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
                }
        } else {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
                n_frags = 0;
        }

        skb->truesize += n_frags * efx->rx_buffer_truesize;

        /* Move past the ethernet header */
        skb->protocol = eth_type_trans(skb, efx->net_dev);

        return skb;
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int n_frags, unsigned int len, u16 flags)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_rx_buffer *rx_buf;

        rx_buf = efx_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;

        /* Validate the number of fragments and completed length */
        if (n_frags == 1) {
                efx_rx_packet__check_len(rx_queue, rx_buf, len);
        } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
                   unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
                   unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
                   unlikely(!efx->rx_scatter)) {
                /* If this isn't an explicit discard request, either
                 * the hardware or the driver is broken.
                 */
                WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
                rx_buf->flags |= EFX_RX_PKT_DISCARD;
        }

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received ids %x-%x len %d %s%s\n",
                   efx_rx_queue_index(rx_queue), index,
                   (index + n_frags - 1) & rx_queue->ptr_mask, len,
                   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
                   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

        /* Discard packet, if instructed to do so.  Process the
         * previous receive first.
         */
        if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
                efx_rx_flush_packet(channel);
                efx_discard_rx_packet(channel, rx_buf, n_frags);
                return;
        }

        if (n_frags == 1)
                rx_buf->len = len;

        /* Release and/or sync the DMA mapping - assumes all RX buffers
         * consumed in-order per RX queue.
         */
        efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(efx_rx_buf_va(rx_buf));

        rx_buf->page_offset += efx->type->rx_buffer_hash_size;
        rx_buf->len -= efx->type->rx_buffer_hash_size;

        if (n_frags > 1) {
                /* Release/sync DMA mapping for additional fragments.
                 * Fix length for last fragment.
                 */
                unsigned int tail_frags = n_frags - 1;

                for (;;) {
                        rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
                        if (--tail_frags == 0)
                                break;
                        efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
                }
                rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
                efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
        }

        /* All fragments have been DMA-synced, so recycle pages. */
        rx_buf = efx_rx_buffer(rx_queue, index);
        efx_recycle_rx_pages(channel, rx_buf, n_frags);

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        efx_rx_flush_packet(channel);
        channel->rx_pkt_n_frags = n_frags;
        channel->rx_pkt_index = index;
}

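/* Rough sketch of how the two halves fit together (illustrative only; the
 * real event loop lives in the channel/NAPI code, not in this file):
 *
 *     for each RX completion event:
 *             efx_rx_packet(rx_queue, index, n_frags, len, flags);
 *                     -> syncs/validates buffers, flushes any previously
 *                        held packet, then stashes index/n_frags
 *     at the end of event processing:
 *             efx_rx_flush_packet(channel);   // invokes __efx_rx_packet()
 *
 * Holding one packet back like this gives the prefetch issued above time to
 * pull the headers into cache before __efx_rx_packet() touches them.
 */
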
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
                           struct efx_rx_buffer *rx_buf,
                           unsigned int n_frags)
{
        struct sk_buff *skb;
        u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

        skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
        if (unlikely(skb == NULL)) {
                efx_free_rx_buffer(rx_buf);
                return;
        }
        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
        if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (channel->type->receive_skb)
                if (channel->type->receive_skb(channel, skb))
                        return;

        /* Pass the packet up */
        netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;
        struct efx_rx_buffer *rx_buf =
                efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
        u8 *eh = efx_rx_buf_va(rx_buf);

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
                efx_free_rx_buffer(rx_buf);
                goto out;
        }

        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

        if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
                efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
        else
                efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
        channel->rx_pkt_n_frags = 0;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        rx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating RX queue %d size %#x mask %#x\n",
                  efx_rx_queue_index(rx_queue), efx->rxq_entries,
                  rx_queue->ptr_mask);

        /* Allocate RX buffers */
        rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = efx_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }

        return rc;
}

static void efx_init_rx_recycle_ring(struct efx_nic *efx,
                                     struct efx_rx_queue *rx_queue)
{
        unsigned int bufs_in_recycle_ring, page_ring_size;

        /* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
        bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
        if (iommu_present(&pci_bus_type))
                bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
        else
                bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

        page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
                                            efx->rx_bufs_per_page);
        rx_queue->page_ring = kcalloc(page_ring_size,
                                      sizeof(*rx_queue->page_ring), GFP_KERNEL);
        rx_queue->page_ptr_mask = page_ring_size - 1;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, max_trigger;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;
        efx_init_rx_recycle_ring(efx, rx_queue);

        rx_queue->page_remove = 0;
        rx_queue->page_add = rx_queue->page_ptr_mask + 1;
        rx_queue->page_recycle_count = 0;
        rx_queue->page_recycle_failed = 0;
        rx_queue->page_recycle_full = 0;

        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
        max_trigger =
                max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        if (rx_refill_threshold != 0) {
                trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
                if (trigger > max_trigger)
                        trigger = max_trigger;
        } else {
                trigger = max_trigger;
        }

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;
        rx_queue->refill_enabled = true;

        /* Set up RX descriptor ring */
        efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

        del_timer_sync(&rx_queue->slow_fill);

        /* Release RX buffers from the current read ptr to the write ptr */
        if (rx_queue->buffer) {
                for (i = rx_queue->removed_count; i < rx_queue->added_count;
                     i++) {
                        unsigned index = i & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }

        /* Unmap and release the pages in the recycle ring. Remove the ring. */
        for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
                struct page *page = rx_queue->page_ring[i];
                struct efx_rx_page_state *state;

                if (page == NULL)
                        continue;

                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
        }
        kfree(rx_queue->page_ring);
        rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

        efx_nic_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}


module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring refill threshold (%)");

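/* Usage note (not from this file): rx_refill_threshold is read-only at
 * runtime (mode 0444), so it has to be set at module load time, e.g.
 *
 *     modprobe sfc rx_refill_threshold=90
 *
 * which, per efx_init_rx_queue() above, sets fast_fill_trigger to 90% of
 * max_fill (capped at max_trigger); the default of 0 simply uses
 * max_trigger.
 */
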
#ifdef CONFIG_RFS_ACCEL

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                   u16 rxq_index, u32 flow_id)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;
        struct efx_filter_spec spec;
        const struct iphdr *ip;
        const __be16 *ports;
        int nhoff;
        int rc;

        nhoff = skb_network_offset(skb);

        if (skb->protocol == htons(ETH_P_8021Q)) {
                EFX_BUG_ON_PARANOID(skb_headlen(skb) <
                                    nhoff + sizeof(struct vlan_hdr));
                if (((const struct vlan_hdr *)(skb->data + nhoff))->
                    h_vlan_encapsulated_proto != htons(ETH_P_IP))
                        return -EPROTONOSUPPORT;

                /* This is IP over 802.1q VLAN.  We can't filter on the
                 * IP 5-tuple and the vlan together, so just strip the
                 * vlan header and filter on the IP part.
                 */
                nhoff += sizeof(struct vlan_hdr);
        } else if (skb->protocol != htons(ETH_P_IP)) {
                return -EPROTONOSUPPORT;
        }

        /* RFS must validate the IP header length before calling us */
        EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
        ip = (const struct iphdr *)(skb->data + nhoff);
        if (ip_is_fragment(ip))
                return -EPROTONOSUPPORT;
        EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
        ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

        efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
                           efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
                           rxq_index);
        rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
                                      ip->daddr, ports[1], ip->saddr, ports[0]);
        if (rc)
                return rc;

        rc = efx->type->filter_rfs_insert(efx, &spec);
        if (rc < 0)
                return rc;

        /* Remember this so we can check whether to expire the filter later */
        efx->rps_flow_id[rc] = flow_id;
        channel = efx_get_channel(efx, skb_get_rx_queue(skb));
        ++channel->rfs_filters_added;

        netif_info(efx, rx_status, efx->net_dev,
                   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
                   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
                   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
                   rxq_index, flow_id, rc);

        return rc;
}

bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
        bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
        unsigned int index, size;
        u32 flow_id;

        if (!spin_trylock_bh(&efx->filter_lock))
                return false;

        expire_one = efx->type->filter_rfs_expire_one;
        index = efx->rps_expire_index;
        size = efx->type->max_rx_ip_filters;
        while (quota--) {
                flow_id = efx->rps_flow_id[index];
                if (expire_one(efx, flow_id, index))
                        netif_info(efx, rx_status, efx->net_dev,
                                   "expired filter %d [flow %u]\n",
                                   index, flow_id);
                if (++index == size)
                        index = 0;
        }
        efx->rps_expire_index = index;

        spin_unlock_bh(&efx->filter_lock);
        return true;
}

#endif /* CONFIG_RFS_ACCEL */