/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS	128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

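/* Return the virtual address of the receive buffer's data, derived from
 * its backing page and offset within that page.
 */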
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

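/* Read the RSS hash from the packet prefix.  With efficient unaligned
 * access this is a single little-endian 32-bit load; otherwise the hash
 * is assembled a byte at a time.
 */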
static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;

	return (u32)data[0]	  |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
		return efx_rx_buffer(rx_queue, 0);
	else
		return rx_buf + 1;
}

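/* Sync the first @len bytes of a receive buffer for the CPU before the
 * driver looks at the packet data.
 */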
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

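/* Recalculate the page-splitting parameters: the step between buffers
 * within a page, the number of buffers per page and their truesize, and
 * how many pages are needed for a preferred batch of descriptors.
 */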
void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct page *page;
	struct efx_rx_page_state *state;
	unsigned index;

	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/**
 * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one.  Return a negative error code or
 * 0 on success.  If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;
	struct page *page;
	unsigned int page_offset;
	struct efx_rx_page_state *state;
	dma_addr_t dma_addr;
	unsigned index, count;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + NET_IP_ALIGN;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
	if (rx_buf->page) {
		put_page(rx_buf->page);
		rx_buf->page = NULL;
	}
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	unsigned index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffer(rx_buf);
	}
	rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

static void efx_discard_rx_packet(struct efx_channel *channel,
				  struct efx_rx_buffer *rx_buf,
				  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	do {
		efx_free_rx_buffer(rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_BUG_ON_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			if (rx_queue->added_count == rx_queue->removed_count)
				efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

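/* Timer callback for the slow-fill path; defers the actual refill to
 * NAPI context by generating a fill event.
 */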
void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

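/* Validate the length of a completed packet against the usable buffer
 * length.  Overlength packets are marked for discard; if the hardware may
 * have written past the end of the buffer, an RX recovery reset is
 * scheduled.
 */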
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d seriously overlength "
				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
				  efx_rx_queue_index(rx_queue), len, max_len,
				  efx->type->rx_buffer_padding);
		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
	} else {
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  " RX queue %d overlength RX event "
				  "(0x%x > 0x%x)\n",
				  efx_rx_queue_index(rx_queue), len, max_len);
	}

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh)
{
	struct napi_struct *napi = &channel->napi_str;
	gro_result_t gro_result;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		while (n_frags--) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb->rxhash = efx_rx_buf_hash(efx, eh);
	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	gro_result = napi_gro_frags(napi);
	if (gro_result != GRO_DROP)
		channel->irq_mod_score += 2;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
	if (unlikely(skb == NULL))
		return NULL;

	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
	memcpy(__skb_put(skb, hdr_len), eh, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	return skb;
}

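/* Handle a received packet.  First half: Runs from the event handling
 * path; validates the completion, syncs the DMA mappings and recycles
 * the pages, but does not touch the packet payload.
 */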
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

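/* Build an skb around the received fragments and pass it up, either via
 * the channel's receive_skb handler or the regular network stack.
 */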
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		efx_free_rx_buffer(rx_buf);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		efx_free_rx_buffer(rx_buf);
		goto out;
	}

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

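/* Allocate the software state for an RX queue: round the ring size up to
 * a power of two and allocate the shadow buffer array.
 */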
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

static void efx_init_rx_recycle_ring(struct efx_nic *efx,
				     struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;

	/* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
	if (iommu_present(&pci_bus_type))
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
	else
		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	rx_queue->page_ptr_mask = page_ring_size - 1;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int max_fill, trigger, max_trigger;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(efx, rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

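/* Tear down an RX queue: stop the slow-fill timer, release outstanding
 * RX buffers and drain the page recycle ring.
 */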
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	int i;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_buffer *rx_buf;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned index = i & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

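/* ndo_rx_flow_steer handler.  Builds a 5-tuple filter specification from
 * the skb's network and transport headers and inserts a steering filter
 * for the flow.  Returns the filter index, or a negative error code.
 */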
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_spec spec;
	const __be16 *ports;
	__be16 ether_type;
	int nhoff;
	int rc;

	/* The core RPS/RFS code has already parsed and validated
	 * VLAN, IP and transport headers.  We assume they are in the
	 * header area.
	 */

	if (skb->protocol == htons(ETH_P_8021Q)) {
		const struct vlan_hdr *vh =
			(const struct vlan_hdr *)skb->data;

		/* We can't filter on the IP 5-tuple and the vlan
		 * together, so just strip the vlan header and filter
		 * on the IP part.
		 */
		EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
		ether_type = vh->h_vlan_encapsulated_proto;
		nhoff = sizeof(struct vlan_hdr);
	} else {
		ether_type = skb->protocol;
		nhoff = 0;
	}

	if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	spec.ether_type = ether_type;

	if (ether_type == htons(ETH_P_IP)) {
		const struct iphdr *ip =
			(const struct iphdr *)(skb->data + nhoff);

		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
		if (ip_is_fragment(ip))
			return -EPROTONOSUPPORT;
		spec.ip_proto = ip->protocol;
		spec.rem_host[0] = ip->saddr;
		spec.loc_host[0] = ip->daddr;
		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
		ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
	} else {
		const struct ipv6hdr *ip6 =
			(const struct ipv6hdr *)(skb->data + nhoff);

		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
				    nhoff + sizeof(*ip6) + 4);
		spec.ip_proto = ip6->nexthdr;
		memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
		memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
		ports = (const __be16 *)(ip6 + 1);
	}

	spec.rem_port = ports[0];
	spec.loc_port = ports[1];

	rc = efx->type->filter_rfs_insert(efx, &spec);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	efx->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	if (ether_type == htons(ETH_P_IP))
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
			   ntohs(ports[1]), rxq_index, flow_id, rc);
	else
		netif_info(efx, rx_status, efx->net_dev,
			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
			   ntohs(ports[1]), rxq_index, flow_id, rc);

	return rc;
}

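/* Scan up to @quota RFS filter entries and expire those that the
 * NIC-type's filter_rfs_expire_one callback reports as idle.  Returns
 * false if the filter lock was contended and nothing was scanned.
 */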
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	unsigned int index, size;
	u32 flow_id;

	if (!spin_trylock_bh(&efx->filter_lock))
		return false;

	expire_one = efx->type->filter_rfs_expire_one;
	index = efx->rps_expire_index;
	size = efx->type->max_rx_ip_filters;
	while (quota--) {
		flow_id = efx->rps_flow_id[index];
		if (expire_one(efx, flow_id, index))
			netif_info(efx, rx_status, efx->net_dev,
				   "expired filter %d [flow %u]\n",
				   index, flow_id);
		if (++index == size)
			index = 0;
	}
	efx->rps_expire_index = index;

	spin_unlock_bh(&efx->filter_lock);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}