/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
                                      EFX_RX_USR_BUF_SIZE)

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)

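/* Return the virtual address of the data held in an RX buffer's page. */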
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
        return page_address(buf->page) + buf->page_offset;
}

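/* Read the RSS hash that the NIC writes into the packet prefix.  On
 * architectures with efficient unaligned access it is read as a single
 * 32-bit word, otherwise it is assembled byte by byte.
 */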
static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
        const u8 *data = eh + efx->rx_packet_hash_offset;

        return (u32)data[0] | (u32)data[1] << 8 |
               (u32)data[2] << 16 | (u32)data[3] << 24;
#endif
}

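/* Return the next RX buffer in the descriptor ring, wrapping at the end. */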
static inline struct efx_rx_buffer *
efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
{
        if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
                return efx_rx_buffer(rx_queue, 0);
        else
                return rx_buf + 1;
}

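/* Make the DMA'd packet data visible to the CPU before the driver reads it. */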
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
                                      struct efx_rx_buffer *rx_buf,
                                      unsigned int len)
{
        dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
                                DMA_FROM_DEVICE);
}

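/* Work out how each page is split into RX buffers: for order-0 pages the
 * first sizeof(struct efx_rx_page_state) bytes hold the DMA state and the
 * remainder is divided into rx_page_buf_step-sized buffers (typically two
 * buffers per 4K page at a standard MTU); higher-order pages hold a single
 * buffer.
 */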
void efx_rx_config_page_split(struct efx_nic *efx)
{
        efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
                                      EFX_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
                 efx->rx_page_buf_step);
        efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
                efx->rx_bufs_per_page;
        efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
                                               efx->rx_bufs_per_page);
}

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        struct page *page;
        struct efx_rx_page_state *state;
        unsigned index;

        index = rx_queue->page_remove & rx_queue->page_ptr_mask;
        page = rx_queue->page_ring[index];
        if (page == NULL)
                return NULL;

        rx_queue->page_ring[index] = NULL;
        /* page_remove cannot exceed page_add. */
        if (rx_queue->page_remove != rx_queue->page_add)
                ++rx_queue->page_remove;

        /* If page_count is 1 then we hold the only reference to this page. */
        if (page_count(page) == 1) {
                ++rx_queue->page_recycle_count;
                return page;
        } else {
                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
                ++rx_queue->page_recycle_failed;
        }

        return NULL;
}

/**
 * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
 *
 * @rx_queue:	Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        struct page *page;
        unsigned int page_offset;
        struct efx_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;

        count = 0;
        do {
                page = efx_reuse_page(rx_queue);
                if (page == NULL) {
                        page = alloc_pages(__GFP_COLD | __GFP_COMP |
                                           (atomic ? GFP_ATOMIC : GFP_KERNEL),
                                           efx->rx_buffer_order);
                        if (unlikely(page == NULL))
                                return -ENOMEM;
                        dma_addr =
                                dma_map_page(&efx->pci_dev->dev, page, 0,
                                             PAGE_SIZE << efx->rx_buffer_order,
                                             DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
                                                       dma_addr))) {
                                __free_pages(page, efx->rx_buffer_order);
                                return -EIO;
                        }
                        state = page_address(page);
                        state->dma_addr = dma_addr;
                } else {
                        state = page_address(page);
                        dma_addr = state->dma_addr;
                }

                dma_addr += sizeof(struct efx_rx_page_state);
                page_offset = sizeof(struct efx_rx_page_state);

                do {
                        index = rx_queue->added_count & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
                        rx_buf->page = page;
                        rx_buf->page_offset = page_offset + efx->rx_ip_align;
                        rx_buf->len = efx->rx_dma_len;
                        rx_buf->flags = 0;
                        ++rx_queue->added_count;
                        get_page(page);
                        dma_addr += efx->rx_page_buf_step;
                        page_offset += efx->rx_page_buf_step;
                } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

                rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
        } while (++count < efx->rx_pages_per_batch);

        return 0;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf)
{
        struct page *page = rx_buf->page;

        if (page) {
                struct efx_rx_page_state *state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev,
                               state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
        }
}

static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                put_page(rx_buf->page);
                rx_buf->page = NULL;
        }
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
                                struct efx_rx_buffer *rx_buf)
{
        struct page *page = rx_buf->page;
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
        struct efx_nic *efx = rx_queue->efx;
        unsigned index;

        /* Only recycle the page after processing the final buffer. */
        if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
                return;

        index = rx_queue->page_add & rx_queue->page_ptr_mask;
        if (rx_queue->page_ring[index] == NULL) {
                unsigned read_index = rx_queue->page_remove &
                        rx_queue->page_ptr_mask;

                /* The next slot in the recycle ring is available, but
                 * increment page_remove if the read pointer currently
                 * points here.
                 */
                if (read_index == index)
                        ++rx_queue->page_remove;
                rx_queue->page_ring[index] = page;
                ++rx_queue->page_add;
                return;
        }
        ++rx_queue->page_recycle_full;
        efx_unmap_rx_buffer(efx, rx_buf);
        put_page(rx_buf->page);
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
{
        /* Release the page reference we hold for the buffer. */
        if (rx_buf->page)
                put_page(rx_buf->page);

        /* If this is the last buffer in a page, unmap and free it. */
        if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
                efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
                efx_free_rx_buffer(rx_buf);
        }
        rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void efx_recycle_rx_pages(struct efx_channel *channel,
                                 struct efx_rx_buffer *rx_buf,
                                 unsigned int n_frags)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

        do {
                efx_recycle_rx_page(channel, rx_buf);
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}

static void efx_discard_rx_packet(struct efx_channel *channel,
                                  struct efx_rx_buffer *rx_buf,
                                  unsigned int n_frags)
{
        struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

        efx_recycle_rx_pages(channel, rx_buf, n_frags);

        do {
                efx_free_rx_buffer(rx_buf);
                rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:	RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill.  If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here).  In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int fill_level, batch_size;
        int space, rc = 0;

        if (!rx_queue->refill_enabled)
                return;

        /* Calculate current fill level, and exit if we don't need to fill */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
        if (fill_level >= rx_queue->fast_fill_trigger)
                goto out;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        space = rx_queue->max_fill - fill_level;
        EFX_BUG_ON_PARANOID(space < batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d\n",
                   efx_rx_queue_index(rx_queue), fill_level,
                   rx_queue->max_fill);

        do {
                rc = efx_init_rx_buffers(rx_queue, atomic);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
                                efx_schedule_slow_fill(rx_queue);
                        goto out;
                }
        } while ((space -= batch_size) >= batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
                   "to level %d\n", efx_rx_queue_index(rx_queue),
                   rx_queue->added_count - rx_queue->removed_count);

out:
        if (rx_queue->notified_count != rx_queue->added_count)
                efx_nic_notify_rx_desc(rx_queue);
}

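/* Timer callback for the slow-fill path: post a fill event so that NAPI
 * runs and the queue is refilled from the event-processing path.
 */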
void efx_rx_slow_fill(unsigned long context)
{
        struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

        /* Post an event to cause NAPI to run and refill the queue */
        efx_nic_generate_fill_event(rx_queue);
        ++rx_queue->slow_fill_count;
}

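/* Validate the completed length of a single-buffer receive.  Overlength
 * packets are marked for discard; if the length exceeds the buffer itself
 * the event is handled via EFX_WORKAROUND_8071 and an RX recovery reset
 * is scheduled.
 */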
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
                                     int len)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
        rx_buf->flags |= EFX_RX_PKT_DISCARD;

        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d seriously overlength "
                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                                  efx_rx_queue_index(rx_queue), len, max_len,
                                  efx->type->rx_buffer_padding);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d overlength RX event "
                                  "(0x%x > 0x%x)\n",
                                  efx_rx_queue_index(rx_queue), len, max_len);
        }

        efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
                  unsigned int n_frags, u8 *eh)
{
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        skb = napi_get_frags(napi);
        if (unlikely(!skb)) {
                while (n_frags--) {
                        put_page(rx_buf->page);
                        rx_buf->page = NULL;
                        rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
                }
                return;
        }

        if (efx->net_dev->features & NETIF_F_RXHASH)
                skb->rxhash = efx_rx_buf_hash(efx, eh);
        skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
                          CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

        for (;;) {
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                   rx_buf->page, rx_buf->page_offset,
                                   rx_buf->len);
                rx_buf->page = NULL;
                skb->len += rx_buf->len;
                if (skb_shinfo(skb)->nr_frags == n_frags)
                        break;

                rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
        }

        skb->data_len = skb->len;
        skb->truesize += n_frags * efx->rx_buffer_truesize;

        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        gro_result = napi_gro_frags(napi);
        if (gro_result != GRO_DROP)
                channel->irq_mod_score += 2;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
                                     struct efx_rx_buffer *rx_buf,
                                     unsigned int n_frags,
                                     u8 *eh, int hdr_len)
{
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        /* Allocate an SKB to store the headers */
        skb = netdev_alloc_skb(efx->net_dev,
                               efx->rx_ip_align + efx->rx_prefix_size +
                               hdr_len);
        if (unlikely(skb == NULL))
                return NULL;

        EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);

        memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
               efx->rx_prefix_size + hdr_len);
        skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
        __skb_put(skb, hdr_len);

        /* Append the remaining page(s) onto the frag list */
        if (rx_buf->len > hdr_len) {
                rx_buf->page_offset += hdr_len;
                rx_buf->len -= hdr_len;

                for (;;) {
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buf->page, rx_buf->page_offset,
                                           rx_buf->len);
                        rx_buf->page = NULL;
                        skb->len += rx_buf->len;
                        skb->data_len += rx_buf->len;
                        if (skb_shinfo(skb)->nr_frags == n_frags)
                                break;

                        rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
                }
        } else {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
                n_frags = 0;
        }

        skb->truesize += n_frags * efx->rx_buffer_truesize;

        /* Move past the ethernet header */
        skb->protocol = eth_type_trans(skb, efx->net_dev);

        return skb;
}

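/* Handle a received packet.  First half: does not touch the packet payload.
 * Validates the fragment count and length, syncs the DMA mapping(s),
 * recycles the pages and leaves the packet for __efx_rx_packet() to deliver.
 */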
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int n_frags, unsigned int len, u16 flags)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_rx_buffer *rx_buf;

        rx_buf = efx_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;

        /* Validate the number of fragments and completed length */
        if (n_frags == 1) {
                if (!(flags & EFX_RX_PKT_PREFIX_LEN))
                        efx_rx_packet__check_len(rx_queue, rx_buf, len);
        } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
                   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
                   unlikely(len > n_frags * efx->rx_dma_len) ||
                   unlikely(!efx->rx_scatter)) {
                /* If this isn't an explicit discard request, either
                 * the hardware or the driver is broken.
                 */
                WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
                rx_buf->flags |= EFX_RX_PKT_DISCARD;
        }

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received ids %x-%x len %d %s%s\n",
                   efx_rx_queue_index(rx_queue), index,
                   (index + n_frags - 1) & rx_queue->ptr_mask, len,
                   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
                   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

        /* Discard packet, if instructed to do so.  Process the
         * previous receive first.
         */
        if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
                efx_rx_flush_packet(channel);
                efx_discard_rx_packet(channel, rx_buf, n_frags);
                return;
        }

        if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
                rx_buf->len = len;

        /* Release and/or sync the DMA mapping - assumes all RX buffers
         * consumed in-order per RX queue.
         */
        efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(efx_rx_buf_va(rx_buf));

        rx_buf->page_offset += efx->rx_prefix_size;
        rx_buf->len -= efx->rx_prefix_size;

        if (n_frags > 1) {
                /* Release/sync DMA mapping for additional fragments.
                 * Fix length for last fragment.
                 */
                unsigned int tail_frags = n_frags - 1;

                for (;;) {
                        rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
                        if (--tail_frags == 0)
                                break;
                        efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
                }
                rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
                efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
        }

        /* All fragments have been DMA-synced, so recycle pages. */
        rx_buf = efx_rx_buffer(rx_queue, index);
        efx_recycle_rx_pages(channel, rx_buf, n_frags);

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        efx_rx_flush_packet(channel);
        channel->rx_pkt_n_frags = n_frags;
        channel->rx_pkt_index = index;
}

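/* Deliver a received packet to the stack as an skb (the non-GRO path). */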
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
                           struct efx_rx_buffer *rx_buf,
                           unsigned int n_frags)
{
        struct sk_buff *skb;
        u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

        skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
        if (unlikely(skb == NULL)) {
                efx_free_rx_buffer(rx_buf);
                return;
        }
        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
        if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (channel->type->receive_skb)
                if (channel->type->receive_skb(channel, skb))
                        return;

        /* Pass the packet up */
        netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
        struct efx_nic *efx = channel->efx;
        struct efx_rx_buffer *rx_buf =
                efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
        u8 *eh = efx_rx_buf_va(rx_buf);

        /* Read length from the prefix if necessary.  This already
         * excludes the length of the prefix itself.
         */
        if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
                rx_buf->len = le16_to_cpup((__le16 *)
                                           (eh + efx->rx_packet_len_offset));

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
                efx_free_rx_buffer(rx_buf);
                goto out;
        }

        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

        if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
                efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
        else
                efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
        channel->rx_pkt_n_frags = 0;
}

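/* Allocate the software state for an RX queue: round the ring size up to a
 * power of two and allocate the buffer table and the hardware descriptor
 * ring.
 */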
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        rx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating RX queue %d size %#x mask %#x\n",
                  efx_rx_queue_index(rx_queue), efx->rxq_entries,
                  rx_queue->ptr_mask);

        /* Allocate RX buffers */
        rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = efx_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }

        return rc;
}

static void efx_init_rx_recycle_ring(struct efx_nic *efx,
                                     struct efx_rx_queue *rx_queue)
{
        unsigned int bufs_in_recycle_ring, page_ring_size;

        /* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
        bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
#else
        if (iommu_present(&pci_bus_type))
                bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
        else
                bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

        page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
                                            efx->rx_bufs_per_page);
        rx_queue->page_ring = kcalloc(page_ring_size,
                                      sizeof(*rx_queue->page_ring), GFP_KERNEL);
        rx_queue->page_ptr_mask = page_ring_size - 1;
}

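/* (Re)initialise an RX queue for use: reset the ring and page-recycle state
 * and work out the fill limit and the fast-fill trigger level.
 */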
void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, max_trigger;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;
        efx_init_rx_recycle_ring(efx, rx_queue);

        rx_queue->page_remove = 0;
        rx_queue->page_add = rx_queue->page_ptr_mask + 1;
        rx_queue->page_recycle_count = 0;
        rx_queue->page_recycle_failed = 0;
        rx_queue->page_recycle_full = 0;

        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
        max_trigger =
                max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        if (rx_refill_threshold != 0) {
                trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
                if (trigger > max_trigger)
                        trigger = max_trigger;
        } else {
                trigger = max_trigger;
        }

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;
        rx_queue->refill_enabled = true;

        /* Set up RX descriptor ring */
        efx_nic_init_rx(rx_queue);
}

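/* Tear down an RX queue: stop the slow-fill timer, release any outstanding
 * RX buffers, then unmap and free the pages held in the recycle ring.
 */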
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

        del_timer_sync(&rx_queue->slow_fill);

        /* Release RX buffers from the current read ptr to the write ptr */
        if (rx_queue->buffer) {
                for (i = rx_queue->removed_count; i < rx_queue->added_count;
                     i++) {
                        unsigned index = i & rx_queue->ptr_mask;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }

        /* Unmap and release the pages in the recycle ring. Remove the ring. */
        for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
                struct page *page = rx_queue->page_ring[i];
                struct efx_rx_page_state *state;

                if (page == NULL)
                        continue;

                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
        }
        kfree(rx_queue->page_ring);
        rx_queue->page_ring = NULL;
}

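/* Free the resources allocated by efx_probe_rx_queue(). */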
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

        efx_nic_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

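/* Accelerated RFS: build a 5-tuple filter from the headers that the core
 * RPS/RFS code has already validated and insert it as a steering hint for
 * the requested RX queue.
 */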
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                   u16 rxq_index, u32 flow_id)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;
        struct efx_filter_spec spec;
        const __be16 *ports;
        __be16 ether_type;
        int nhoff;
        int rc;

        /* The core RPS/RFS code has already parsed and validated
         * VLAN, IP and transport headers.  We assume they are in the
         * header area.
         */

        if (skb->protocol == htons(ETH_P_8021Q)) {
                const struct vlan_hdr *vh =
                        (const struct vlan_hdr *)skb->data;

                /* We can't filter on the IP 5-tuple and the vlan
                 * together, so just strip the vlan header and filter
                 * on the IP part.
                 */
                EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
                ether_type = vh->h_vlan_encapsulated_proto;
                nhoff = sizeof(struct vlan_hdr);
        } else {
                ether_type = skb->protocol;
                nhoff = 0;
        }

        if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;

        efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
                           efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
                           rxq_index);
        spec.match_flags =
                EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
                EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
                EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
        spec.ether_type = ether_type;

        if (ether_type == htons(ETH_P_IP)) {
                const struct iphdr *ip =
                        (const struct iphdr *)(skb->data + nhoff);

                EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
                if (ip_is_fragment(ip))
                        return -EPROTONOSUPPORT;
                spec.ip_proto = ip->protocol;
                spec.rem_host[0] = ip->saddr;
                spec.loc_host[0] = ip->daddr;
                EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
                ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
        } else {
                const struct ipv6hdr *ip6 =
                        (const struct ipv6hdr *)(skb->data + nhoff);

                EFX_BUG_ON_PARANOID(skb_headlen(skb) <
                                    nhoff + sizeof(*ip6) + 4);
                spec.ip_proto = ip6->nexthdr;
                memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
                memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
                ports = (const __be16 *)(ip6 + 1);
        }

        spec.rem_port = ports[0];
        spec.loc_port = ports[1];

        rc = efx->type->filter_rfs_insert(efx, &spec);
        if (rc < 0)
                return rc;

        /* Remember this so we can check whether to expire the filter later */
        efx->rps_flow_id[rc] = flow_id;
        channel = efx_get_channel(efx, skb_get_rx_queue(skb));
        ++channel->rfs_filters_added;

        if (ether_type == htons(ETH_P_IP))
                netif_info(efx, rx_status, efx->net_dev,
                           "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
                           (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                           spec.rem_host, ntohs(ports[0]), spec.loc_host,
                           ntohs(ports[1]), rxq_index, flow_id, rc);
        else
                netif_info(efx, rx_status, efx->net_dev,
                           "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
                           (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                           spec.rem_host, ntohs(ports[0]), spec.loc_host,
                           ntohs(ports[1]), rxq_index, flow_id, rc);

        return rc;
}

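/* Scan up to @quota RFS filter entries, expiring any that are no longer in
 * use.  Returns false without doing anything if the filter table lock
 * cannot be taken.
 */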
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
        bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
        unsigned int index, size;
        u32 flow_id;

        if (!spin_trylock_bh(&efx->filter_lock))
                return false;

        expire_one = efx->type->filter_rfs_expire_one;
        index = efx->rps_expire_index;
        size = efx->type->max_rx_ip_filters;
        while (quota--) {
                flow_id = efx->rps_flow_id[index];
                if (expire_one(efx, flow_id, index))
                        netif_info(efx, rx_status, efx->net_dev,
                                   "expired filter %d [flow %u]\n",
                                   index, flow_id);
                if (++index == size)
                        index = 0;
        }
        efx->rps_expire_index = index;

        spin_unlock_bh(&efx->filter_lock);
        return true;
}

#endif /* CONFIG_RFS_ACCEL */

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
        if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
            spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
                return false;

        if (spec->match_flags &
            (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
            is_multicast_ether_addr(spec->loc_mac))
                return true;

        if ((spec->match_flags &
             (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
            (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
                if (spec->ether_type == htons(ETH_P_IP) &&
                    ipv4_is_multicast(spec->loc_host[0]))
                        return true;
                if (spec->ether_type == htons(ETH_P_IPV6) &&
                    ((const u8 *)spec->loc_host)[0] == 0xff)
                        return true;
        }

        return false;
}