1 /****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2013 Solarflare Communications Inc.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/interrupt.h>
14 #include <linux/pci.h>
15 #include <linux/module.h>
16 #include <linux/seq_file.h>
17 #include <linux/crc32.h>
18 #include "net_driver.h"
22 #include "farch_regs.h"
24 #include "siena_sriov.h"
26 #include "workarounds.h"
28 /* Falcon-architecture (SFC4000 and SFC9000-family) support */
30 /**************************************************************************
34 **************************************************************************
37 /* This is set to 16 for a good reason. In summary, if larger than
38 * 16, the descriptor cache holds more than a default socket
39 * buffer's worth of packets (for UDP we can only have at most one
40 * socket buffer's worth outstanding). This combined with the fact
41 * that we only get 1 TX event per descriptor cache means the NIC
44 #define TX_DC_ENTRIES 16
45 #define TX_DC_ENTRIES_ORDER 1
47 #define RX_DC_ENTRIES 64
48 #define RX_DC_ENTRIES_ORDER 3
50 /* If EFX_MAX_INT_ERRORS internal errors occur within
51 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
54 #define EFX_INT_ERROR_EXPIRE 3600
55 #define EFX_MAX_INT_ERRORS 5
57 /* Depth of RX flush request fifo */
58 #define EFX_RX_FLUSH_COUNT 4
60 /* Driver generated events */
61 #define _EFX_CHANNEL_MAGIC_TEST 0x000101
62 #define _EFX_CHANNEL_MAGIC_FILL 0x000102
63 #define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
64 #define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
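/* A magic value combines one of the codes above with per-queue data:
 * the code occupies the upper bits and the channel or queue index is
 * packed into the low 8 bits, so the event handler can recover both.
 */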
66 #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
67 #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
69 #define EFX_CHANNEL_MAGIC_TEST(_channel) \
70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
71 #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
72 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
73 efx_rx_queue_index(_rx_queue))
74 #define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
75 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
76 efx_rx_queue_index(_rx_queue))
77 #define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
78 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
81 static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);
83 /**************************************************************************
87 **************************************************************************/
89 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
92 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
96 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
97 const efx_oword_t *mask)
99 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
100 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
103 int efx_farch_test_registers(struct efx_nic *efx,
104 const struct efx_farch_register_test *regs,
107 unsigned address = 0, i, j;
108 efx_oword_t mask, imask, original, reg, buf;
110 for (i = 0; i < n_regs; ++i) {
111 address = regs[i].address;
112 mask = imask = regs[i].mask;
113 EFX_INVERT_OWORD(imask);
115 efx_reado(efx, &original, address);
117 /* bit sweep on and off */
118 for (j = 0; j < 128; j++) {
119 if (!EFX_EXTRACT_OWORD32(mask, j, j))
122 /* Test this testable bit can be set in isolation */
123 EFX_AND_OWORD(reg, original, mask);
124 EFX_SET_OWORD32(reg, j, j, 1);
126 efx_writeo(efx, &reg, address);
127 efx_reado(efx, &buf, address);
129 if (efx_masked_compare_oword(&reg, &buf, &mask))
132 /* Test this testable bit can be cleared in isolation */
133 EFX_OR_OWORD(reg, original, mask);
134 EFX_SET_OWORD32(reg, j, j, 0);
136 efx_writeo(efx, &reg, address);
137 efx_reado(efx, &buf, address);
139 if (efx_masked_compare_oword(&reg, &buf, &mask))
143 efx_writeo(efx, &original, address);
149 netif_err(efx, hw, efx->net_dev,
150 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
151 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
152 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
156 /**************************************************************************
158 * Special buffer handling
159 * Special buffers are used for event queues and the TX and RX
162 *************************************************************************/
165 * Initialise a special buffer
167 * This will define a buffer (previously allocated via
168 * efx_alloc_special_buffer()) in the buffer table, allowing
169 * it to be used for event queues, descriptor rings etc.
172 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
174 efx_qword_t buf_desc;
179 EFX_BUG_ON_PARANOID(!buffer->buf.addr);
181 /* Write buffer descriptors to NIC */
182 for (i = 0; i < buffer->entries; i++) {
183 index = buffer->index + i;
184 dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
185 netif_dbg(efx, probe, efx->net_dev,
186 "mapping special buffer %d at %llx\n",
187 index, (unsigned long long)dma_addr);
188 EFX_POPULATE_QWORD_3(buf_desc,
189 FRF_AZ_BUF_ADR_REGION, 0,
190 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
191 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
192 efx_write_buf_tbl(efx, &buf_desc, index);
196 /* Unmaps a buffer and clears the buffer table entries */
198 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
200 efx_oword_t buf_tbl_upd;
201 unsigned int start = buffer->index;
202 unsigned int end = (buffer->index + buffer->entries - 1);
204 if (!buffer->entries)
207 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
208 buffer->index, buffer->index + buffer->entries - 1);
210 EFX_POPULATE_OWORD_4(buf_tbl_upd,
211 FRF_AZ_BUF_UPD_CMD, 0,
212 FRF_AZ_BUF_CLR_CMD, 1,
213 FRF_AZ_BUF_CLR_END_ID, end,
214 FRF_AZ_BUF_CLR_START_ID, start);
215 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
219 * Allocate a new special buffer
221 * This allocates memory for a new buffer, clears it and allocates a
222 * new buffer ID range. It does not write into the buffer table.
224 * This call will allocate 4KB buffers, since 8KB buffers can't be
225 * used for event queues and descriptor rings.
227 static int efx_alloc_special_buffer(struct efx_nic *efx,
228 struct efx_special_buffer *buffer,
231 #ifdef CONFIG_SFC_SRIOV
232 struct siena_nic_data *nic_data = efx->nic_data;
234 len = ALIGN(len, EFX_BUF_SIZE);
236 if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
238 buffer->entries = len / EFX_BUF_SIZE;
239 BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
241 /* Select new buffer ID */
242 buffer->index = efx->next_buffer_table;
243 efx->next_buffer_table += buffer->entries;
244 #ifdef CONFIG_SFC_SRIOV
245 BUG_ON(efx_siena_sriov_enabled(efx) &&
246 nic_data->vf_buftbl_base < efx->next_buffer_table);
249 netif_dbg(efx, probe, efx->net_dev,
250 "allocating special buffers %d-%d at %llx+%x "
251 "(virt %p phys %llx)\n", buffer->index,
252 buffer->index + buffer->entries - 1,
253 (u64)buffer->buf.dma_addr, len,
254 buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
260 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
262 if (!buffer->buf.addr)
265 netif_dbg(efx, hw, efx->net_dev,
266 "deallocating special buffers %d-%d at %llx+%x "
267 "(virt %p phys %llx)\n", buffer->index,
268 buffer->index + buffer->entries - 1,
269 (u64)buffer->buf.dma_addr, buffer->buf.len,
270 buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
272 efx_nic_free_buffer(efx, &buffer->buf);
276 /**************************************************************************
280 **************************************************************************/
282 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
283 static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
288 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
289 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
290 efx_writed_page(tx_queue->efx, &reg,
291 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
294 /* Write pointer and first descriptor for TX descriptor ring */
295 static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
296 const efx_qword_t *txd)
301 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
302 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
304 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
305 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
306 FRF_AZ_TX_DESC_WPTR, write_ptr);
308 efx_writeo_page(tx_queue->efx, &reg,
309 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
313 /* For each entry inserted into the software descriptor ring, create a
314 * descriptor in the hardware TX descriptor ring (in host memory), and
317 void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
319 struct efx_tx_buffer *buffer;
322 unsigned old_write_count = tx_queue->write_count;
324 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
327 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
328 buffer = &tx_queue->buffer[write_ptr];
329 txd = efx_tx_desc(tx_queue, write_ptr);
330 ++tx_queue->write_count;
332 EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
334 /* Create TX descriptor ring entry */
335 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
336 EFX_POPULATE_QWORD_4(*txd,
338 buffer->flags & EFX_TX_BUF_CONT,
339 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
340 FSF_AZ_TX_KER_BUF_REGION, 0,
341 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
342 } while (tx_queue->write_count != tx_queue->insert_count);
344 wmb(); /* Ensure descriptors are written before they are fetched */
346 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
347 txd = efx_tx_desc(tx_queue,
348 old_write_count & tx_queue->ptr_mask);
349 efx_farch_push_tx_desc(tx_queue, txd);
352 efx_farch_notify_tx_desc(tx_queue);
356 /* Allocate hardware resources for a TX queue */
357 int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
359 struct efx_nic *efx = tx_queue->efx;
362 entries = tx_queue->ptr_mask + 1;
363 return efx_alloc_special_buffer(efx, &tx_queue->txd,
364 entries * sizeof(efx_qword_t));
367 void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
369 struct efx_nic *efx = tx_queue->efx;
372 /* Pin TX descriptor ring */
373 efx_init_special_buffer(efx, &tx_queue->txd);
375 /* Push TX descriptor ring to card */
376 EFX_POPULATE_OWORD_10(reg,
377 FRF_AZ_TX_DESCQ_EN, 1,
378 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
379 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
380 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
381 FRF_AZ_TX_DESCQ_EVQ_ID,
382 tx_queue->channel->channel,
383 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
384 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
385 FRF_AZ_TX_DESCQ_SIZE,
386 __ffs(tx_queue->txd.entries),
387 FRF_AZ_TX_DESCQ_TYPE, 0,
388 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
390 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
391 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
392 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
393 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
397 efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
400 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
401 /* Only 128 bits in this register */
402 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
404 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
405 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
406 __clear_bit_le(tx_queue->queue, &reg);
408 __set_bit_le(tx_queue->queue, &reg);
409 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
412 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
413 EFX_POPULATE_OWORD_1(reg,
415 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
417 FFE_BZ_TX_PACE_RESERVED);
418 efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
423 static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
425 struct efx_nic *efx = tx_queue->efx;
426 efx_oword_t tx_flush_descq;
428 WARN_ON(atomic_read(&tx_queue->flush_outstanding));
429 atomic_set(&tx_queue->flush_outstanding, 1);
431 EFX_POPULATE_OWORD_2(tx_flush_descq,
432 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
433 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
434 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
437 void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
439 struct efx_nic *efx = tx_queue->efx;
440 efx_oword_t tx_desc_ptr;
442 /* Remove TX descriptor ring from card */
443 EFX_ZERO_OWORD(tx_desc_ptr);
444 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
447 /* Unpin TX descriptor ring */
448 efx_fini_special_buffer(efx, &tx_queue->txd);
451 /* Free buffers backing TX queue */
452 void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
454 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
457 /**************************************************************************
461 **************************************************************************/
463 /* This creates an entry in the RX descriptor queue */
465 efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
467 struct efx_rx_buffer *rx_buf;
470 rxd = efx_rx_desc(rx_queue, index);
471 rx_buf = efx_rx_buffer(rx_queue, index);
472 EFX_POPULATE_QWORD_3(*rxd,
473 FSF_AZ_RX_KER_BUF_SIZE,
475 rx_queue->efx->type->rx_buffer_padding,
476 FSF_AZ_RX_KER_BUF_REGION, 0,
477 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
480 /* This writes to the RX_DESC_WPTR register for the specified receive
483 void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
485 struct efx_nic *efx = rx_queue->efx;
489 while (rx_queue->notified_count != rx_queue->added_count) {
490 efx_farch_build_rx_desc(
492 rx_queue->notified_count & rx_queue->ptr_mask);
493 ++rx_queue->notified_count;
497 write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
498 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
499 efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
500 efx_rx_queue_index(rx_queue));
503 int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
505 struct efx_nic *efx = rx_queue->efx;
508 entries = rx_queue->ptr_mask + 1;
509 return efx_alloc_special_buffer(efx, &rx_queue->rxd,
510 entries * sizeof(efx_qword_t));
513 void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
515 efx_oword_t rx_desc_ptr;
516 struct efx_nic *efx = rx_queue->efx;
517 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
518 bool iscsi_digest_en = is_b0;
521 /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
522 * DMA to continue after a PCIe page boundary (and scattering
523 * is not possible). In Falcon B0 and Siena, it enables
526 jumbo_en = !is_b0 || efx->rx_scatter;
528 netif_dbg(efx, hw, efx->net_dev,
529 "RX queue %d ring in special buffers %d-%d\n",
530 efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
531 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
533 rx_queue->scatter_n = 0;
535 /* Pin RX descriptor ring */
536 efx_init_special_buffer(efx, &rx_queue->rxd);
538 /* Push RX descriptor ring to card */
539 EFX_POPULATE_OWORD_10(rx_desc_ptr,
540 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
541 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
542 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
543 FRF_AZ_RX_DESCQ_EVQ_ID,
544 efx_rx_queue_channel(rx_queue)->channel,
545 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
546 FRF_AZ_RX_DESCQ_LABEL,
547 efx_rx_queue_index(rx_queue),
548 FRF_AZ_RX_DESCQ_SIZE,
549 __ffs(rx_queue->rxd.entries),
550 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
551 FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
552 FRF_AZ_RX_DESCQ_EN, 1);
553 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
554 efx_rx_queue_index(rx_queue));
557 static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
559 struct efx_nic *efx = rx_queue->efx;
560 efx_oword_t rx_flush_descq;
562 EFX_POPULATE_OWORD_2(rx_flush_descq,
563 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
564 FRF_AZ_RX_FLUSH_DESCQ,
565 efx_rx_queue_index(rx_queue));
566 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
569 void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
571 efx_oword_t rx_desc_ptr;
572 struct efx_nic *efx = rx_queue->efx;
574 /* Remove RX descriptor ring from card */
575 EFX_ZERO_OWORD(rx_desc_ptr);
576 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
577 efx_rx_queue_index(rx_queue));
579 /* Unpin RX descriptor ring */
580 efx_fini_special_buffer(efx, &rx_queue->rxd);
583 /* Free buffers backing RX queue */
584 void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
586 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
589 /**************************************************************************
593 **************************************************************************/
595 /* efx_farch_do_flush() must be woken up when all flushes are completed,
596 * or more RX flushes can be kicked off.
598 static bool efx_farch_flush_wake(struct efx_nic *efx)
600 /* Ensure that all updates are visible to efx_farch_do_flush() */
603 return (atomic_read(&efx->active_queues) == 0 ||
604 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
605 && atomic_read(&efx->rxq_flush_pending) > 0));
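/* Check whether all TX queues have actually finished flushing.  If a
 * queue's flush completed in hardware but the completion event was
 * lost, generate a TX drain event so that the active-queue accounting
 * still reaches zero.
 */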
608 static bool efx_check_tx_flush_complete(struct efx_nic *efx)
611 efx_oword_t txd_ptr_tbl;
612 struct efx_channel *channel;
613 struct efx_tx_queue *tx_queue;
615 efx_for_each_channel(channel, efx) {
616 efx_for_each_channel_tx_queue(tx_queue, channel) {
617 efx_reado_table(efx, &txd_ptr_tbl,
618 FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
619 if (EFX_OWORD_FIELD(txd_ptr_tbl,
620 FRF_AZ_TX_DESCQ_FLUSH) ||
621 EFX_OWORD_FIELD(txd_ptr_tbl,
622 FRF_AZ_TX_DESCQ_EN)) {
623 netif_dbg(efx, hw, efx->net_dev,
624 "flush did not complete on TXQ %d\n",
627 } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
629 /* The flush is complete, but we didn't
630 * receive a flush completion event
632 netif_dbg(efx, hw, efx->net_dev,
633 "flush complete on TXQ %d, so drain "
634 "the queue\n", tx_queue->queue);
635 /* Don't need to increment active_queues as it
636 * has already been incremented for the queues
637 * which did not drain
639 efx_farch_magic_event(channel,
640 EFX_CHANNEL_MAGIC_TX_DRAIN(
649 /* Flush all the transmit queues, and continue flushing receive queues until
650 * they're all flushed. Wait for the DRAIN events to be received so that there
651 * are no more RX and TX events left on any channel. */
652 static int efx_farch_do_flush(struct efx_nic *efx)
654 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
655 struct efx_channel *channel;
656 struct efx_rx_queue *rx_queue;
657 struct efx_tx_queue *tx_queue;
660 efx_for_each_channel(channel, efx) {
661 efx_for_each_channel_tx_queue(tx_queue, channel) {
662 efx_farch_flush_tx_queue(tx_queue);
664 efx_for_each_channel_rx_queue(rx_queue, channel) {
665 rx_queue->flush_pending = true;
666 atomic_inc(&efx->rxq_flush_pending);
670 while (timeout && atomic_read(&efx->active_queues) > 0) {
671 /* If SRIOV is enabled, then offload receive queue flushing to
672 * the firmware (though we will still have to poll for
673 * completion). If that fails, fall back to the old scheme.
675 if (efx_siena_sriov_enabled(efx)) {
676 rc = efx_mcdi_flush_rxqs(efx);
681 /* The hardware supports four concurrent rx flushes, each of
682 * which may need to be retried if there is an outstanding
685 efx_for_each_channel(channel, efx) {
686 efx_for_each_channel_rx_queue(rx_queue, channel) {
687 if (atomic_read(&efx->rxq_flush_outstanding) >=
691 if (rx_queue->flush_pending) {
692 rx_queue->flush_pending = false;
693 atomic_dec(&efx->rxq_flush_pending);
694 atomic_inc(&efx->rxq_flush_outstanding);
695 efx_farch_flush_rx_queue(rx_queue);
701 timeout = wait_event_timeout(efx->flush_wq,
702 efx_farch_flush_wake(efx),
706 if (atomic_read(&efx->active_queues) &&
707 !efx_check_tx_flush_complete(efx)) {
708 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
709 "(rx %d+%d)\n", atomic_read(&efx->active_queues),
710 atomic_read(&efx->rxq_flush_outstanding),
711 atomic_read(&efx->rxq_flush_pending));
714 atomic_set(&efx->active_queues, 0);
715 atomic_set(&efx->rxq_flush_pending, 0);
716 atomic_set(&efx->rxq_flush_outstanding, 0);
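/* Flush and shut down all TX and RX DMA queues in preparation for a
 * reset or device removal.
 */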
722 int efx_farch_fini_dmaq(struct efx_nic *efx)
724 struct efx_channel *channel;
725 struct efx_tx_queue *tx_queue;
726 struct efx_rx_queue *rx_queue;
729 /* Do not attempt to write to the NIC during EEH recovery */
730 if (efx->state != STATE_RECOVERY) {
731 /* Only perform flush if DMA is enabled */
732 if (efx->pci_dev->is_busmaster) {
733 efx->type->prepare_flush(efx);
734 rc = efx_farch_do_flush(efx);
735 efx->type->finish_flush(efx);
738 efx_for_each_channel(channel, efx) {
739 efx_for_each_channel_rx_queue(rx_queue, channel)
740 efx_farch_rx_fini(rx_queue);
741 efx_for_each_channel_tx_queue(tx_queue, channel)
742 efx_farch_tx_fini(tx_queue);
749 /* Reset queue and flush accounting after FLR
751 * One possible cause of FLR recovery is that DMA may be failing (e.g. if bus
752 * mastering was disabled), in which case we don't receive (RXQ) flush
753 * completion events. This means that efx->rxq_flush_outstanding remained at 4
754 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
755 * events were received, and we didn't go through efx_check_tx_flush_complete())
756 * If we don't fix this up, on the next call to efx_realloc_channels() we won't
757 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
758 * for batched flush requests; and the efx->active_queues gets messed up because
759 * we keep incrementing for the newly initialised queues, but it never went to
760 * zero previously. Then we get a timeout every time we try to restart the
761 * queues, as it doesn't go back to zero when we should be flushing the queues.
763 void efx_farch_finish_flr(struct efx_nic *efx)
765 atomic_set(&efx->rxq_flush_pending, 0);
766 atomic_set(&efx->rxq_flush_outstanding, 0);
767 atomic_set(&efx->active_queues, 0);
771 /**************************************************************************
773 * Event queue processing
774 * Event queues are processed by per-channel tasklets.
776 **************************************************************************/
778 /* Update a channel's event queue's read pointer (RPTR) register
780 * This writes the EVQ_RPTR_REG register for the specified channel's
783 void efx_farch_ev_read_ack(struct efx_channel *channel)
786 struct efx_nic *efx = channel->efx;
788 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
789 channel->eventq_read_ptr & channel->eventq_mask);
791 /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
792 * of 4 bytes, but it is really 16 bytes just like later revisions.
794 efx_writed(efx, &reg,
795 efx->type->evq_rptr_tbl_base +
796 FR_BZ_EVQ_RPTR_STEP * channel->channel);
799 /* Use HW to insert a SW defined event */
800 void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
803 efx_oword_t drv_ev_reg;
805 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
806 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
807 drv_ev_reg.u32[0] = event->u32[0];
808 drv_ev_reg.u32[1] = event->u32[1];
809 drv_ev_reg.u32[2] = 0;
810 drv_ev_reg.u32[3] = 0;
811 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
812 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
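/* Inject a driver-generated event carrying @magic into a channel's
 * event queue.
 */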
815 static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
819 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
820 FSE_AZ_EV_CODE_DRV_GEN_EV,
821 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
822 efx_farch_generate_event(channel->efx, channel->channel, &event);
825 /* Handle a transmit completion event
827 * The NIC batches TX completion events; the message we receive is of
828 * the form "complete all TX events up to this index".
831 efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
833 unsigned int tx_ev_desc_ptr;
834 unsigned int tx_ev_q_label;
835 struct efx_tx_queue *tx_queue;
836 struct efx_nic *efx = channel->efx;
839 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
842 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
843 /* Transmit completion */
844 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
845 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
846 tx_queue = efx_channel_get_tx_queue(
847 channel, tx_ev_q_label % EFX_TXQ_TYPES);
848 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
850 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
851 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
852 /* Rewrite the FIFO write pointer */
853 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
854 tx_queue = efx_channel_get_tx_queue(
855 channel, tx_ev_q_label % EFX_TXQ_TYPES);
857 netif_tx_lock(efx->net_dev);
858 efx_farch_notify_tx_desc(tx_queue);
859 netif_tx_unlock(efx->net_dev);
860 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
861 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
863 netif_err(efx, tx_err, efx->net_dev,
864 "channel %d unexpected TX event "
865 EFX_QWORD_FMT"\n", channel->channel,
866 EFX_QWORD_VAL(*event));
872 /* Detect errors included in the rx_evt_pkt_ok bit. */
873 static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
874 const efx_qword_t *event)
876 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
877 struct efx_nic *efx = rx_queue->efx;
878 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
879 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
880 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
881 bool rx_ev_other_err, rx_ev_pause_frm;
882 bool rx_ev_hdr_type, rx_ev_mcast_pkt;
883 unsigned rx_ev_pkt_type;
885 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
886 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
887 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
888 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
889 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
890 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
891 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
892 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
893 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
894 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
895 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
896 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
897 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
898 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
899 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
901 /* Every error apart from tobe_disc and pause_frm */
902 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
903 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
904 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
906 /* Count errors that are not in MAC stats. Ignore expected
907 * checksum errors during self-test. */
909 ++channel->n_rx_frm_trunc;
910 else if (rx_ev_tobe_disc)
911 ++channel->n_rx_tobe_disc;
912 else if (!efx->loopback_selftest) {
913 if (rx_ev_ip_hdr_chksum_err)
914 ++channel->n_rx_ip_hdr_chksum_err;
915 else if (rx_ev_tcp_udp_chksum_err)
916 ++channel->n_rx_tcp_udp_chksum_err;
919 /* TOBE_DISC is expected on unicast mismatches; don't print out an
920 * error message. FRM_TRUNC indicates RXDP dropped the packet due
921 * to a FIFO overflow.
924 if (rx_ev_other_err && net_ratelimit()) {
925 netif_dbg(efx, rx_err, efx->net_dev,
926 " RX queue %d unexpected RX event "
927 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
928 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
929 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
930 rx_ev_ip_hdr_chksum_err ?
931 " [IP_HDR_CHKSUM_ERR]" : "",
932 rx_ev_tcp_udp_chksum_err ?
933 " [TCP_UDP_CHKSUM_ERR]" : "",
934 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
935 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
936 rx_ev_drib_nib ? " [DRIB_NIB]" : "",
937 rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
938 rx_ev_pause_frm ? " [PAUSE]" : "");
942 /* The frame must be discarded if any of these are true. */
943 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
944 rx_ev_tobe_disc | rx_ev_pause_frm) ?
945 EFX_RX_PKT_DISCARD : 0;
948 /* Handle receive events that are not in-order. Return true if this
949 * can be handled as a partial packet discard, false if it's more
953 efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
955 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
956 struct efx_nic *efx = rx_queue->efx;
957 unsigned expected, dropped;
959 if (rx_queue->scatter_n &&
960 index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
961 rx_queue->ptr_mask)) {
962 ++channel->n_rx_nodesc_trunc;
966 expected = rx_queue->removed_count & rx_queue->ptr_mask;
967 dropped = (index - expected) & rx_queue->ptr_mask;
968 netif_info(efx, rx_err, efx->net_dev,
969 "dropped %d events (index=%d expected=%d)\n",
970 dropped, index, expected);
972 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
973 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
977 /* Handle a packet received event
979 * The NIC gives a "discard" flag if it's a unicast packet with the
980 * wrong destination address
981 * Also "is multicast" and "matches multicast filter" flags can be used to
982 * discard non-matching multicast packets.
985 efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
987 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
988 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
989 unsigned expected_ptr;
990 bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
992 struct efx_rx_queue *rx_queue;
993 struct efx_nic *efx = channel->efx;
995 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
998 rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
999 rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
1000 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
1003 rx_queue = efx_channel_get_rx_queue(channel);
1005 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
1006 expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
1007 rx_queue->ptr_mask);
1009 /* Check for partial drops and other errors */
1010 if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
1011 unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
1012 if (rx_ev_desc_ptr != expected_ptr &&
1013 !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
1016 /* Discard all pending fragments */
1017 if (rx_queue->scatter_n) {
1020 rx_queue->removed_count & rx_queue->ptr_mask,
1021 rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
1022 rx_queue->removed_count += rx_queue->scatter_n;
1023 rx_queue->scatter_n = 0;
1026 /* Return if there is no new fragment */
1027 if (rx_ev_desc_ptr != expected_ptr)
1030 /* Discard new fragment if not SOP */
1034 rx_queue->removed_count & rx_queue->ptr_mask,
1035 1, 0, EFX_RX_PKT_DISCARD);
1036 ++rx_queue->removed_count;
1041 ++rx_queue->scatter_n;
1045 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
1046 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
1047 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1049 if (likely(rx_ev_pkt_ok)) {
1050 /* If packet is marked as OK then we can rely on the
1051 * hardware checksum and classification.
1054 switch (rx_ev_hdr_type) {
1055 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
1056 flags |= EFX_RX_PKT_TCP;
1058 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
1059 flags |= EFX_RX_PKT_CSUMMED;
1061 case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
1062 case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
1066 flags = efx_farch_handle_rx_not_ok(rx_queue, event);
1069 /* Detect multicast packets that didn't match the filter */
1070 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
1071 if (rx_ev_mcast_pkt) {
1072 unsigned int rx_ev_mcast_hash_match =
1073 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
1075 if (unlikely(!rx_ev_mcast_hash_match)) {
1076 ++channel->n_rx_mcast_mismatch;
1077 flags |= EFX_RX_PKT_DISCARD;
1081 channel->irq_mod_score += 2;
1083 /* Handle received packet */
1084 efx_rx_packet(rx_queue,
1085 rx_queue->removed_count & rx_queue->ptr_mask,
1086 rx_queue->scatter_n, rx_ev_byte_cnt, flags);
1087 rx_queue->removed_count += rx_queue->scatter_n;
1088 rx_queue->scatter_n = 0;
1091 /* If this flush done event corresponds to a &struct efx_tx_queue, then
1092 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
1093 * of all transmit completions.
1096 efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1098 struct efx_tx_queue *tx_queue;
1101 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1102 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
1103 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
1104 qid % EFX_TXQ_TYPES);
1105 if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
1106 efx_farch_magic_event(tx_queue->channel,
1107 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
1112 /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
1113 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
1114 * the RX queue back to the mask of RX queues in need of flushing.
1117 efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
1119 struct efx_channel *channel;
1120 struct efx_rx_queue *rx_queue;
1124 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1125 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1126 if (qid >= efx->n_channels)
1128 channel = efx_get_channel(efx, qid);
1129 if (!efx_channel_has_rx_queue(channel))
1131 rx_queue = efx_channel_get_rx_queue(channel);
1134 netif_info(efx, hw, efx->net_dev,
1135 "RXQ %d flush retry\n", qid);
1136 rx_queue->flush_pending = true;
1137 atomic_inc(&efx->rxq_flush_pending);
1139 efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
1140 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
1142 atomic_dec(&efx->rxq_flush_outstanding);
1143 if (efx_farch_flush_wake(efx))
1144 wake_up(&efx->flush_wq);
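/* A TX or RX queue has drained; account for it and wake up
 * efx_farch_do_flush() if all flushes have now completed.
 */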
1148 efx_farch_handle_drain_event(struct efx_channel *channel)
1150 struct efx_nic *efx = channel->efx;
1152 WARN_ON(atomic_read(&efx->active_queues) == 0);
1153 atomic_dec(&efx->active_queues);
1154 if (efx_farch_flush_wake(efx))
1155 wake_up(&efx->flush_wq);
1158 static void efx_farch_handle_generated_event(struct efx_channel *channel,
1161 struct efx_nic *efx = channel->efx;
1162 struct efx_rx_queue *rx_queue =
1163 efx_channel_has_rx_queue(channel) ?
1164 efx_channel_get_rx_queue(channel) : NULL;
1165 unsigned magic, code;
1167 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
1168 code = _EFX_CHANNEL_MAGIC_CODE(magic);
1170 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
1171 channel->event_test_cpu = raw_smp_processor_id();
1172 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
1173 /* The queue must be empty, so we won't receive any rx
1174 * events, so efx_process_channel() won't refill the
1175 * queue. Refill it here */
1176 efx_fast_push_rx_descriptors(rx_queue, true);
1177 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
1178 efx_farch_handle_drain_event(channel);
1179 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
1180 efx_farch_handle_drain_event(channel);
1182 netif_dbg(efx, hw, efx->net_dev, "channel %d received "
1183 "generated event "EFX_QWORD_FMT"\n",
1184 channel->channel, EFX_QWORD_VAL(*event));
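/* Decode and handle a driver event (flush completions, wakeups, timer
 * expiries and error reports) reported by the NIC on this channel.
 */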
1189 efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1191 struct efx_nic *efx = channel->efx;
1192 unsigned int ev_sub_code;
1193 unsigned int ev_sub_data;
1195 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
1196 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
1198 switch (ev_sub_code) {
1199 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
1200 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
1201 channel->channel, ev_sub_data);
1202 efx_farch_handle_tx_flush_done(efx, event);
1203 #ifdef CONFIG_SFC_SRIOV
1204 efx_siena_sriov_tx_flush_done(efx, event);
1207 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
1208 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
1209 channel->channel, ev_sub_data);
1210 efx_farch_handle_rx_flush_done(efx, event);
1211 #ifdef CONFIG_SFC_SRIOV
1212 efx_siena_sriov_rx_flush_done(efx, event);
1215 case FSE_AZ_EVQ_INIT_DONE_EV:
1216 netif_dbg(efx, hw, efx->net_dev,
1217 "channel %d EVQ %d initialised\n",
1218 channel->channel, ev_sub_data);
1220 case FSE_AZ_SRM_UPD_DONE_EV:
1221 netif_vdbg(efx, hw, efx->net_dev,
1222 "channel %d SRAM update done\n", channel->channel);
1224 case FSE_AZ_WAKE_UP_EV:
1225 netif_vdbg(efx, hw, efx->net_dev,
1226 "channel %d RXQ %d wakeup event\n",
1227 channel->channel, ev_sub_data);
1229 case FSE_AZ_TIMER_EV:
1230 netif_vdbg(efx, hw, efx->net_dev,
1231 "channel %d RX queue %d timer expired\n",
1232 channel->channel, ev_sub_data);
1234 case FSE_AA_RX_RECOVER_EV:
1235 netif_err(efx, rx_err, efx->net_dev,
1236 "channel %d seen DRIVER RX_RESET event. "
1237 "Resetting.\n", channel->channel);
1238 atomic_inc(&efx->rx_reset);
1239 efx_schedule_reset(efx,
1240 EFX_WORKAROUND_6555(efx) ?
1241 RESET_TYPE_RX_RECOVERY :
1242 RESET_TYPE_DISABLE);
1244 case FSE_BZ_RX_DSC_ERROR_EV:
1245 if (ev_sub_data < EFX_VI_BASE) {
1246 netif_err(efx, rx_err, efx->net_dev,
1247 "RX DMA Q %d reports descriptor fetch error."
1248 " RX Q %d is disabled.\n", ev_sub_data,
1250 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1252 #ifdef CONFIG_SFC_SRIOV
1254 efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
1257 case FSE_BZ_TX_DSC_ERROR_EV:
1258 if (ev_sub_data < EFX_VI_BASE) {
1259 netif_err(efx, tx_err, efx->net_dev,
1260 "TX DMA Q %d reports descriptor fetch error."
1261 " TX Q %d is disabled.\n", ev_sub_data,
1263 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
1265 #ifdef CONFIG_SFC_SRIOV
1267 efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
1271 netif_vdbg(efx, hw, efx->net_dev,
1272 "channel %d unknown driver event code %d "
1273 "data %04x\n", channel->channel, ev_sub_code,
1279 int efx_farch_ev_process(struct efx_channel *channel, int budget)
1281 struct efx_nic *efx = channel->efx;
1282 unsigned int read_ptr;
1283 efx_qword_t event, *p_event;
1291 read_ptr = channel->eventq_read_ptr;
1294 p_event = efx_event(channel, read_ptr);
1297 if (!efx_event_present(&event))
1301 netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1302 "channel %d event is "EFX_QWORD_FMT"\n",
1303 channel->channel, EFX_QWORD_VAL(event));
1305 /* Clear this event by marking it all ones */
1306 EFX_SET_QWORD(*p_event);
1310 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1313 case FSE_AZ_EV_CODE_RX_EV:
1314 efx_farch_handle_rx_event(channel, &event);
1315 if (++spent == budget)
1318 case FSE_AZ_EV_CODE_TX_EV:
1319 tx_packets += efx_farch_handle_tx_event(channel,
1321 if (tx_packets > efx->txq_entries) {
1326 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1327 efx_farch_handle_generated_event(channel, &event);
1329 case FSE_AZ_EV_CODE_DRIVER_EV:
1330 efx_farch_handle_driver_event(channel, &event);
1332 #ifdef CONFIG_SFC_SRIOV
1333 case FSE_CZ_EV_CODE_USER_EV:
1334 efx_siena_sriov_event(channel, &event);
1337 case FSE_CZ_EV_CODE_MCDI_EV:
1338 efx_mcdi_process_event(channel, &event);
1340 case FSE_AZ_EV_CODE_GLOBAL_EV:
1341 if (efx->type->handle_global_event &&
1342 efx->type->handle_global_event(channel, &event))
1344 /* else fall through */
1346 netif_err(channel->efx, hw, channel->efx->net_dev,
1347 "channel %d unknown event type %d (data "
1348 EFX_QWORD_FMT ")\n", channel->channel,
1349 ev_code, EFX_QWORD_VAL(event));
1354 channel->eventq_read_ptr = read_ptr;
1358 /* Allocate buffer table entries for event queue */
1359 int efx_farch_ev_probe(struct efx_channel *channel)
1361 struct efx_nic *efx = channel->efx;
1364 entries = channel->eventq_mask + 1;
1365 return efx_alloc_special_buffer(efx, &channel->eventq,
1366 entries * sizeof(efx_qword_t));
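/* Pin the event queue's buffer-table entries, fill it with empty
 * (all-ones) events and point the hardware event queue table at it.
 */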
1369 int efx_farch_ev_init(struct efx_channel *channel)
1372 struct efx_nic *efx = channel->efx;
1374 netif_dbg(efx, hw, efx->net_dev,
1375 "channel %d event queue in special buffers %d-%d\n",
1376 channel->channel, channel->eventq.index,
1377 channel->eventq.index + channel->eventq.entries - 1);
1379 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1380 EFX_POPULATE_OWORD_3(reg,
1381 FRF_CZ_TIMER_Q_EN, 1,
1382 FRF_CZ_HOST_NOTIFY_MODE, 0,
1383 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1384 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1387 /* Pin event queue buffer */
1388 efx_init_special_buffer(efx, &channel->eventq);
1390 /* Fill event queue with all ones (i.e. empty events) */
1391 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
1393 /* Push event queue to card */
1394 EFX_POPULATE_OWORD_3(reg,
1396 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1397 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1398 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1404 void efx_farch_ev_fini(struct efx_channel *channel)
1407 struct efx_nic *efx = channel->efx;
1409 /* Remove event queue from card */
1410 EFX_ZERO_OWORD(reg);
1411 efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1413 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1414 efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1416 /* Unpin event queue */
1417 efx_fini_special_buffer(efx, &channel->eventq);
1420 /* Free buffers backing event queue */
1421 void efx_farch_ev_remove(struct efx_channel *channel)
1423 efx_free_special_buffer(channel->efx, &channel->eventq);
1427 void efx_farch_ev_test_generate(struct efx_channel *channel)
1429 efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
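/* Generate a fill event so that the RX queue is refilled from the
 * channel's event-processing context.
 */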
1432 void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
1434 efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
1435 EFX_CHANNEL_MAGIC_FILL(rx_queue));
1438 /**************************************************************************
1440 * Hardware interrupts
1441 * The hardware interrupt handler does very little work; all the event
1442 * queue processing is carried out by per-channel tasklets.
1444 **************************************************************************/
1446 /* Enable/disable/generate interrupts */
1447 static inline void efx_farch_interrupts(struct efx_nic *efx,
1448 bool enabled, bool force)
1450 efx_oword_t int_en_reg_ker;
1452 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1453 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
1454 FRF_AZ_KER_INT_KER, force,
1455 FRF_AZ_DRV_INT_EN_KER, enabled);
1456 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1459 void efx_farch_irq_enable_master(struct efx_nic *efx)
1461 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1462 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1464 efx_farch_interrupts(efx, true, false);
1467 void efx_farch_irq_disable_master(struct efx_nic *efx)
1469 /* Disable interrupts */
1470 efx_farch_interrupts(efx, false, false);
1473 /* Generate a test interrupt
1474 * Interrupt must already have been enabled, otherwise nasty things
1477 void efx_farch_irq_test_generate(struct efx_nic *efx)
1479 efx_farch_interrupts(efx, true, true);
1482 /* Process a fatal interrupt
1483 * Disable bus mastering ASAP and schedule a reset
1485 irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
1487 struct falcon_nic_data *nic_data = efx->nic_data;
1488 efx_oword_t *int_ker = efx->irq_status.addr;
1489 efx_oword_t fatal_intr;
1490 int error, mem_perr;
1492 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1493 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1495 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1496 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1497 EFX_OWORD_VAL(fatal_intr),
1498 error ? "disabling bus mastering" : "no recognised error");
1500 /* If this is a memory parity error dump which blocks are offending */
1501 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1502 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1505 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1506 netif_err(efx, hw, efx->net_dev,
1507 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1508 EFX_OWORD_VAL(reg));
1511 /* Disable both devices */
1512 pci_clear_master(efx->pci_dev);
1513 if (efx_nic_is_dual_func(efx))
1514 pci_clear_master(nic_data->pci_dev2);
1515 efx_farch_irq_disable_master(efx);
1517 /* Count errors and reset or disable the NIC accordingly */
1518 if (efx->int_error_count == 0 ||
1519 time_after(jiffies, efx->int_error_expire)) {
1520 efx->int_error_count = 0;
1521 efx->int_error_expire =
1522 jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1524 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1525 netif_err(efx, hw, efx->net_dev,
1526 "SYSTEM ERROR - reset scheduled\n");
1527 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1529 netif_err(efx, hw, efx->net_dev,
1530 "SYSTEM ERROR - max number of errors seen."
1531 "NIC will be disabled\n");
1532 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1538 /* Handle a legacy interrupt
1539 * Acknowledges the interrupt and schedules event queue processing.
1541 irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
1543 struct efx_nic *efx = dev_id;
1544 bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
1545 efx_oword_t *int_ker = efx->irq_status.addr;
1546 irqreturn_t result = IRQ_NONE;
1547 struct efx_channel *channel;
1552 /* Read the ISR which also ACKs the interrupts */
1553 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1554 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1556 /* Legacy interrupts are disabled too late by the EEH kernel
1557 * code. Disable them earlier.
1558 * If an EEH error occurred, the read will have returned all ones.
1560 if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
1561 !efx->eeh_disabled_legacy_irq) {
1562 disable_irq_nosync(efx->legacy_irq);
1563 efx->eeh_disabled_legacy_irq = true;
1566 /* Handle non-event-queue sources */
1567 if (queues & (1U << efx->irq_level) && soft_enabled) {
1568 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1569 if (unlikely(syserr))
1570 return efx_farch_fatal_interrupt(efx);
1571 efx->last_irq_cpu = raw_smp_processor_id();
1575 efx->irq_zero_count = 0;
1577 /* Schedule processing of any interrupting queues */
1578 if (likely(soft_enabled)) {
1579 efx_for_each_channel(channel, efx) {
1581 efx_schedule_channel_irq(channel);
1585 result = IRQ_HANDLED;
1590 /* Legacy ISR read can return zero once (SF bug 15783) */
1592 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1593 * because this might be a shared interrupt. */
1594 if (efx->irq_zero_count++ == 0)
1595 result = IRQ_HANDLED;
1597 /* Ensure we schedule or rearm all event queues */
1598 if (likely(soft_enabled)) {
1599 efx_for_each_channel(channel, efx) {
1600 event = efx_event(channel,
1601 channel->eventq_read_ptr);
1602 if (efx_event_present(event))
1603 efx_schedule_channel_irq(channel);
1605 efx_farch_ev_read_ack(channel);
1610 if (result == IRQ_HANDLED)
1611 netif_vdbg(efx, intr, efx->net_dev,
1612 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1613 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1618 /* Handle an MSI interrupt
1620 * Handle an MSI hardware interrupt. This routine schedules event
1621 * queue processing. No interrupt acknowledgement cycle is necessary.
1622 * Also, we never need to check that the interrupt is for us, since
1623 * MSI interrupts cannot be shared.
1625 irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
1627 struct efx_msi_context *context = dev_id;
1628 struct efx_nic *efx = context->efx;
1629 efx_oword_t *int_ker = efx->irq_status.addr;
1632 netif_vdbg(efx, intr, efx->net_dev,
1633 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1634 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1636 if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
1639 /* Handle non-event-queue sources */
1640 if (context->index == efx->irq_level) {
1641 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1642 if (unlikely(syserr))
1643 return efx_farch_fatal_interrupt(efx);
1644 efx->last_irq_cpu = raw_smp_processor_id();
1647 /* Schedule processing of the channel */
1648 efx_schedule_channel_irq(efx->channel[context->index]);
1653 /* Setup RSS indirection table.
1654 * This maps from the hash value of the packet to RXQ
1656 void efx_farch_rx_push_indir_table(struct efx_nic *efx)
1661 BUG_ON(efx_nic_rev(efx) < EFX_REV_FALCON_B0);
1663 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1664 FR_BZ_RX_INDIRECTION_TBL_ROWS);
1666 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1667 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1668 efx->rx_indir_table[i]);
1669 efx_writed(efx, &dword,
1670 FR_BZ_RX_INDIRECTION_TBL +
1671 FR_BZ_RX_INDIRECTION_TBL_STEP * i);
1675 /* Looks at available SRAM resources and works out how many queues we
1676 * can support, and where things like descriptor caches should live.
1678 * SRAM is split up as follows:
1679 * 0 buftbl entries for channels
1680 * efx->vf_buftbl_base buftbl entries for SR-IOV
1681 * efx->rx_dc_base RX descriptor caches
1682 * efx->tx_dc_base TX descriptor caches
1684 void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
1686 unsigned vi_count, buftbl_min;
1688 #ifdef CONFIG_SFC_SRIOV
1689 struct siena_nic_data *nic_data = efx->nic_data;
1692 /* Account for the buffer table entries backing the datapath channels
1693 * and the descriptor caches for those channels.
1695 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
1696 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
1697 efx->n_channels * EFX_MAX_EVQ_SIZE)
1698 * sizeof(efx_qword_t) / EFX_BUF_SIZE);
1699 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
1701 #ifdef CONFIG_SFC_SRIOV
1702 if (efx->type->sriov_wanted) {
1703 if (efx->type->sriov_wanted(efx)) {
1704 unsigned vi_dc_entries, buftbl_free;
1705 unsigned entries_per_vf, vf_limit;
1707 nic_data->vf_buftbl_base = buftbl_min;
1709 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1710 vi_count = max(vi_count, EFX_VI_BASE);
1711 buftbl_free = (sram_lim_qw - buftbl_min -
1712 vi_count * vi_dc_entries);
1714 entries_per_vf = ((vi_dc_entries +
1715 EFX_VF_BUFTBL_PER_VI) *
1717 vf_limit = min(buftbl_free / entries_per_vf,
1718 (1024U - EFX_VI_BASE) >> efx->vi_scale);
1720 if (efx->vf_count > vf_limit) {
1721 netif_err(efx, probe, efx->net_dev,
1722 "Reducing VF count from from %d to %d\n",
1723 efx->vf_count, vf_limit);
1724 efx->vf_count = vf_limit;
1726 vi_count += efx->vf_count * efx_vf_size(efx);
1731 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
1732 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
1735 u32 efx_farch_fpga_ver(struct efx_nic *efx)
1737 efx_oword_t altera_build;
1738 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1739 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1742 void efx_farch_init_common(struct efx_nic *efx)
1746 /* Set positions of descriptor caches in SRAM. */
1747 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
1748 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1749 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
1750 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1752 /* Set TX descriptor cache size. */
1753 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1754 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1755 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1757 /* Set RX descriptor cache size. Set low watermark to size-8, as
1758 * this allows most efficient prefetching.
1760 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1761 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1762 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1763 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1764 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1766 /* Program INT_KER address */
1767 EFX_POPULATE_OWORD_2(temp,
1768 FRF_AZ_NORM_INT_VEC_DIS_KER,
1769 EFX_INT_MODE_USE_MSI(efx),
1770 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1771 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1773 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1774 /* Use an interrupt level unused by event queues */
1775 efx->irq_level = 0x1f;
1777 /* Use a valid MSI-X vector */
1780 /* Enable all the genuinely fatal interrupts. (They are still
1781 * masked by the overall interrupt mask, controlled by
1782 * efx_farch_interrupts()).
1784 * Note: All other fatal interrupts are enabled
1786 EFX_POPULATE_OWORD_3(temp,
1787 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1788 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1789 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1790 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1791 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1792 EFX_INVERT_OWORD(temp);
1793 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1795 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1796 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1798 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1799 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1800 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1801 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1802 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1803 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1804 /* Enable SW_EV to inherit in char driver - assume harmless here */
1805 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1806 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
1807 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1808 /* Disable hardware watchdog which can misfire */
1809 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1810 /* Squash TX of packets of 16 bytes or less */
1811 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1812 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1813 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1815 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1816 EFX_POPULATE_OWORD_4(temp,
1817 /* Default values */
1818 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1819 FRF_BZ_TX_PACE_SB_AF, 0xb,
1820 FRF_BZ_TX_PACE_FB_BASE, 0,
1821 /* Allow large pace values in the
1823 FRF_BZ_TX_PACE_BIN_TH,
1824 FFE_BZ_TX_PACE_RESERVED);
1825 efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1829 /**************************************************************************
1833 **************************************************************************
1836 /* "Fudge factors" - difference between programmed value and actual depth.
1837 * Due to pipelined implementation we need to program H/W with a value that
1838 * is larger than the hop limit we want.
1840 #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
1841 #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
1843 /* Hard maximum search limit. Hardware will time-out beyond 200-something.
1844 * We also need to avoid infinite loops in efx_farch_filter_search() when the
1847 #define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
1849 /* Don't try very hard to find space for performance hints, as this is
1850 * counter-productive. */
1851 #define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
1853 enum efx_farch_filter_type {
1854 EFX_FARCH_FILTER_TCP_FULL = 0,
1855 EFX_FARCH_FILTER_TCP_WILD,
1856 EFX_FARCH_FILTER_UDP_FULL,
1857 EFX_FARCH_FILTER_UDP_WILD,
1858 EFX_FARCH_FILTER_MAC_FULL = 4,
1859 EFX_FARCH_FILTER_MAC_WILD,
1860 EFX_FARCH_FILTER_UC_DEF = 8,
1861 EFX_FARCH_FILTER_MC_DEF,
1862 EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
1865 enum efx_farch_filter_table_id {
1866 EFX_FARCH_FILTER_TABLE_RX_IP = 0,
1867 EFX_FARCH_FILTER_TABLE_RX_MAC,
1868 EFX_FARCH_FILTER_TABLE_RX_DEF,
1869 EFX_FARCH_FILTER_TABLE_TX_MAC,
1870 EFX_FARCH_FILTER_TABLE_COUNT,
1873 enum efx_farch_filter_index {
1874 EFX_FARCH_FILTER_INDEX_UC_DEF,
1875 EFX_FARCH_FILTER_INDEX_MC_DEF,
1876 EFX_FARCH_FILTER_SIZE_RX_DEF,
1879 struct efx_farch_filter_spec {
1887 struct efx_farch_filter_table {
1888 enum efx_farch_filter_table_id id;
1889 u32 offset; /* address of table relative to BAR */
1890 unsigned size; /* number of entries */
1891 unsigned step; /* step between entries */
1892 unsigned used; /* number currently used */
1893 unsigned long *used_bitmap;
1894 struct efx_farch_filter_spec *spec;
1895 unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
1898 struct efx_farch_filter_state {
1899 struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
1903 efx_farch_filter_table_clear_entry(struct efx_nic *efx,
1904 struct efx_farch_filter_table *table,
1905 unsigned int filter_idx);
1907 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
1908 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
1909 static u16 efx_farch_filter_hash(u32 key)
1913 /* First 16 rounds */
1914 tmp = 0x1fff ^ key >> 16;
1915 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1916 tmp = tmp ^ tmp >> 9;
1917 /* Last 16 rounds */
1918 tmp = tmp ^ tmp << 13 ^ key;
1919 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
1920 return tmp ^ tmp >> 9;
1923 /* To allow for hash collisions, filter search continues at these
1924 * increments from the first possible entry selected by the hash. */
1925 static u16 efx_farch_filter_increment(u32 key)
1930 static enum efx_farch_filter_table_id
1931 efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
1933 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1934 (EFX_FARCH_FILTER_TCP_FULL >> 2));
1935 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1936 (EFX_FARCH_FILTER_TCP_WILD >> 2));
1937 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1938 (EFX_FARCH_FILTER_UDP_FULL >> 2));
1939 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
1940 (EFX_FARCH_FILTER_UDP_WILD >> 2));
1941 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1942 (EFX_FARCH_FILTER_MAC_FULL >> 2));
1943 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
1944 (EFX_FARCH_FILTER_MAC_WILD >> 2));
1945 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
1946 EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
1947 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
1948 }
1950 static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
1951 {
1952 struct efx_farch_filter_state *state = efx->filter_state;
1953 struct efx_farch_filter_table *table;
1954 efx_oword_t filter_ctl;
1956 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
1958 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
1959 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
1960 table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
1961 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1962 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
1963 table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
1964 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1965 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
1966 table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
1967 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1968 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
1969 table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
1970 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1972 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
1973 if (table->size) {
1974 EFX_SET_OWORD_FIELD(
1975 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
1976 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
1977 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
1978 EFX_SET_OWORD_FIELD(
1979 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
1980 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
1981 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
1982 }
1984 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
1985 if (table->size) {
1986 EFX_SET_OWORD_FIELD(
1987 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
1988 table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
1989 EFX_SET_OWORD_FIELD(
1990 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
1991 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
1992 EFX_FILTER_FLAG_RX_RSS));
1993 EFX_SET_OWORD_FIELD(
1994 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
1995 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
1996 EFX_SET_OWORD_FIELD(
1997 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
1998 !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
1999 EFX_FILTER_FLAG_RX_RSS));
2001 /* There is a single bit to enable RX scatter for all
2002 * unmatched packets. Only set it if scatter is
2003 * enabled in both filter specs.
2004 */
2005 EFX_SET_OWORD_FIELD(
2006 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
2007 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
2008 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
2009 EFX_FILTER_FLAG_RX_SCATTER));
2010 } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
2011 /* We don't expose 'default' filters because unmatched
2012 * packets always go to the queue number found in the
2013 * RSS table. But we still need to set the RX scatter
2014 * bit here.
2015 */
2016 EFX_SET_OWORD_FIELD(
2017 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
2018 efx->rx_scatter);
2019 }
2021 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
2022 }
2024 static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
2025 {
2026 struct efx_farch_filter_state *state = efx->filter_state;
2027 struct efx_farch_filter_table *table;
2028 efx_oword_t tx_cfg;
2030 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
2032 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
2033 if (table->size) {
2034 EFX_SET_OWORD_FIELD(
2035 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
2036 table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
2037 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
2038 EFX_SET_OWORD_FIELD(
2039 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
2040 table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
2041 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
2042 }
2044 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
2045 }
2047 static int
2048 efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
2049 const struct efx_filter_spec *gen_spec)
2050 {
2051 bool is_full = false;
2053 if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
2054 gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
2055 return -EINVAL;
2057 spec->priority = gen_spec->priority;
2058 spec->flags = gen_spec->flags;
2059 spec->dmaq_id = gen_spec->dmaq_id;
2061 switch (gen_spec->match_flags) {
2062 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2063 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
2064 EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
2065 is_full = true;
2066 /* fall through */
2067 case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
2068 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
2069 __be32 rhost, host1, host2;
2070 __be16 rport, port1, port2;
2072 EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
2074 if (gen_spec->ether_type != htons(ETH_P_IP))
2075 return -EPROTONOSUPPORT;
2076 if (gen_spec->loc_port == 0 ||
2077 (is_full && gen_spec->rem_port == 0))
2078 return -EADDRNOTAVAIL;
2079 switch (gen_spec->ip_proto) {
2080 case IPPROTO_TCP:
2081 spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
2082 EFX_FARCH_FILTER_TCP_WILD);
2083 break;
2084 case IPPROTO_UDP:
2085 spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
2086 EFX_FARCH_FILTER_UDP_WILD);
2087 break;
2088 default:
2089 return -EPROTONOSUPPORT;
2090 }
2092 /* Filter is constructed in terms of source and destination,
2093 * with the odd wrinkle that the ports are swapped in a UDP
2094 * wildcard filter. We need to convert from local and remote
2095 * (= zero for wildcard) addresses.
2096 */
2097 rhost = is_full ? gen_spec->rem_host[0] : 0;
2098 rport = is_full ? gen_spec->rem_port : 0;
2099 host1 = rhost;
2100 host2 = gen_spec->loc_host[0];
2101 if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
2102 port1 = gen_spec->loc_port;
2103 port2 = rport;
2104 } else {
2105 port1 = rport;
2106 port2 = gen_spec->loc_port;
2107 }
2108 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
2109 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
2110 spec->data[2] = ntohl(host2);
2111 break;
2112 }
2115 case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
2116 is_full = true;
2117 /* fall through */
2118 case EFX_FILTER_MATCH_LOC_MAC:
2119 spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
2120 EFX_FARCH_FILTER_MAC_WILD);
2121 spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
2122 spec->data[1] = (gen_spec->loc_mac[2] << 24 |
2123 gen_spec->loc_mac[3] << 16 |
2124 gen_spec->loc_mac[4] << 8 |
2125 gen_spec->loc_mac[5]);
2126 spec->data[2] = (gen_spec->loc_mac[0] << 8 |
2127 gen_spec->loc_mac[1]);
2128 break;
2130 case EFX_FILTER_MATCH_LOC_MAC_IG:
2131 spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
2132 EFX_FARCH_FILTER_MC_DEF :
2133 EFX_FARCH_FILTER_UC_DEF);
2134 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
2135 break;
2137 default:
2138 return -EPROTONOSUPPORT;
2139 }
2141 return 0;
2142 }
2144 static void
2145 efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
2146 const struct efx_farch_filter_spec *spec)
2147 {
2148 bool is_full = false;
2150 /* *gen_spec should be completely initialised, to be consistent
2151 * with efx_filter_init_{rx,tx}() and in case we want to copy
2152 * it back to userland.
2153 */
2154 memset(gen_spec, 0, sizeof(*gen_spec));
2156 gen_spec->priority = spec->priority;
2157 gen_spec->flags = spec->flags;
2158 gen_spec->dmaq_id = spec->dmaq_id;
2160 switch (spec->type) {
2161 case EFX_FARCH_FILTER_TCP_FULL:
2162 case EFX_FARCH_FILTER_UDP_FULL:
2163 is_full = true;
2164 /* fall through */
2165 case EFX_FARCH_FILTER_TCP_WILD:
2166 case EFX_FARCH_FILTER_UDP_WILD: {
2167 __be32 host1, host2;
2168 __be16 port1, port2;
2170 gen_spec->match_flags =
2171 EFX_FILTER_MATCH_ETHER_TYPE |
2172 EFX_FILTER_MATCH_IP_PROTO |
2173 EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
2174 if (is_full)
2175 gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
2176 EFX_FILTER_MATCH_REM_PORT);
2177 gen_spec->ether_type = htons(ETH_P_IP);
2178 gen_spec->ip_proto =
2179 (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
2180 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
2181 IPPROTO_TCP : IPPROTO_UDP;
2183 host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
2184 port1 = htons(spec->data[0]);
2185 host2 = htonl(spec->data[2]);
2186 port2 = htons(spec->data[1] >> 16);
2187 if (spec->flags & EFX_FILTER_FLAG_TX) {
2188 gen_spec->loc_host[0] = host1;
2189 gen_spec->rem_host[0] = host2;
2190 } else {
2191 gen_spec->loc_host[0] = host2;
2192 gen_spec->rem_host[0] = host1;
2193 }
2194 if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
2195 (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
2196 gen_spec->loc_port = port1;
2197 gen_spec->rem_port = port2;
2198 } else {
2199 gen_spec->loc_port = port2;
2200 gen_spec->rem_port = port1;
2201 }
2203 break;
2204 }
2206 case EFX_FARCH_FILTER_MAC_FULL:
2207 is_full = true;
2208 /* fall through */
2209 case EFX_FARCH_FILTER_MAC_WILD:
2210 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
2211 if (is_full)
2212 gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
2213 gen_spec->loc_mac[0] = spec->data[2] >> 8;
2214 gen_spec->loc_mac[1] = spec->data[2];
2215 gen_spec->loc_mac[2] = spec->data[1] >> 24;
2216 gen_spec->loc_mac[3] = spec->data[1] >> 16;
2217 gen_spec->loc_mac[4] = spec->data[1] >> 8;
2218 gen_spec->loc_mac[5] = spec->data[1];
2219 gen_spec->outer_vid = htons(spec->data[0]);
2220 break;
2222 case EFX_FARCH_FILTER_UC_DEF:
2223 case EFX_FARCH_FILTER_MC_DEF:
2224 gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
2225 gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
2226 break;
2228 default:
2229 WARN_ON(1);
2230 break;
2231 }
2232 }
2234 static void
2235 efx_farch_filter_init_rx_auto(struct efx_nic *efx,
2236 struct efx_farch_filter_spec *spec)
2237 {
2238 /* If there's only one channel then disable RSS for non VF
2239 * traffic, thereby allowing VFs to use RSS when the PF can't.
2240 */
2241 spec->priority = EFX_FILTER_PRI_AUTO;
2242 spec->flags = (EFX_FILTER_FLAG_RX |
2243 (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
2244 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
2245 spec->dmaq_id = 0;
2246 }
2248 /* Build a filter entry and return its n-tuple key. */
2249 static u32 efx_farch_filter_build(efx_oword_t *filter,
2250 struct efx_farch_filter_spec *spec)
2251 {
2252 u32 data3;
2254 switch (efx_farch_filter_spec_table_id(spec)) {
2255 case EFX_FARCH_FILTER_TABLE_RX_IP: {
2256 bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
2257 spec->type == EFX_FARCH_FILTER_UDP_WILD);
2258 EFX_POPULATE_OWORD_7(
2259 *filter,
2260 FRF_BZ_RSS_EN,
2261 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2262 FRF_BZ_SCATTER_EN,
2263 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2264 FRF_BZ_TCP_UDP, is_udp,
2265 FRF_BZ_RXQ_ID, spec->dmaq_id,
2266 EFX_DWORD_2, spec->data[2],
2267 EFX_DWORD_1, spec->data[1],
2268 EFX_DWORD_0, spec->data[0]);
2269 data3 = is_udp;
2270 break;
2271 }
2273 case EFX_FARCH_FILTER_TABLE_RX_MAC: {
2274 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2275 EFX_POPULATE_OWORD_7(
2276 *filter,
2277 FRF_CZ_RMFT_RSS_EN,
2278 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
2279 FRF_CZ_RMFT_SCATTER_EN,
2280 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
2281 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
2282 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
2283 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
2284 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
2285 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
2286 data3 = is_wild;
2287 break;
2288 }
2290 case EFX_FARCH_FILTER_TABLE_TX_MAC: {
2291 bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
2292 EFX_POPULATE_OWORD_5(*filter,
2293 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
2294 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
2295 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
2296 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
2297 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
2298 data3 = is_wild | spec->dmaq_id << 1;
2299 break;
2300 }
2302 default:
2303 BUG();
2304 }
2306 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
2307 }
2309 static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
2310 const struct efx_farch_filter_spec *right)
2311 {
2312 if (left->type != right->type ||
2313 memcmp(left->data, right->data, sizeof(left->data)))
2314 return false;
2316 if (left->flags & EFX_FILTER_FLAG_TX &&
2317 left->dmaq_id != right->dmaq_id)
2318 return false;
2320 return true;
2321 }
2323 /*
2324 * Construct/deconstruct external filter IDs. At least the RX filter
2325 * IDs must be ordered by matching priority, for RX NFC semantics.
2327 * Deconstruction needs to be robust against invalid IDs so that
2328 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
2329 * accept user-provided IDs.
2330 */
2332 #define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
2334 static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
2335 [EFX_FARCH_FILTER_TCP_FULL] = 0,
2336 [EFX_FARCH_FILTER_UDP_FULL] = 0,
2337 [EFX_FARCH_FILTER_TCP_WILD] = 1,
2338 [EFX_FARCH_FILTER_UDP_WILD] = 1,
2339 [EFX_FARCH_FILTER_MAC_FULL] = 2,
2340 [EFX_FARCH_FILTER_MAC_WILD] = 3,
2341 [EFX_FARCH_FILTER_UC_DEF] = 4,
2342 [EFX_FARCH_FILTER_MC_DEF] = 4,
2343 };
2345 static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
2346 EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
2347 EFX_FARCH_FILTER_TABLE_RX_IP,
2348 EFX_FARCH_FILTER_TABLE_RX_MAC,
2349 EFX_FARCH_FILTER_TABLE_RX_MAC,
2350 EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
2351 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
2352 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
2353 };
2355 #define EFX_FARCH_FILTER_INDEX_WIDTH 13
2356 #define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
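/* Illustrative example only (hypothetical helper, equivalent in effect to
 * the make_id/id_index pair below): an external filter ID packs the match
 * priority range in the bits above EFX_FARCH_FILTER_INDEX_WIDTH and the
 * table index in the low bits, so range 3, index 42 encodes as
 * (3 << 13) | 42 = 24618.
 */
static inline u32 efx_farch_filter_pack_id(unsigned int range,
					   unsigned int index)
{
	return range << EFX_FARCH_FILTER_INDEX_WIDTH |
	       (index & EFX_FARCH_FILTER_INDEX_MASK);
}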
2358 static inline u32
2359 efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
2360 unsigned int index)
2361 {
2362 unsigned int range;
2364 range = efx_farch_filter_type_match_pri[spec->type];
2365 if (!(spec->flags & EFX_FILTER_FLAG_RX))
2366 range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
2368 return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
2369 }
2371 static inline enum efx_farch_filter_table_id
2372 efx_farch_filter_id_table_id(u32 id)
2373 {
2374 unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
2376 if (range < ARRAY_SIZE(efx_farch_filter_range_table))
2377 return efx_farch_filter_range_table[range];
2378 else
2379 return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
2380 }
2382 static inline unsigned int efx_farch_filter_id_index(u32 id)
2383 {
2384 return id & EFX_FARCH_FILTER_INDEX_MASK;
2385 }
2387 u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
2388 {
2389 struct efx_farch_filter_state *state = efx->filter_state;
2390 unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
2391 enum efx_farch_filter_table_id table_id;
2393 do {
2394 table_id = efx_farch_filter_range_table[range];
2395 if (state->table[table_id].size != 0)
2396 return range << EFX_FARCH_FILTER_INDEX_WIDTH |
2397 state->table[table_id].size;
2398 } while (range--);
2400 return 0;
2401 }
2403 s32 efx_farch_filter_insert(struct efx_nic *efx,
2404 struct efx_filter_spec *gen_spec,
2405 bool replace_equal)
2406 {
2407 struct efx_farch_filter_state *state = efx->filter_state;
2408 struct efx_farch_filter_table *table;
2409 struct efx_farch_filter_spec spec;
2410 efx_oword_t filter;
2411 int rep_index, ins_index;
2412 unsigned int depth = 0;
2413 int rc;
2415 rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
2416 if (rc)
2417 return rc;
2419 table = &state->table[efx_farch_filter_spec_table_id(&spec)];
2420 if (table->size == 0)
2421 return -EINVAL;
2423 netif_vdbg(efx, hw, efx->net_dev,
2424 "%s: type %d search_limit=%d", __func__, spec.type,
2425 table->search_limit[spec.type]);
2427 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2428 /* One filter spec per type */
2429 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
2430 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
2431 EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
2432 rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
2433 ins_index = rep_index;
2435 spin_lock_bh(&efx->filter_lock);
2437 /* Search concurrently for
2438 * (1) a filter to be replaced (rep_index): any filter
2439 * with the same match values, up to the current
2440 * search depth for this type, and
2441 * (2) the insertion point (ins_index): (1) or any
2442 * free slot before it or up to the maximum search
2443 * depth for this priority
2444 * We fail if we cannot find (2).
2446 * We can stop once either
2447 * (a) we find (1), in which case we have definitely
2448 * found (2) as well; or
2449 * (b) we have searched exhaustively for (1), and have
2450 * either found (2) or searched exhaustively for it
2451 */
2452 u32 key = efx_farch_filter_build(&filter, &spec);
2453 unsigned int hash = efx_farch_filter_hash(key);
2454 unsigned int incr = efx_farch_filter_increment(key);
2455 unsigned int max_rep_depth = table->search_limit[spec.type];
2456 unsigned int max_ins_depth =
2457 spec.priority <= EFX_FILTER_PRI_HINT ?
2458 EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
2459 EFX_FARCH_FILTER_CTL_SRCH_MAX;
2460 unsigned int i = hash & (table->size - 1);
2465 spin_lock_bh(&efx->filter_lock);
2468 if (!test_bit(i, table->used_bitmap)) {
2471 } else if (efx_farch_filter_equal(&spec,
2480 if (depth >= max_rep_depth &&
2481 (ins_index >= 0 || depth >= max_ins_depth)) {
2483 if (ins_index < 0) {
2491 i = (i + incr) & (table->size - 1);
2496 /* If we found a filter to be replaced, check whether we
2497 * should do so
2498 */
2499 if (rep_index >= 0) {
2500 struct efx_farch_filter_spec *saved_spec =
2501 &table->spec[rep_index];
2503 if (spec.priority == saved_spec->priority && !replace_equal) {
2507 if (spec.priority < saved_spec->priority) {
2511 if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
2512 saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
2513 spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
2516 /* Insert the filter */
2517 if (ins_index != rep_index) {
2518 __set_bit(ins_index, table->used_bitmap);
2521 table->spec[ins_index] = spec;
2523 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
2524 efx_farch_filter_push_rx_config(efx);
2526 if (table->search_limit[spec.type] < depth) {
2527 table->search_limit[spec.type] = depth;
2528 if (spec.flags & EFX_FILTER_FLAG_TX)
2529 efx_farch_filter_push_tx_limits(efx);
2531 efx_farch_filter_push_rx_config(efx);
2534 efx_writeo(efx, &filter,
2535 table->offset + table->step * ins_index);
2537 /* If we were able to replace a filter by inserting
2538 * at a lower depth, clear the replaced filter
2539 */
2540 if (ins_index != rep_index && rep_index >= 0)
2541 efx_farch_filter_table_clear_entry(efx, table,
2545 netif_vdbg(efx, hw, efx->net_dev,
2546 "%s: filter type %d index %d rxq %u set",
2547 __func__, spec.type, ins_index, spec.dmaq_id);
2548 rc = efx_farch_filter_make_id(&spec, ins_index);
2551 spin_unlock_bh(&efx->filter_lock);
2552 return rc;
2553 }
2555 static void
2556 efx_farch_filter_table_clear_entry(struct efx_nic *efx,
2557 struct efx_farch_filter_table *table,
2558 unsigned int filter_idx)
2559 {
2560 static efx_oword_t filter;
2562 EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
2563 BUG_ON(table->offset == 0); /* can't clear MAC default filters */
2565 __clear_bit(filter_idx, table->used_bitmap);
2566 --table->used;
2567 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
2569 efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
2571 /* If this filter required a greater search depth than
2572 * any other, the search limit for its type can now be
2573 * decreased. However, it is hard to determine that
2574 * unless the table has become completely empty - in
2575 * which case, all its search limits can be set to 0.
2576 */
2577 if (unlikely(table->used == 0)) {
2578 memset(table->search_limit, 0, sizeof(table->search_limit));
2579 if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
2580 efx_farch_filter_push_tx_limits(efx);
2581 else
2582 efx_farch_filter_push_rx_config(efx);
2583 }
2584 }
2586 static int efx_farch_filter_remove(struct efx_nic *efx,
2587 struct efx_farch_filter_table *table,
2588 unsigned int filter_idx,
2589 enum efx_filter_priority priority)
2590 {
2591 struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
2593 if (!test_bit(filter_idx, table->used_bitmap) ||
2594 spec->priority != priority)
2595 return -ENOENT;
2597 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
2598 efx_farch_filter_init_rx_auto(efx, spec);
2599 efx_farch_filter_push_rx_config(efx);
2600 } else {
2601 efx_farch_filter_table_clear_entry(efx, table, filter_idx);
2602 }
2604 return 0;
2605 }
2607 int efx_farch_filter_remove_safe(struct efx_nic *efx,
2608 enum efx_filter_priority priority,
2609 u32 filter_id)
2610 {
2611 struct efx_farch_filter_state *state = efx->filter_state;
2612 enum efx_farch_filter_table_id table_id;
2613 struct efx_farch_filter_table *table;
2614 unsigned int filter_idx;
2615 struct efx_farch_filter_spec *spec;
2616 int rc;
2618 table_id = efx_farch_filter_id_table_id(filter_id);
2619 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2620 return -ENOENT;
2621 table = &state->table[table_id];
2623 filter_idx = efx_farch_filter_id_index(filter_id);
2624 if (filter_idx >= table->size)
2625 return -ENOENT;
2626 spec = &table->spec[filter_idx];
2628 spin_lock_bh(&efx->filter_lock);
2629 rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
2630 spin_unlock_bh(&efx->filter_lock);
2632 return rc;
2633 }
2635 int efx_farch_filter_get_safe(struct efx_nic *efx,
2636 enum efx_filter_priority priority,
2637 u32 filter_id, struct efx_filter_spec *spec_buf)
2638 {
2639 struct efx_farch_filter_state *state = efx->filter_state;
2640 enum efx_farch_filter_table_id table_id;
2641 struct efx_farch_filter_table *table;
2642 struct efx_farch_filter_spec *spec;
2643 unsigned int filter_idx;
2644 int rc;
2646 table_id = efx_farch_filter_id_table_id(filter_id);
2647 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
2648 return -ENOENT;
2649 table = &state->table[table_id];
2651 filter_idx = efx_farch_filter_id_index(filter_id);
2652 if (filter_idx >= table->size)
2653 return -ENOENT;
2654 spec = &table->spec[filter_idx];
2656 spin_lock_bh(&efx->filter_lock);
2658 if (test_bit(filter_idx, table->used_bitmap) &&
2659 spec->priority == priority) {
2660 efx_farch_filter_to_gen_spec(spec_buf, spec);
2661 rc = 0;
2662 } else {
2663 rc = -ENOENT;
2664 }
2666 spin_unlock_bh(&efx->filter_lock);
2668 return rc;
2669 }
2671 static void
2672 efx_farch_filter_table_clear(struct efx_nic *efx,
2673 enum efx_farch_filter_table_id table_id,
2674 enum efx_filter_priority priority)
2675 {
2676 struct efx_farch_filter_state *state = efx->filter_state;
2677 struct efx_farch_filter_table *table = &state->table[table_id];
2678 unsigned int filter_idx;
2680 spin_lock_bh(&efx->filter_lock);
2681 for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
2682 if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
2683 efx_farch_filter_remove(efx, table,
2684 filter_idx, priority);
2685 }
2686 spin_unlock_bh(&efx->filter_lock);
2687 }
2689 int efx_farch_filter_clear_rx(struct efx_nic *efx,
2690 enum efx_filter_priority priority)
2691 {
2692 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
2693 priority);
2694 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
2695 priority);
2696 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
2697 priority);
2698 return 0;
2699 }
2701 u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
2702 enum efx_filter_priority priority)
2703 {
2704 struct efx_farch_filter_state *state = efx->filter_state;
2705 enum efx_farch_filter_table_id table_id;
2706 struct efx_farch_filter_table *table;
2707 unsigned int filter_idx;
2708 u32 count = 0;
2710 spin_lock_bh(&efx->filter_lock);
2712 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2713 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2714 table_id++) {
2715 table = &state->table[table_id];
2716 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2717 if (test_bit(filter_idx, table->used_bitmap) &&
2718 table->spec[filter_idx].priority == priority)
2719 ++count;
2720 }
2721 }
2723 spin_unlock_bh(&efx->filter_lock);
2725 return count;
2726 }
2728 s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
2729 enum efx_filter_priority priority,
2730 u32 *buf, u32 size)
2731 {
2732 struct efx_farch_filter_state *state = efx->filter_state;
2733 enum efx_farch_filter_table_id table_id;
2734 struct efx_farch_filter_table *table;
2735 unsigned int filter_idx;
2736 s32 count = 0;
2738 spin_lock_bh(&efx->filter_lock);
2740 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2741 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2742 table_id++) {
2743 table = &state->table[table_id];
2744 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2745 if (test_bit(filter_idx, table->used_bitmap) &&
2746 table->spec[filter_idx].priority == priority) {
2747 if (count == size) {
2748 count = -EMSGSIZE;
2749 goto out;
2750 }
2751 buf[count++] = efx_farch_filter_make_id(
2752 &table->spec[filter_idx], filter_idx);
2753 }
2754 }
2755 }
2756 out:
2757 spin_unlock_bh(&efx->filter_lock);
2759 return count;
2760 }
2762 /* Restore filter state after reset */
2763 void efx_farch_filter_table_restore(struct efx_nic *efx)
2764 {
2765 struct efx_farch_filter_state *state = efx->filter_state;
2766 enum efx_farch_filter_table_id table_id;
2767 struct efx_farch_filter_table *table;
2768 efx_oword_t filter;
2769 unsigned int filter_idx;
2771 spin_lock_bh(&efx->filter_lock);
2773 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2774 table = &state->table[table_id];
2776 /* Check whether this is a regular register table */
2777 if (table->step == 0)
2778 continue;
2780 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2781 if (!test_bit(filter_idx, table->used_bitmap))
2782 continue;
2783 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2784 efx_writeo(efx, &filter,
2785 table->offset + table->step * filter_idx);
2786 }
2787 }
2789 efx_farch_filter_push_rx_config(efx);
2790 efx_farch_filter_push_tx_limits(efx);
2792 spin_unlock_bh(&efx->filter_lock);
2793 }
2795 void efx_farch_filter_table_remove(struct efx_nic *efx)
2796 {
2797 struct efx_farch_filter_state *state = efx->filter_state;
2798 enum efx_farch_filter_table_id table_id;
2800 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2801 kfree(state->table[table_id].used_bitmap);
2802 vfree(state->table[table_id].spec);
2803 }
2804 kfree(state);
2805 }
2807 int efx_farch_filter_table_probe(struct efx_nic *efx)
2808 {
2809 struct efx_farch_filter_state *state;
2810 struct efx_farch_filter_table *table;
2811 unsigned table_id;
2813 state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
2814 if (!state)
2815 return -ENOMEM;
2816 efx->filter_state = state;
2818 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
2819 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2820 table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
2821 table->offset = FR_BZ_RX_FILTER_TBL0;
2822 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
2823 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
2824 }
2826 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
2827 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
2828 table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
2829 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
2830 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
2831 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
2833 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2834 table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
2835 table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
2837 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
2838 table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
2839 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
2840 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
2841 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
2842 }
2844 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
2845 table = &state->table[table_id];
2846 if (table->size == 0)
2847 continue;
2848 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
2849 sizeof(unsigned long),
2850 GFP_KERNEL);
2851 if (!table->used_bitmap)
2852 goto fail;
2853 table->spec = vzalloc(table->size * sizeof(*table->spec));
2854 if (!table->spec)
2855 goto fail;
2856 }
2858 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
2859 if (table->size) {
2860 /* RX default filters must always exist */
2861 struct efx_farch_filter_spec *spec;
2862 unsigned i;
2864 for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
2865 spec = &table->spec[i];
2866 spec->type = EFX_FARCH_FILTER_UC_DEF + i;
2867 efx_farch_filter_init_rx_auto(efx, spec);
2868 __set_bit(i, table->used_bitmap);
2869 }
2870 }
2872 efx_farch_filter_push_rx_config(efx);
2874 return 0;
2876 fail:
2877 efx_farch_filter_table_remove(efx);
2878 return -ENOMEM;
2879 }
2881 /* Update scatter enable flags for filters pointing to our own RX queues */
2882 void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
2883 {
2884 struct efx_farch_filter_state *state = efx->filter_state;
2885 enum efx_farch_filter_table_id table_id;
2886 struct efx_farch_filter_table *table;
2887 efx_oword_t filter;
2888 unsigned int filter_idx;
2890 spin_lock_bh(&efx->filter_lock);
2892 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
2893 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
2894 table_id++) {
2895 table = &state->table[table_id];
2897 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
2898 if (!test_bit(filter_idx, table->used_bitmap) ||
2899 table->spec[filter_idx].dmaq_id >=
2900 efx->n_rx_channels)
2901 continue;
2903 if (efx->rx_scatter)
2904 table->spec[filter_idx].flags |=
2905 EFX_FILTER_FLAG_RX_SCATTER;
2906 else
2907 table->spec[filter_idx].flags &=
2908 ~EFX_FILTER_FLAG_RX_SCATTER;
2910 if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
2911 /* Pushed by efx_farch_filter_push_rx_config() */
2912 continue;
2914 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
2915 efx_writeo(efx, &filter,
2916 table->offset + table->step * filter_idx);
2917 }
2918 }
2920 efx_farch_filter_push_rx_config(efx);
2922 spin_unlock_bh(&efx->filter_lock);
2923 }
2925 #ifdef CONFIG_RFS_ACCEL
2927 s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
2928 struct efx_filter_spec *gen_spec)
2929 {
2930 return efx_farch_filter_insert(efx, gen_spec, true);
2931 }
2933 bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2934 unsigned int index)
2935 {
2936 struct efx_farch_filter_state *state = efx->filter_state;
2937 struct efx_farch_filter_table *table =
2938 &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2940 if (test_bit(index, table->used_bitmap) &&
2941 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
2942 rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
2943 flow_id, index)) {
2944 efx_farch_filter_table_clear_entry(efx, table, index);
2945 return true;
2946 }
2948 return false;
2949 }
2951 #endif /* CONFIG_RFS_ACCEL */
2953 void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
2954 {
2955 struct net_device *net_dev = efx->net_dev;
2956 struct netdev_hw_addr *ha;
2957 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2958 u32 crc;
2959 int bit;
2961 if (!efx_dev_registered(efx))
2962 return;
2964 netif_addr_lock_bh(net_dev);
2966 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
2968 /* Build multicast hash table */
2969 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
2970 memset(mc_hash, 0xff, sizeof(*mc_hash));
2971 } else {
2972 memset(mc_hash, 0x00, sizeof(*mc_hash));
2973 netdev_for_each_mc_addr(ha, net_dev) {
2974 crc = ether_crc_le(ETH_ALEN, ha->addr);
2975 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
2976 __set_bit_le(bit, mc_hash);
2977 }
2979 /* Broadcast packets go through the multicast hash filter.
2980 * ether_crc_le() of the broadcast address is 0xbe2612ff
2981 * so we always add bit 0xff to the mask.
2982 */
2983 __set_bit_le(0xff, mc_hash);
2984 }
2986 netif_addr_unlock_bh(net_dev);