1 /****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2010 Solarflare Communications Inc.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
16 #include "farch_regs.h"
18 /* "Fudge factors" - difference between programmed value and actual depth.
19 * Due to pipelined implementation we need to program H/W with a value that
20 * is larger than the hop limit we want.
/* These fudges are added to the software-tracked search depths before
 * they are written into the hardware search-limit registers in
 * efx_farch_filter_push_rx_config()/push_tx_limits() below. */
22 #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
23 #define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
25 /* Hard maximum hop limit. Hardware will time-out beyond 200-something.
26 * We also need to avoid infinite loops in efx_farch_filter_search() when the
29 #define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
31 /* Don't try very hard to find space for performance hints, as this is
32 * counter-productive. */
33 #define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
/* Hardware filter tables.  The RX table ordering must match the
 * (type >> 2) encoding asserted in efx_farch_filter_spec_table_id(). */
35 enum efx_farch_filter_table_id {
36 EFX_FARCH_FILTER_TABLE_RX_IP = 0,
37 EFX_FARCH_FILTER_TABLE_RX_MAC,
38 EFX_FARCH_FILTER_TABLE_RX_DEF,
39 EFX_FARCH_FILTER_TABLE_TX_MAC,
40 EFX_FARCH_FILTER_TABLE_COUNT,
/* Fixed slots in the RX default-filter table: one unicast and one
 * multicast entry; the final enumerator doubles as the table size. */
43 enum efx_farch_filter_index {
44 EFX_FARCH_FILTER_INDEX_UC_DEF,
45 EFX_FARCH_FILTER_INDEX_MC_DEF,
46 EFX_FARCH_FILTER_SIZE_RX_DEF,
/* Software copy of one hardware filter entry.
 * NOTE(review): the member list is missing from this extract (original
 * numbering jumps 49 -> 57); fields type, priority, flags, dmaq_id and
 * data[3] are referenced throughout the rest of the file — confirm
 * against the upstream driver. */
49 struct efx_farch_filter_spec {
/* Software state for one hardware filter table. */
57 struct efx_farch_filter_table {
58 enum efx_farch_filter_table_id id;
59 u32 offset; /* address of table relative to BAR */
60 unsigned size; /* number of entries */
61 unsigned step; /* step between entries */
62 unsigned used; /* number currently used */
63 unsigned long *used_bitmap;
64 struct efx_farch_filter_spec *spec;
65 unsigned search_depth[EFX_FILTER_TYPE_COUNT];
/* Top-level filter state hung off struct efx_nic.
 * NOTE(review): a spinlock member and (under CONFIG_RFS_ACCEL) a
 * rps_flow_id array are used elsewhere in this file but their
 * declarations are missing from this extract. */
68 struct efx_filter_state {
70 struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
71 #ifdef CONFIG_RFS_ACCEL
73 unsigned rps_expire_index;
/* Forward declaration; defined near the end of this file. */
78 efx_farch_filter_table_clear_entry(struct efx_nic *efx,
79 struct efx_farch_filter_table *table,
80 unsigned int filter_idx);
82 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
83 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
84 static u16 efx_farch_filter_hash(u32 key)
/* NOTE(review): extraction dropped lines here (numbering jumps 84 -> 89);
 * presumably the opening brace and a "u16 tmp;" declaration — confirm. */
/* First 16 rounds, seeded from the high half of the key */
89 tmp = 0x1fff ^ key >> 16;
90 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
/* Last 16 rounds, mixing in the low half of the key */
93 tmp = tmp ^ tmp << 13 ^ key;
94 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
95 return tmp ^ tmp >> 9;
98 /* To allow for hash collisions, filter search continues at these
99 * increments from the first possible entry selected by the hash. */
100 static u16 efx_farch_filter_increment(u32 key)
/* NOTE(review): function body missing from this extract — restore from
 * the upstream driver (presumably derives an odd probe increment from
 * the key so that all table slots are reachable). */
/* Map a filter spec to the table that holds it.  The table ID is
 * encoded as (type >> 2), plus 2 for TX filters; the BUILD_BUG_ONs
 * verify that the enum values preserve this encoding. */
105 static enum efx_farch_filter_table_id
106 efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
108 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
109 (EFX_FILTER_TCP_FULL >> 2));
110 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
111 (EFX_FILTER_TCP_WILD >> 2));
112 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
113 (EFX_FILTER_UDP_FULL >> 2));
114 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
115 (EFX_FILTER_UDP_WILD >> 2));
116 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
117 (EFX_FILTER_MAC_FULL >> 2));
118 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
119 (EFX_FILTER_MAC_WILD >> 2));
120 BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
121 EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
/* Callers must not pass an unspecified filter */
122 EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
123 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
/* Return the software table for @spec, or NULL for an unspecified
 * filter.  NOTE(review): the early-return statement is truncated in
 * this extract (numbering jumps 130 -> 133) — presumably "return NULL". */
126 static struct efx_farch_filter_table *
127 efx_farch_filter_spec_table(struct efx_filter_state *state,
128 const struct efx_farch_filter_spec *spec)
130 if (spec->type == EFX_FILTER_UNSPEC)
133 return &state->table[efx_farch_filter_spec_table_id(spec)];
/* Forget the accumulated per-type search depths (called once a table
 * is empty) so the hardware search limits can shrink again. */
137 efx_farch_filter_table_reset_search_depth(struct efx_farch_filter_table *table)
139 memset(table->search_depth, 0, sizeof(table->search_depth));
/* Program the RX filter control register: per-type search limits
 * (software depth plus pipeline fudge factor) and, on Siena-class
 * hardware, the default-filter queue/RSS/scatter configuration.
 * NOTE(review): several lines (braces, EFX_SET_OWORD_FIELD call heads,
 * the efx_nic_rev() conditional) are missing from this extract. */
142 static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
144 struct efx_filter_state *state = efx->filter_state;
145 struct efx_farch_filter_table *table;
146 efx_oword_t filter_ctl;
148 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
/* IP (TCP/UDP) filter search limits */
150 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
151 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
152 table->search_depth[EFX_FILTER_TCP_FULL] +
153 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
154 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
155 table->search_depth[EFX_FILTER_TCP_WILD] +
156 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
157 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
158 table->search_depth[EFX_FILTER_UDP_FULL] +
159 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
160 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
161 table->search_depth[EFX_FILTER_UDP_WILD] +
162 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
/* MAC filter search limits (table only present on newer revisions) */
164 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
167 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
168 table->search_depth[EFX_FILTER_MAC_FULL] +
169 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
171 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
172 table->search_depth[EFX_FILTER_MAC_WILD] +
173 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
/* Default (no-match) filter configuration */
176 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
179 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
180 table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
182 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
183 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
184 EFX_FILTER_FLAG_RX_RSS));
186 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
187 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
189 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
190 !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
191 EFX_FILTER_FLAG_RX_RSS));
193 /* There is a single bit to enable RX scatter for all
194 * unmatched packets. Only set it if scatter is
195 * enabled in both filter specs.
198 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
199 !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
200 table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
201 EFX_FILTER_FLAG_RX_SCATTER));
202 } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
203 /* We don't expose 'default' filters because unmatched
204 * packets always go to the queue number found in the
205 * RSS table. But we still need to set the RX scatter
209 filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
213 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
/* Program the TX MAC filter search limits (software depth plus
 * pipeline fudge) into the TX configuration register.
 * NOTE(review): the tx_cfg declaration and surrounding braces are
 * missing from this extract. */
216 static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
218 struct efx_filter_state *state = efx->filter_state;
219 struct efx_farch_filter_table *table;
222 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
224 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
227 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
228 table->search_depth[EFX_FILTER_MAC_FULL] +
229 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
231 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
232 table->search_depth[EFX_FILTER_MAC_WILD] +
233 EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
236 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
239 static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
240 __be32 host1, __be16 port1,
241 __be32 host2, __be16 port2)
243 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
244 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
245 spec->data[2] = ntohl(host2);
248 static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
249 __be32 *host1, __be16 *port1,
250 __be32 *host2, __be16 *port2)
252 *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
253 *port1 = htons(spec->data[0]);
254 *host2 = htonl(spec->data[2]);
255 *port2 = htons(spec->data[1] >> 16);
/**
259 * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
260 * @spec: Specification to initialise
261 * @proto: Transport layer protocol number
262 * @host: Local host address (network byte order)
263 * @port: Local port (network byte order)
 *
 * Returns 0 on success or -EPROTONOSUPPORT for unsupported protocols
 * or if @spec already carries other match criteria.
 */
265 int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
266 __be32 host, __be16 port)
/* NOTE(review): the local declarations and the switch (proto) wrapper
 * around the type assignments below are missing from this extract. */
271 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
273 /* This cannot currently be combined with other filtering */
274 if (spec->type != EFX_FILTER_UNSPEC)
275 return -EPROTONOSUPPORT;
282 spec->type = EFX_FILTER_TCP_WILD;
285 spec->type = EFX_FILTER_UDP_WILD;
288 return -EPROTONOSUPPORT;
291 /* Filter is constructed in terms of source and destination,
292 * with the odd wrinkle that the ports are swapped in a UDP
293 * wildcard filter. We need to convert from local and remote
294 * (= zero for wildcard) addresses.
297 if (proto != IPPROTO_UDP) {
304 __efx_filter_set_ipv4(spec, host1, port1, host, port);
/* Inverse of efx_filter_set_ipv4_local(): recover protocol, local
 * host and port from a wildcard IP filter spec.
 * NOTE(review): break statements, the default case and the return are
 * missing from this extract. */
308 int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
309 u8 *proto, __be32 *host, __be16 *port)
314 switch (spec->type) {
315 case EFX_FILTER_TCP_WILD:
316 *proto = IPPROTO_TCP;
317 __efx_filter_get_ipv4(spec, &host1, &port1, host, port);
319 case EFX_FILTER_UDP_WILD:
320 *proto = IPPROTO_UDP;
/* Ports are swapped in a UDP wildcard filter (see the setter) */
321 __efx_filter_get_ipv4(spec, &host1, port, host, &port1);
/**
329 * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
330 * @spec: Specification to initialise
331 * @proto: Transport layer protocol number
332 * @host: Local host address (network byte order)
333 * @port: Local port (network byte order)
334 * @rhost: Remote host address (network byte order)
335 * @rport: Remote port (network byte order)
 *
 * Returns 0 on success or -EPROTONOSUPPORT.  NOTE(review): the error
 * return for zero ports and the switch (proto) wrapper are truncated
 * in this extract.
 */
337 int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
338 __be32 host, __be16 port,
339 __be32 rhost, __be16 rport)
341 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
343 /* This cannot currently be combined with other filtering */
344 if (spec->type != EFX_FILTER_UNSPEC)
345 return -EPROTONOSUPPORT;
/* Full-match filters cannot express a wildcard (zero) port */
347 if (port == 0 || rport == 0)
352 spec->type = EFX_FILTER_TCP_FULL;
355 spec->type = EFX_FILTER_UDP_FULL;
358 return -EPROTONOSUPPORT;
361 __efx_filter_set_ipv4(spec, rhost, rport, host, port);
/* Inverse of efx_filter_set_ipv4_full(): recover protocol and both
 * host/port pairs from a full-match IP filter spec.
 * NOTE(review): break statements, the default case and the return are
 * missing from this extract. */
365 int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
366 u8 *proto, __be32 *host, __be16 *port,
367 __be32 *rhost, __be16 *rport)
369 switch (spec->type) {
370 case EFX_FILTER_TCP_FULL:
371 *proto = IPPROTO_TCP;
373 case EFX_FILTER_UDP_FULL:
374 *proto = IPPROTO_UDP;
380 __efx_filter_get_ipv4(spec, rhost, rport, host, port);
/**
385 * efx_filter_set_eth_local - specify local Ethernet address and optional VID
386 * @spec: Specification to initialise
387 * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
388 * @addr: Local Ethernet MAC address
 *
 * Returns 0 on success or -EPROTONOSUPPORT if @spec already carries
 * other match criteria.
 */
390 int efx_filter_set_eth_local(struct efx_filter_spec *spec,
391 u16 vid, const u8 *addr)
393 EFX_BUG_ON_PARANOID(!(spec->flags &
394 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
396 /* This cannot currently be combined with other filtering */
397 if (spec->type != EFX_FILTER_UNSPEC)
398 return -EPROTONOSUPPORT;
400 if (vid == EFX_FILTER_VID_UNSPEC) {
401 spec->type = EFX_FILTER_MAC_WILD;
404 spec->type = EFX_FILTER_MAC_FULL;
/* NOTE(review): the data[0] = vid assignment is presumably on a line
 * dropped by extraction (numbering jumps 404 -> 408). */
/* MAC bytes packed big-endian-style across data[2]:data[1] */
408 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
409 spec->data[2] = addr[0] << 8 | addr[1];
/**
414 * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
415 * @spec: Specification to initialise
 */
417 int efx_filter_set_uc_def(struct efx_filter_spec *spec)
419 EFX_BUG_ON_PARANOID(!(spec->flags &
420 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
/* NOTE(review): the error return for an already-typed spec is on a
 * line dropped by extraction. */
422 if (spec->type != EFX_FILTER_UNSPEC)
425 spec->type = EFX_FILTER_UC_DEF;
426 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
/**
431 * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
432 * @spec: Specification to initialise
 */
434 int efx_filter_set_mc_def(struct efx_filter_spec *spec)
436 EFX_BUG_ON_PARANOID(!(spec->flags &
437 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
/* NOTE(review): the error return for an already-typed spec is on a
 * line dropped by extraction. */
439 if (spec->type != EFX_FILTER_UNSPEC)
442 spec->type = EFX_FILTER_MC_DEF;
443 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
/* Reinitialise one entry of the RX default-filter table to its
 * baseline state (manual priority, RSS/scatter per current NIC
 * configuration) and mark it in use.  Default filters always exist. */
448 efx_farch_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
450 struct efx_filter_state *state = efx->filter_state;
451 struct efx_farch_filter_table *table =
452 &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
453 struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
455 /* If there's only one channel then disable RSS for non VF
456 * traffic, thereby allowing VFs to use RSS when the PF can't.
/* UC_DEF/MC_DEF types are contiguous, so index maps directly to type */
458 spec->type = EFX_FILTER_UC_DEF + filter_idx;
459 spec->priority = EFX_FILTER_PRI_MANUAL;
460 spec->flags = (EFX_FILTER_FLAG_RX |
461 (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
462 (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
464 table->used_bitmap[0] |= 1 << filter_idx;
/* Inverse of efx_filter_set_eth_local(): recover VID and MAC address.
 * NOTE(review): break statements, the default case and the return are
 * missing from this extract. */
467 int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
470 switch (spec->type) {
471 case EFX_FILTER_MAC_WILD:
472 *vid = EFX_FILTER_VID_UNSPEC;
474 case EFX_FILTER_MAC_FULL:
475 *vid = spec->data[0];
/* Unpack the MAC bytes from data[2]:data[1] (see the setter) */
481 addr[0] = spec->data[2] >> 8;
482 addr[1] = spec->data[2];
483 addr[2] = spec->data[1] >> 24;
484 addr[3] = spec->data[1] >> 16;
485 addr[4] = spec->data[1] >> 8;
486 addr[5] = spec->data[1];
490 /* Build a filter entry and return its n-tuple key. */
491 static u32 efx_farch_filter_build(efx_oword_t *filter,
492 struct efx_farch_filter_spec *spec)
/* NOTE(review): the data3 declaration, some EFX_POPULATE_OWORD field
 * heads, per-case data3 assignments and break statements are missing
 * from this extract. */
496 switch (efx_farch_filter_spec_table_id(spec)) {
497 case EFX_FARCH_FILTER_TABLE_RX_IP: {
498 bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
499 spec->type == EFX_FILTER_UDP_WILD);
500 EFX_POPULATE_OWORD_7(
503 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
505 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
506 FRF_BZ_TCP_UDP, is_udp,
507 FRF_BZ_RXQ_ID, spec->dmaq_id,
508 EFX_DWORD_2, spec->data[2],
509 EFX_DWORD_1, spec->data[1],
510 EFX_DWORD_0, spec->data[0]);
515 case EFX_FARCH_FILTER_TABLE_RX_MAC: {
516 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
517 EFX_POPULATE_OWORD_7(
520 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
521 FRF_CZ_RMFT_SCATTER_EN,
522 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
523 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
524 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
525 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
526 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
527 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
532 case EFX_FARCH_FILTER_TABLE_TX_MAC: {
533 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
534 EFX_POPULATE_OWORD_5(*filter,
535 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
536 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
537 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
538 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
539 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
/* Fold the non-n-tuple match bits into the key as a fourth word */
540 data3 = is_wild | spec->dmaq_id << 1;
548 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
/* Two filters are equal if they match the same packets.  TX filters
 * additionally steer, so the TX queue is part of the match.
 * NOTE(review): the "return false"/"return true" statements are
 * missing from this extract. */
551 static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
552 const struct efx_farch_filter_spec *right)
554 if (left->type != right->type ||
555 memcmp(left->data, right->data, sizeof(left->data)))
558 if (left->flags & EFX_FILTER_FLAG_TX &&
559 left->dmaq_id != right->dmaq_id)
/*
566 * Construct/deconstruct external filter IDs. At least the RX filter
567 * IDs must be ordered by matching priority, for RX NFC semantics.
569 * Deconstruction needs to be robust against invalid IDs so that
570 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
571 * accept user-provided IDs.
 */
574 #define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
/* Match priority of each filter type: lower value = more specific. */
576 static const u8 efx_farch_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
577 [EFX_FILTER_TCP_FULL] = 0,
578 [EFX_FILTER_UDP_FULL] = 0,
579 [EFX_FILTER_TCP_WILD] = 1,
580 [EFX_FILTER_UDP_WILD] = 1,
581 [EFX_FILTER_MAC_FULL] = 2,
582 [EFX_FILTER_MAC_WILD] = 3,
583 [EFX_FILTER_UC_DEF] = 4,
584 [EFX_FILTER_MC_DEF] = 4,
/* Map each (RX then TX) match-priority range of the external filter
 * ID space back to the hardware table holding those filters. */
587 static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
588 EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
589 EFX_FARCH_FILTER_TABLE_RX_IP,
590 EFX_FARCH_FILTER_TABLE_RX_MAC,
591 EFX_FARCH_FILTER_TABLE_RX_MAC,
592 EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
593 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
594 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
/* External filter ID layout: [range : 13-bit table index] */
597 #define EFX_FARCH_FILTER_INDEX_WIDTH 13
598 #define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
/* Build the external (user-visible) ID for a filter at @index.
 * NOTE(review): the return type / static qualifier line is missing
 * from this extract. */
601 efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
606 range = efx_farch_filter_type_match_pri[spec->type];
/* TX ranges follow the EFX_FARCH_FILTER_MATCH_PRI_COUNT RX ranges */
607 if (!(spec->flags & EFX_FILTER_FLAG_RX))
608 range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
610 return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
613 static inline enum efx_farch_filter_table_id
614 efx_farch_filter_id_table_id(u32 id)
616 unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
618 if (range < ARRAY_SIZE(efx_farch_filter_range_table))
619 return efx_farch_filter_range_table[range];
621 return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
624 static inline unsigned int efx_farch_filter_id_index(u32 id)
626 return id & EFX_FARCH_FILTER_INDEX_MASK;
/* Return one more than the highest valid RX filter ID for this NIC,
 * scanning ranges from least-specific downwards for the first table
 * that actually exists on this hardware revision.
 * NOTE(review): the loop construct around the table lookup and the
 * fallback return are missing from this extract. */
629 u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
631 struct efx_filter_state *state = efx->filter_state;
632 unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
633 enum efx_farch_filter_table_id table_id;
636 table_id = efx_farch_filter_range_table[range];
637 if (state->table[table_id].size != 0)
638 return range << EFX_FARCH_FILTER_INDEX_WIDTH |
639 state->table[table_id].size;
/**
646 * efx_filter_insert_filter - add or replace a filter
647 * @efx: NIC in which to insert the filter
648 * @spec: Specification for the filter
649 * @replace_equal: Flag for whether the specified filter may replace an
650 * existing filter with equal priority
652 * On success, return the filter ID.
653 * On failure, return a negative error code.
655 * If an existing filter has equal match values to the new filter
656 * spec, then the new filter might replace it, depending on the
657 * relative priorities. If the existing filter has lower priority, or
658 * if @replace_equal is set and it has equal priority, then it is
659 * replaced. Otherwise the function fails, returning -%EPERM if
660 * the existing filter has higher priority or -%EEXIST if it has
 *
 * NOTE(review): this extract is heavily truncated — the replace_equal
 * parameter line, filter/rc declarations, the hash-probe loop body,
 * error-out paths and closing braces are all on dropped lines.  Do not
 * modify without the complete upstream body.
 */
663 s32 efx_filter_insert_filter(struct efx_nic *efx,
664 struct efx_filter_spec *gen_spec,
667 struct efx_filter_state *state = efx->filter_state;
668 struct efx_farch_filter_table *table;
669 struct efx_farch_filter_spec spec;
671 int rep_index, ins_index;
672 unsigned int depth = 0;
675 /* XXX efx_farch_filter_spec and efx_filter_spec will diverge in future */
676 memcpy(&spec, gen_spec, sizeof(*gen_spec));
678 table = efx_farch_filter_spec_table(state, &spec);
679 if (!table || table->size == 0)
682 netif_vdbg(efx, hw, efx->net_dev,
683 "%s: type %d search_depth=%d", __func__, spec.type,
684 table->search_depth[spec.type]);
686 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
687 /* One filter spec per type */
688 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
689 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
690 EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
/* Default-filter table: fixed slot per type, no hashing needed */
691 rep_index = spec.type - EFX_FILTER_UC_DEF;
692 ins_index = rep_index;
694 spin_lock_bh(&state->lock);
696 /* Search concurrently for
697 * (1) a filter to be replaced (rep_index): any filter
698 * with the same match values, up to the current
699 * search depth for this type, and
700 * (2) the insertion point (ins_index): (1) or any
701 * free slot before it or up to the maximum search
702 * depth for this priority
703 * We fail if we cannot find (2).
705 * We can stop once either
706 * (a) we find (1), in which case we have definitely
707 * found (2) as well; or
708 * (b) we have searched exhaustively for (1), and have
709 * either found (2) or searched exhaustively for it
711 u32 key = efx_farch_filter_build(&filter, &spec);
712 unsigned int hash = efx_farch_filter_hash(key);
713 unsigned int incr = efx_farch_filter_increment(key);
714 unsigned int max_rep_depth = table->search_depth[spec.type];
715 unsigned int max_ins_depth =
716 spec.priority <= EFX_FILTER_PRI_HINT ?
717 EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
718 EFX_FARCH_FILTER_CTL_SRCH_MAX;
719 unsigned int i = hash & (table->size - 1);
724 spin_lock_bh(&state->lock);
727 if (!test_bit(i, table->used_bitmap)) {
730 } else if (efx_farch_filter_equal(&spec,
739 if (depth >= max_rep_depth &&
740 (ins_index >= 0 || depth >= max_ins_depth)) {
/* Open-addressed probing: step by the key-derived increment */
750 i = (i + incr) & (table->size - 1);
755 /* If we found a filter to be replaced, check whether we
758 if (rep_index >= 0) {
759 struct efx_farch_filter_spec *saved_spec =
760 &table->spec[rep_index];
762 if (spec.priority == saved_spec->priority && !replace_equal) {
766 if (spec.priority < saved_spec->priority) {
772 /* Insert the filter */
773 if (ins_index != rep_index) {
774 __set_bit(ins_index, table->used_bitmap);
777 table->spec[ins_index] = spec;
779 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
780 efx_farch_filter_push_rx_config(efx);
/* Deeper insertion may require raising the H/W search limits */
782 if (table->search_depth[spec.type] < depth) {
783 table->search_depth[spec.type] = depth;
784 if (spec.flags & EFX_FILTER_FLAG_TX)
785 efx_farch_filter_push_tx_limits(efx);
787 efx_farch_filter_push_rx_config(efx);
790 efx_writeo(efx, &filter,
791 table->offset + table->step * ins_index);
793 /* If we were able to replace a filter by inserting
794 * at a lower depth, clear the replaced filter
796 if (ins_index != rep_index && rep_index >= 0)
797 efx_farch_filter_table_clear_entry(efx, table,
801 netif_vdbg(efx, hw, efx->net_dev,
802 "%s: filter type %d index %d rxq %u set",
803 __func__, spec.type, ins_index, spec.dmaq_id);
804 rc = efx_farch_filter_make_id(&spec, ins_index);
807 spin_unlock_bh(&state->lock);
/* Remove one filter entry from software state and hardware.  RX
 * default filters are reset to their baseline instead of removed.
 * Caller must hold state->lock. */
812 efx_farch_filter_table_clear_entry(struct efx_nic *efx,
813 struct efx_farch_filter_table *table,
814 unsigned int filter_idx)
/* Never written to: stays all-zero, and writing it to the table slot
 * disables the hardware entry. */
816 static efx_oword_t filter;
818 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
819 /* RX default filters must always exist */
820 efx_farch_filter_reset_rx_def(efx, filter_idx);
821 efx_farch_filter_push_rx_config(efx);
822 } else if (test_bit(filter_idx, table->used_bitmap)) {
823 __clear_bit(filter_idx, table->used_bitmap);
825 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
827 efx_writeo(efx, &filter,
828 table->offset + table->step * filter_idx);
/**
833 * efx_filter_remove_id_safe - remove a filter by ID, carefully
834 * @efx: NIC from which to remove the filter
835 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
836 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
838 * This function will range-check @filter_id, so it is safe to call
839 * with a value passed from userland.
 *
 * NOTE(review): the rc declaration, the -ENOENT early returns and the
 * final return are on lines dropped by extraction.
 */
841 int efx_filter_remove_id_safe(struct efx_nic *efx,
842 enum efx_filter_priority priority,
845 struct efx_filter_state *state = efx->filter_state;
846 enum efx_farch_filter_table_id table_id;
847 struct efx_farch_filter_table *table;
848 unsigned int filter_idx;
849 struct efx_farch_filter_spec *spec;
/* Reject IDs whose range does not map to a real table */
852 table_id = efx_farch_filter_id_table_id(filter_id);
853 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
855 table = &state->table[table_id];
857 filter_idx = efx_farch_filter_id_index(filter_id);
858 if (filter_idx >= table->size)
860 spec = &table->spec[filter_idx];
862 spin_lock_bh(&state->lock);
/* Only remove if in use and at the priority the caller expects */
864 if (test_bit(filter_idx, table->used_bitmap) &&
865 spec->priority == priority) {
866 efx_farch_filter_table_clear_entry(efx, table, filter_idx);
867 if (table->used == 0)
868 efx_farch_filter_table_reset_search_depth(table);
874 spin_unlock_bh(&state->lock);
/**
880 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
881 * @efx: NIC from which to remove the filter
882 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
883 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
884 * @spec: Buffer in which to store filter specification
886 * This function will range-check @filter_id, so it is safe to call
887 * with a value passed from userland.
 *
 * NOTE(review): the rc declaration, the -ENOENT early returns and the
 * final return are on lines dropped by extraction.
 */
889 int efx_filter_get_filter_safe(struct efx_nic *efx,
890 enum efx_filter_priority priority,
891 u32 filter_id, struct efx_filter_spec *spec_buf)
893 struct efx_filter_state *state = efx->filter_state;
894 enum efx_farch_filter_table_id table_id;
895 struct efx_farch_filter_table *table;
896 struct efx_farch_filter_spec *spec;
897 unsigned int filter_idx;
/* Reject IDs whose range does not map to a real table */
900 table_id = efx_farch_filter_id_table_id(filter_id);
901 if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
903 table = &state->table[table_id];
905 filter_idx = efx_farch_filter_id_index(filter_id);
906 if (filter_idx >= table->size)
908 spec = &table->spec[filter_idx];
910 spin_lock_bh(&state->lock);
912 if (test_bit(filter_idx, table->used_bitmap) &&
913 spec->priority == priority) {
914 /* XXX efx_farch_filter_spec and efx_filter_spec will diverge */
915 memcpy(spec_buf, spec, sizeof(*spec));
921 spin_unlock_bh(&state->lock);
/* Remove all filters in @table_id with priority <= @priority.
 * NOTE(review): the "static void" line and closing brace are on lines
 * dropped by extraction. */
927 efx_farch_filter_table_clear(struct efx_nic *efx,
928 enum efx_farch_filter_table_id table_id,
929 enum efx_filter_priority priority)
931 struct efx_filter_state *state = efx->filter_state;
932 struct efx_farch_filter_table *table = &state->table[table_id];
933 unsigned int filter_idx;
935 spin_lock_bh(&state->lock);
937 for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
938 if (table->spec[filter_idx].priority <= priority)
939 efx_farch_filter_table_clear_entry(efx, table,
941 if (table->used == 0)
942 efx_farch_filter_table_reset_search_depth(table);
944 spin_unlock_bh(&state->lock);
/**
948 * efx_filter_clear_rx - remove RX filters by priority
949 * @efx: NIC from which to remove the filters
950 * @priority: Maximum priority to remove
 *
 * NOTE(review): the priority argument continuation lines of both calls
 * were dropped by extraction; the RX_DEF table is not cleared here
 * because default filters always exist.
 */
952 void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
954 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
956 efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
/* Count RX filters in use at exactly @priority across all RX tables.
 * NOTE(review): the count variable declaration/increment and the
 * return statement are on lines dropped by extraction. */
960 u32 efx_filter_count_rx_used(struct efx_nic *efx,
961 enum efx_filter_priority priority)
963 struct efx_filter_state *state = efx->filter_state;
964 enum efx_farch_filter_table_id table_id;
965 struct efx_farch_filter_table *table;
966 unsigned int filter_idx;
969 spin_lock_bh(&state->lock);
971 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
972 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
974 table = &state->table[table_id];
975 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
976 if (test_bit(filter_idx, table->used_bitmap) &&
977 table->spec[filter_idx].priority == priority)
982 spin_unlock_bh(&state->lock);
/* Fill @buf with the IDs of RX filters at @priority, up to @size
 * entries; returns the count, or -EMSGSIZE if @buf is too small.
 * NOTE(review): the buf/size parameters, count declaration, -EMSGSIZE
 * assignment and return are on lines dropped by extraction. */
987 s32 efx_filter_get_rx_ids(struct efx_nic *efx,
988 enum efx_filter_priority priority,
991 struct efx_filter_state *state = efx->filter_state;
992 enum efx_farch_filter_table_id table_id;
993 struct efx_farch_filter_table *table;
994 unsigned int filter_idx;
997 spin_lock_bh(&state->lock);
999 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
1000 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
1002 table = &state->table[table_id];
1003 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
1004 if (test_bit(filter_idx, table->used_bitmap) &&
1005 table->spec[filter_idx].priority == priority) {
1006 if (count == size) {
1010 buf[count++] = efx_farch_filter_make_id(
1011 &table->spec[filter_idx], filter_idx);
1016 spin_unlock_bh(&state->lock);
1021 /* Restore filter state after reset: rewrite every in-use entry of
 * each register-based table to hardware, then re-push the RX/TX
 * search-limit configuration. */
1022 void efx_restore_filters(struct efx_nic *efx)
1024 struct efx_filter_state *state = efx->filter_state;
1025 enum efx_farch_filter_table_id table_id;
1026 struct efx_farch_filter_table *table;
1028 unsigned int filter_idx;
1030 spin_lock_bh(&state->lock);
1032 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
1033 table = &state->table[table_id];
1035 /* Check whether this is a regular register table */
1036 if (table->step == 0)
1039 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
1040 if (!test_bit(filter_idx, table->used_bitmap))
1042 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
1043 efx_writeo(efx, &filter,
1044 table->offset + table->step * filter_idx);
1048 efx_farch_filter_push_rx_config(efx);
1049 efx_farch_filter_push_tx_limits(efx);
1051 spin_unlock_bh(&state->lock);
/* Allocate and initialise all filter software state for this NIC
 * revision.  Returns 0 or -ENOMEM; on failure efx_remove_filters()
 * tears down whatever was allocated.
 * NOTE(review): the table_id declaration, several error-path gotos,
 * the rps_flow_id zeroing and some closing braces are on lines dropped
 * by extraction. */
1054 int efx_probe_filters(struct efx_nic *efx)
1056 struct efx_filter_state *state;
1057 struct efx_farch_filter_table *table;
1060 state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
1063 efx->filter_state = state;
1065 spin_lock_init(&state->lock);
1067 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1068 #ifdef CONFIG_RFS_ACCEL
1069 state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
1070 sizeof(*state->rps_flow_id),
1072 if (!state->rps_flow_id)
1075 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
1076 table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
1077 table->offset = FR_BZ_RX_FILTER_TBL0;
1078 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
1079 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
1082 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1083 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
1084 table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
1085 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
1086 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
1087 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
/* RX_DEF is a virtual table with no backing registers (step == 0) */
1089 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
1090 table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
1091 table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
1093 table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
1094 table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
1095 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
1096 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
1097 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
/* Allocate per-table occupancy bitmap and spec array */
1100 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
1101 table = &state->table[table_id];
1102 if (table->size == 0)
1104 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
1105 sizeof(unsigned long),
1107 if (!table->used_bitmap)
1109 table->spec = vzalloc(table->size * sizeof(*table->spec));
1114 if (state->table[EFX_FARCH_FILTER_TABLE_RX_DEF].size) {
1115 /* RX default filters must always exist */
1117 for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++)
1118 efx_farch_filter_reset_rx_def(efx, i);
1121 efx_farch_filter_push_rx_config(efx);
/* Error path: free partial allocations */
1126 efx_remove_filters(efx);
/* Free all filter software state.  Safe to call on a partially
 * initialised state (kfree/vfree of NULL are no-ops). */
1130 void efx_remove_filters(struct efx_nic *efx)
1132 struct efx_filter_state *state = efx->filter_state;
1133 enum efx_farch_filter_table_id table_id;
1135 for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
1136 kfree(state->table[table_id].used_bitmap);
1137 vfree(state->table[table_id].spec);
1139 #ifdef CONFIG_RFS_ACCEL
1140 kfree(state->rps_flow_id);
1145 /* Update scatter enable flags for filters pointing to our own RX queues */
1146 void efx_filter_update_rx_scatter(struct efx_nic *efx)
1148 struct efx_filter_state *state = efx->filter_state;
1149 enum efx_farch_filter_table_id table_id;
1150 struct efx_farch_filter_table *table;
1152 unsigned int filter_idx;
1154 spin_lock_bh(&state->lock);
1156 for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
1157 table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
1159 table = &state->table[table_id];
/* Skip unused entries and (per the comparison below, truncated in
 * this extract) filters steering to queues we do not own */
1161 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
1162 if (!test_bit(filter_idx, table->used_bitmap) ||
1163 table->spec[filter_idx].dmaq_id >=
1167 if (efx->rx_scatter)
1168 table->spec[filter_idx].flags |=
1169 EFX_FILTER_FLAG_RX_SCATTER;
1171 table->spec[filter_idx].flags &=
1172 ~EFX_FILTER_FLAG_RX_SCATTER;
1174 if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
1175 /* Pushed by efx_farch_filter_push_rx_config() */
1178 efx_farch_filter_build(&filter, &table->spec[filter_idx]);
1179 efx_writeo(efx, &filter,
1180 table->offset + table->step * filter_idx);
1184 efx_farch_filter_push_rx_config(efx);
1186 spin_unlock_bh(&state->lock);
1189 #ifdef CONFIG_RFS_ACCEL
1191 int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
1192 u16 rxq_index, u32 flow_id)
1194 struct efx_nic *efx = netdev_priv(net_dev);
1195 struct efx_channel *channel;
1196 struct efx_filter_state *state = efx->filter_state;
1197 struct efx_filter_spec spec;
1198 const struct iphdr *ip;
1199 const __be16 *ports;
1203 nhoff = skb_network_offset(skb);
1205 if (skb->protocol == htons(ETH_P_8021Q)) {
1206 EFX_BUG_ON_PARANOID(skb_headlen(skb) <
1207 nhoff + sizeof(struct vlan_hdr));
1208 if (((const struct vlan_hdr *)skb->data + nhoff)->
1209 h_vlan_encapsulated_proto != htons(ETH_P_IP))
1210 return -EPROTONOSUPPORT;
1212 /* This is IP over 802.1q VLAN. We can't filter on the
1213 * IP 5-tuple and the vlan together, so just strip the
1214 * vlan header and filter on the IP part.
1216 nhoff += sizeof(struct vlan_hdr);
1217 } else if (skb->protocol != htons(ETH_P_IP)) {
1218 return -EPROTONOSUPPORT;
1221 /* RFS must validate the IP header length before calling us */
1222 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
1223 ip = (const struct iphdr *)(skb->data + nhoff);
1224 if (ip_is_fragment(ip))
1225 return -EPROTONOSUPPORT;
1226 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
1227 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
1229 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
1230 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
1232 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
1233 ip->daddr, ports[1], ip->saddr, ports[0]);
1237 rc = efx_filter_insert_filter(efx, &spec, true);
1241 /* Remember this so we can check whether to expire the filter later */
1242 state->rps_flow_id[rc] = flow_id;
1243 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
1244 ++channel->rfs_filters_added;
1246 netif_info(efx, rx_status, efx->net_dev,
1247 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
1248 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
1249 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
1250 rxq_index, flow_id, rc);
/* Scan up to @quota RX_IP hint filters (round-robin, resuming at
 * rps_expire_index) and remove those whose flows the RFS core says
 * may expire.  Non-blocking: bails out if the lock is contended.
 * NOTE(review): the index/stop declarations and the return statements
 * are on lines dropped by extraction. */
1255 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
1257 struct efx_filter_state *state = efx->filter_state;
1258 struct efx_farch_filter_table *table =
1259 &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
1260 unsigned mask = table->size - 1;
/* Called from NAPI context; do not spin against the insert path */
1264 if (!spin_trylock_bh(&state->lock))
1267 index = state->rps_expire_index;
1268 stop = (index + quota) & mask;
1270 while (index != stop) {
1271 if (test_bit(index, table->used_bitmap) &&
1272 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
1273 rps_may_expire_flow(efx->net_dev,
1274 table->spec[index].dmaq_id,
1275 state->rps_flow_id[index], index)) {
1276 netif_info(efx, rx_status, efx->net_dev,
1277 "expiring filter %d [flow %u]\n",
1278 index, state->rps_flow_id[index]);
1279 efx_farch_filter_table_clear_entry(efx, table, index);
1281 index = (index + 1) & mask;
1284 state->rps_expire_index = stop;
1285 if (table->used == 0)
1286 efx_farch_filter_table_reset_search_depth(table);
1288 spin_unlock_bh(&state->lock);
1292 #endif /* CONFIG_RFS_ACCEL */