1 /****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2010 Solarflare Communications Inc.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
18 /* "Fudge factors" - difference between programmed value and actual depth.
19 * Due to pipelined implementation we need to program H/W with a value that
20 * is larger than the hop limit we want.
22 #define FILTER_CTL_SRCH_FUDGE_WILD 3
23 #define FILTER_CTL_SRCH_FUDGE_FULL 1
25 /* Hard maximum hop limit. Hardware will time-out beyond 200-something.
26 * We also need to avoid infinite loops in efx_filter_search() when the
29 #define FILTER_CTL_SRCH_MAX 200
31 /* Don't try very hard to find space for performance hints, as this is
32 * counter-productive. */
33 #define FILTER_CTL_SRCH_HINT_MAX 5
/* NOTE(review): the FUDGE values are added to the per-type search depths when
 * they are programmed into hardware by efx_filter_push_rx_config() and
 * efx_filter_push_tx_limits() below. */
/* Identifies the hardware filter table a spec belongs to.  The RX_IP and
 * RX_MAC values are tied to (filter type >> 2) and TX_MAC to RX_MAC + 2 by
 * the BUILD_BUG_ONs in efx_filter_spec_table_id(). */
35 enum efx_filter_table_id {
36 EFX_FILTER_TABLE_RX_IP = 0,
37 EFX_FILTER_TABLE_RX_MAC,
38 EFX_FILTER_TABLE_RX_DEF,
39 EFX_FILTER_TABLE_TX_MAC,
40 EFX_FILTER_TABLE_COUNT,
/* Fixed indices within the RX default-filter table; EFX_FILTER_SIZE_RX_DEF
 * doubles as the entry count (see efx_probe_filters()). */
43 enum efx_filter_index {
44 EFX_FILTER_INDEX_UC_DEF,
45 EFX_FILTER_INDEX_MC_DEF,
46 EFX_FILTER_SIZE_RX_DEF,
/* Software state for one hardware filter table. */
49 struct efx_filter_table {
50 enum efx_filter_table_id id;
51 u32 offset; /* address of table relative to BAR */
52 unsigned size; /* number of entries */
53 unsigned step; /* step between entries */
54 unsigned used; /* number currently used */
55 unsigned long *used_bitmap;
56 struct efx_filter_spec *spec;
57 unsigned search_depth[EFX_FILTER_TYPE_COUNT];
/* Per-NIC filter state; hangs off efx->filter_state.
 * NOTE(review): a lock member and the rps_flow_id array are referenced by
 * code below but their declarations are not visible in this fragment. */
60 struct efx_filter_state {
62 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
63 #ifdef CONFIG_RFS_ACCEL
65 unsigned rps_expire_index;
69 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
70 * key derived from the n-tuple. The initial LFSR state is 0xffff. */
71 static u16 efx_filter_hash(u32 key)
/* First fold the high half of the key into the LFSR state... */
76 tmp = 0x1fff ^ key >> 16;
77 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
/* ...then fold in the low half and finish the reduction to 16 bits. */
80 tmp = tmp ^ tmp << 13 ^ key;
81 tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
82 return tmp ^ tmp >> 9;
85 /* To allow for hash collisions, filter search continues at these
86 * increments from the first possible entry selected by the hash. */
87 static u16 efx_filter_increment(u32 key)
/* NOTE(review): body of efx_filter_increment() is not visible in this
 * fragment; presumably it derives a probe increment from the key. */
/* Map a filter spec to the table it lives in.  The BUILD_BUG_ONs pin down
 * the encoding this relies on: table id == (type >> 2), with TX tables
 * offset by 2 from their RX counterparts. */
92 static enum efx_filter_table_id
93 efx_filter_spec_table_id(const struct efx_filter_spec *spec)
95 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
96 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
97 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
98 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
99 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
100 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
101 BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
102 EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
103 return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
/* Return the table for a spec, or (judging by the UNSPEC guard) a sentinel
 * for unspecified filters; the early-return line is not visible here. */
106 static struct efx_filter_table *
107 efx_filter_spec_table(struct efx_filter_state *state,
108 const struct efx_filter_spec *spec)
110 if (spec->type == EFX_FILTER_UNSPEC)
113 return &state->table[efx_filter_spec_table_id(spec)];
/* Forget the per-type search depths once a table is empty, so future
 * insertions start from the shallowest possible hardware search limit. */
116 static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
118 memset(table->search_depth, 0, sizeof(table->search_depth));
/* Program the RX filter control register: per-type search limits (software
 * depth plus the pipeline fudge factor) and the default-filter queue/RSS
 * configuration.  Read-modify-write of FR_BZ_RX_FILTER_CTL. */
121 static void efx_filter_push_rx_config(struct efx_nic *efx)
123 struct efx_filter_state *state = efx->filter_state;
124 struct efx_filter_table *table;
125 efx_oword_t filter_ctl;
127 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
129 table = &state->table[EFX_FILTER_TABLE_RX_IP];
130 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
131 table->search_depth[EFX_FILTER_TCP_FULL] +
132 FILTER_CTL_SRCH_FUDGE_FULL);
133 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
134 table->search_depth[EFX_FILTER_TCP_WILD] +
135 FILTER_CTL_SRCH_FUDGE_WILD);
136 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
137 table->search_depth[EFX_FILTER_UDP_FULL] +
138 FILTER_CTL_SRCH_FUDGE_FULL);
139 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
140 table->search_depth[EFX_FILTER_UDP_WILD] +
141 FILTER_CTL_SRCH_FUDGE_WILD);
/* MAC-table limits; presumably guarded by a table->size check on the
 * missing line(s) so this is skipped on hardware without the table. */
143 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
146 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
147 table->search_depth[EFX_FILTER_MAC_FULL] +
148 FILTER_CTL_SRCH_FUDGE_FULL);
150 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
151 table->search_depth[EFX_FILTER_MAC_WILD] +
152 FILTER_CTL_SRCH_FUDGE_WILD);
/* Default (no-match) filters: steer unmatched unicast/multicast to the
 * queues recorded in the RX_DEF table specs, with their RSS flags. */
155 table = &state->table[EFX_FILTER_TABLE_RX_DEF];
158 filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
159 table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
161 filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
162 !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
163 EFX_FILTER_FLAG_RX_RSS));
165 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
166 table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
168 filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
169 !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
170 EFX_FILTER_FLAG_RX_RSS));
173 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
/* Program TX MAC-filter search limits into FR_AZ_TX_CFG, analogous to
 * efx_filter_push_rx_config() on the RX side. */
176 static void efx_filter_push_tx_limits(struct efx_nic *efx)
178 struct efx_filter_state *state = efx->filter_state;
179 struct efx_filter_table *table;
182 efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
184 table = &state->table[EFX_FILTER_TABLE_TX_MAC];
187 tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
188 table->search_depth[EFX_FILTER_MAC_FULL] +
189 FILTER_CTL_SRCH_FUDGE_FULL);
191 tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
192 table->search_depth[EFX_FILTER_MAC_WILD] +
193 FILTER_CTL_SRCH_FUDGE_WILD);
196 efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
/* Pack an IPv4 4-tuple into spec->data[] in the hardware layout:
 *   data[0] = host1[15:0] << 16 | port1
 *   data[1] = port2 << 16 | host1[31:16]
 *   data[2] = host2
 * Values are converted from network to host byte order first. */
199 static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
200 __be32 host1, __be16 port1,
201 __be32 host2, __be16 port2)
203 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
204 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
205 spec->data[2] = ntohl(host2);
/* Inverse of __efx_filter_set_ipv4: unpack data[] back into network-order
 * addresses and ports. */
208 static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
209 __be32 *host1, __be16 *port1,
210 __be32 *host2, __be16 *port2)
212 *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
213 *port1 = htons(spec->data[0]);
214 *host2 = htonl(spec->data[2]);
215 *port2 = htons(spec->data[1] >> 16);
219 * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
220 * @spec: Specification to initialise
221 * @proto: Transport layer protocol number
222 * @host: Local host address (network byte order)
223 * @port: Local port (network byte order)
225 int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
226 __be32 host, __be16 port)
231 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
233 /* This cannot currently be combined with other filtering */
234 if (spec->type != EFX_FILTER_UNSPEC)
235 return -EPROTONOSUPPORT;
/* Select a wildcard (local-only) filter type by protocol; the switch
 * statement itself is on lines not visible in this fragment. */
242 spec->type = EFX_FILTER_TCP_WILD;
245 spec->type = EFX_FILTER_UDP_WILD;
248 return -EPROTONOSUPPORT;
251 /* Filter is constructed in terms of source and destination,
252 * with the odd wrinkle that the ports are swapped in a UDP
253 * wildcard filter. We need to convert from local and remote
254 * (= zero for wildcard) addresses.
257 if (proto != IPPROTO_UDP) {
264 __efx_filter_set_ipv4(spec, host1, port1, host, port);
/* Reverse operation: recover protocol, local host and port from a
 * wildcard IP filter spec.  Note the UDP port-swap handled above is
 * undone here by passing the output pointers in swapped positions. */
268 int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
269 u8 *proto, __be32 *host, __be16 *port)
274 switch (spec->type) {
275 case EFX_FILTER_TCP_WILD:
276 *proto = IPPROTO_TCP;
277 __efx_filter_get_ipv4(spec, &host1, &port1, host, port);
279 case EFX_FILTER_UDP_WILD:
280 *proto = IPPROTO_UDP;
281 __efx_filter_get_ipv4(spec, &host1, port, host, &port1);
289 * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
290 * @spec: Specification to initialise
291 * @proto: Transport layer protocol number
292 * @host: Local host address (network byte order)
293 * @port: Local port (network byte order)
294 * @rhost: Remote host address (network byte order)
295 * @rport: Remote port (network byte order)
297 int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
298 __be32 host, __be16 port,
299 __be32 rhost, __be16 rport)
301 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
303 /* This cannot currently be combined with other filtering */
304 if (spec->type != EFX_FILTER_UNSPEC)
305 return -EPROTONOSUPPORT;
/* Full filters cannot match wildcard (zero) ports; the error-return line
 * following this check is not visible in the fragment. */
307 if (port == 0 || rport == 0)
312 spec->type = EFX_FILTER_TCP_FULL;
315 spec->type = EFX_FILTER_UDP_FULL;
318 return -EPROTONOSUPPORT;
/* Hardware layout is source-then-destination: remote = source. */
321 __efx_filter_set_ipv4(spec, rhost, rport, host, port);
/* Reverse operation: recover protocol and both endpoints from a full
 * TCP/UDP filter spec. */
325 int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
326 u8 *proto, __be32 *host, __be16 *port,
327 __be32 *rhost, __be16 *rport)
329 switch (spec->type) {
330 case EFX_FILTER_TCP_FULL:
331 *proto = IPPROTO_TCP;
333 case EFX_FILTER_UDP_FULL:
334 *proto = IPPROTO_UDP;
340 __efx_filter_get_ipv4(spec, rhost, rport, host, port);
345 * efx_filter_set_eth_local - specify local Ethernet address and optional VID
346 * @spec: Specification to initialise
347 * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
348 * @addr: Local Ethernet MAC address
350 int efx_filter_set_eth_local(struct efx_filter_spec *spec,
351 u16 vid, const u8 *addr)
353 EFX_BUG_ON_PARANOID(!(spec->flags &
354 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
356 /* This cannot currently be combined with other filtering */
357 if (spec->type != EFX_FILTER_UNSPEC)
358 return -EPROTONOSUPPORT;
360 if (vid == EFX_FILTER_VID_UNSPEC) {
361 spec->type = EFX_FILTER_MAC_WILD;
364 spec->type = EFX_FILTER_MAC_FULL;
/* Pack the MAC address big-endian style across data[1..2]:
 * data[2] holds the top two bytes, data[1] the low four.
 * (data[0] presumably receives the VID on a line not visible here.) */
368 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
369 spec->data[2] = addr[0] << 8 | addr[1];
374 * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
375 * @spec: Specification to initialise
377 int efx_filter_set_uc_def(struct efx_filter_spec *spec)
379 EFX_BUG_ON_PARANOID(!(spec->flags &
380 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
382 if (spec->type != EFX_FILTER_UNSPEC)
385 spec->type = EFX_FILTER_UC_DEF;
386 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
391 * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
392 * @spec: Specification to initialise
394 int efx_filter_set_mc_def(struct efx_filter_spec *spec)
396 EFX_BUG_ON_PARANOID(!(spec->flags &
397 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
399 if (spec->type != EFX_FILTER_UNSPEC)
402 spec->type = EFX_FILTER_MC_DEF;
403 memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
/* Re-initialise one RX default-filter entry to its baseline state (manual
 * priority, RSS enabled, queue 0) and mark it in use.  Called from probe
 * and whenever a default entry is "cleared" - default filters must always
 * exist (see efx_filter_table_clear_entry()). */
407 static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
409 struct efx_filter_state *state = efx->filter_state;
410 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
411 struct efx_filter_spec *spec = &table->spec[filter_idx];
413 efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL,
414 EFX_FILTER_FLAG_RX_RSS, 0);
/* Index maps directly onto type: 0 -> UC_DEF, 1 -> MC_DEF. */
415 spec->type = EFX_FILTER_UC_DEF + filter_idx;
416 table->used_bitmap[0] |= 1 << filter_idx;
/* Recover VID and MAC address from an Ethernet filter spec.  Inverse of
 * efx_filter_set_eth_local(): data[2] holds the top two address bytes,
 * data[1] the remaining four. */
419 int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
422 switch (spec->type) {
423 case EFX_FILTER_MAC_WILD:
424 *vid = EFX_FILTER_VID_UNSPEC;
426 case EFX_FILTER_MAC_FULL:
427 *vid = spec->data[0];
433 addr[0] = spec->data[2] >> 8;
434 addr[1] = spec->data[2];
435 addr[2] = spec->data[1] >> 24;
436 addr[3] = spec->data[1] >> 16;
437 addr[4] = spec->data[1] >> 8;
438 addr[5] = spec->data[1];
442 /* Build a filter entry and return its n-tuple key. */
443 static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
447 switch (efx_filter_spec_table_id(spec)) {
448 case EFX_FILTER_TABLE_RX_IP: {
449 bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
450 spec->type == EFX_FILTER_UDP_WILD);
451 EFX_POPULATE_OWORD_7(
454 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
456 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
457 FRF_BZ_TCP_UDP, is_udp,
458 FRF_BZ_RXQ_ID, spec->dmaq_id,
459 EFX_DWORD_2, spec->data[2],
460 EFX_DWORD_1, spec->data[1],
461 EFX_DWORD_0, spec->data[0]);
/* Default filters are not hash-addressed: the "key" is just the
 * fixed index within the tiny RX_DEF table. */
466 case EFX_FILTER_TABLE_RX_DEF:
467 /* One filter spec per type */
468 BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
469 BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
470 EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
471 return spec->type - EFX_FILTER_UC_DEF;
473 case EFX_FILTER_TABLE_RX_MAC: {
474 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
475 EFX_POPULATE_OWORD_7(
478 !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
479 FRF_CZ_RMFT_SCATTER_EN,
480 !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
481 FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
482 FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
483 FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
484 FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
485 FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
490 case EFX_FILTER_TABLE_TX_MAC: {
491 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
492 EFX_POPULATE_OWORD_5(*filter,
493 FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
494 FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
495 FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
496 FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
497 FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
/* Fold the wildcard flag and queue id into the key so that TX
 * filters differing only in queue/wildcard hash differently. */
498 data3 = is_wild | spec->dmaq_id << 1;
/* Hash key is the XOR of the three data words and the table-specific
 * data3 (data3 for the RX cases is set on lines not visible here). */
506 return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
/* Two specs match the same traffic iff type and packed data agree; for TX
 * filters the destination queue is part of the match as well. */
509 static bool efx_filter_equal(const struct efx_filter_spec *left,
510 const struct efx_filter_spec *right)
512 if (left->type != right->type ||
513 memcmp(left->data, right->data, sizeof(left->data)))
516 if (left->flags & EFX_FILTER_FLAG_TX &&
517 left->dmaq_id != right->dmaq_id)
/* Open-addressed probe of a filter table, mirroring the hardware's own
 * search: start at hash(key), step by increment(key).  For insertion the
 * depth limit depends on priority (hints give up quickly); for lookup it
 * is the currently programmed search depth.  On success *depth_required
 * reports how deep the entry was found/placed. */
523 static int efx_filter_search(struct efx_filter_table *table,
524 struct efx_filter_spec *spec, u32 key,
525 bool for_insert, unsigned int *depth_required)
527 unsigned hash, incr, filter_idx, depth, depth_max;
529 hash = efx_filter_hash(key);
530 incr = efx_filter_increment(key);
532 filter_idx = hash & (table->size - 1);
534 depth_max = (for_insert ?
535 (spec->priority <= EFX_FILTER_PRI_HINT ?
536 FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
537 table->search_depth[spec->type]);
540 /* Return success if entry is used and matches this spec
541 * or entry is unused and we are trying to insert.
543 if (test_bit(filter_idx, table->used_bitmap) ?
544 efx_filter_equal(spec, &table->spec[filter_idx]) :
546 *depth_required = depth;
550 /* Return failure if we reached the maximum search depth */
551 if (depth == depth_max)
552 return for_insert ? -EBUSY : -ENOENT;
/* table->size is a power of two, so this wraps correctly. */
554 filter_idx = (filter_idx + incr) & (table->size - 1);
560 * Construct/deconstruct external filter IDs. At least the RX filter
561 * IDs must be ordered by matching priority, for RX NFC semantics.
563 * Deconstruction needs to be robust against invalid IDs so that
564 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
565 * accept user-provided IDs.
568 #define EFX_FILTER_MATCH_PRI_COUNT 5
/* Match priority per filter type: lower value = more specific match.
 * These become the "range" field in the upper bits of an external ID. */
570 static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
571 [EFX_FILTER_TCP_FULL] = 0,
572 [EFX_FILTER_UDP_FULL] = 0,
573 [EFX_FILTER_TCP_WILD] = 1,
574 [EFX_FILTER_UDP_WILD] = 1,
575 [EFX_FILTER_MAC_FULL] = 2,
576 [EFX_FILTER_MAC_WILD] = 3,
577 [EFX_FILTER_UC_DEF] = 4,
578 [EFX_FILTER_MC_DEF] = 4,
/* Map an ID "range" (RX ranges 0-4, then TX ranges) back to its table;
 * EFX_FILTER_TABLE_COUNT entries mark invalid ranges. */
581 static const enum efx_filter_table_id efx_filter_range_table[] = {
582 EFX_FILTER_TABLE_RX_IP, /* RX match pri 0 */
583 EFX_FILTER_TABLE_RX_IP,
584 EFX_FILTER_TABLE_RX_MAC,
585 EFX_FILTER_TABLE_RX_MAC,
586 EFX_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
587 EFX_FILTER_TABLE_COUNT, /* TX match pri 0; invalid */
588 EFX_FILTER_TABLE_COUNT, /* invalid */
589 EFX_FILTER_TABLE_TX_MAC,
590 EFX_FILTER_TABLE_TX_MAC, /* TX match pri 3 */
/* Low 13 bits of an external ID hold the table index. */
593 #define EFX_FILTER_INDEX_WIDTH 13
594 #define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
/* Compose an external filter ID: (range << 13) | index, where range is the
 * match priority, offset past the RX ranges for TX filters. */
597 efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
601 range = efx_filter_type_match_pri[spec->type];
602 if (!(spec->flags & EFX_FILTER_FLAG_RX))
603 range += EFX_FILTER_MATCH_PRI_COUNT;
605 return range << EFX_FILTER_INDEX_WIDTH | index;
/* Decompose an (untrusted) ID into its table, tolerating junk input. */
608 static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
610 unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
612 if (range < ARRAY_SIZE(efx_filter_range_table))
613 return efx_filter_range_table[range];
615 return EFX_FILTER_TABLE_COUNT; /* invalid */
/* Extract the table index from an external ID. */
618 static inline unsigned int efx_filter_id_index(u32 id)
620 return id & EFX_FILTER_INDEX_MASK;
/* Derive the RX/TX direction flag implied by an external ID's range. */
623 static inline u8 efx_filter_id_flags(u32 id)
625 unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
627 if (range < EFX_FILTER_MATCH_PRI_COUNT)
628 return EFX_FILTER_FLAG_RX;
630 return EFX_FILTER_FLAG_TX;
/* Report one past the highest RX filter ID this NIC can produce, i.e. the
 * ID formed from the highest populated RX range and its table size.
 * NOTE(review): the loop construct iterating "range" downwards is on lines
 * not visible in this fragment. */
633 u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
635 struct efx_filter_state *state = efx->filter_state;
636 unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
637 enum efx_filter_table_id table_id;
640 table_id = efx_filter_range_table[range];
641 if (state->table[table_id].size != 0)
642 return range << EFX_FILTER_INDEX_WIDTH |
643 state->table[table_id].size;
650 * efx_filter_insert_filter - add or replace a filter
651 * @efx: NIC in which to insert the filter
652 * @spec: Specification for the filter
653 * @replace: Flag for whether the specified filter may replace a filter
654 * with an identical match expression and equal or lower priority
656 * On success, return the filter ID.
657 * On failure, return a negative error code.
659 s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
662 struct efx_filter_state *state = efx->filter_state;
663 struct efx_filter_table *table = efx_filter_spec_table(state, spec);
664 struct efx_filter_spec *saved_spec;
666 unsigned int filter_idx, depth = 0;
670 if (!table || table->size == 0)
673 key = efx_filter_build(&filter, spec);
675 netif_vdbg(efx, hw, efx->net_dev,
676 "%s: type %d search_depth=%d", __func__, spec->type,
677 table->search_depth[spec->type]);
679 spin_lock_bh(&state->lock);
/* Probe for an existing match or a free slot (see efx_filter_search). */
681 rc = efx_filter_search(table, spec, key, true, &depth);
685 BUG_ON(filter_idx >= table->size);
686 saved_spec = &table->spec[filter_idx];
688 if (test_bit(filter_idx, table->used_bitmap)) {
689 /* Should we replace the existing filter? */
694 if (spec->priority < saved_spec->priority) {
699 __set_bit(filter_idx, table->used_bitmap);
/* RX_DEF entries live in a control register, not a hash table; for the
 * hash tables we may need to raise the programmed search depth first so
 * hardware can actually find an entry this deep. */
704 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
705 efx_filter_push_rx_config(efx);
707 if (table->search_depth[spec->type] < depth) {
708 table->search_depth[spec->type] = depth;
709 if (spec->flags & EFX_FILTER_FLAG_TX)
710 efx_filter_push_tx_limits(efx);
712 efx_filter_push_rx_config(efx);
715 efx_writeo(efx, &filter,
716 table->offset + table->step * filter_idx);
719 netif_vdbg(efx, hw, efx->net_dev,
720 "%s: filter type %d index %d rxq %u set",
721 __func__, spec->type, filter_idx, spec->dmaq_id);
722 rc = efx_filter_make_id(spec, filter_idx);
725 spin_unlock_bh(&state->lock);
/* Remove one entry: RX default filters are never truly removed, only reset
 * to their baseline; hash-table entries are cleared in software and the
 * hardware slot overwritten with the all-zero static oword. */
729 static void efx_filter_table_clear_entry(struct efx_nic *efx,
730 struct efx_filter_table *table,
731 unsigned int filter_idx)
/* static => zero-initialised; used as the "empty entry" pattern. */
733 static efx_oword_t filter;
735 if (table->id == EFX_FILTER_TABLE_RX_DEF) {
736 /* RX default filters must always exist */
737 efx_filter_reset_rx_def(efx, filter_idx);
738 efx_filter_push_rx_config(efx);
739 } else if (test_bit(filter_idx, table->used_bitmap)) {
740 __clear_bit(filter_idx, table->used_bitmap);
742 memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
744 efx_writeo(efx, &filter,
745 table->offset + table->step * filter_idx);
750 * efx_filter_remove_id_safe - remove a filter by ID, carefully
751 * @efx: NIC from which to remove the filter
752 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
753 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
755 * This function will range-check @filter_id, so it is safe to call
756 * with a value passed from userland.
758 int efx_filter_remove_id_safe(struct efx_nic *efx,
759 enum efx_filter_priority priority,
762 struct efx_filter_state *state = efx->filter_state;
763 enum efx_filter_table_id table_id;
764 struct efx_filter_table *table;
765 unsigned int filter_idx;
766 struct efx_filter_spec *spec;
/* Validate the untrusted ID piece by piece before touching any table. */
770 table_id = efx_filter_id_table_id(filter_id);
771 if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
773 table = &state->table[table_id];
775 filter_idx = efx_filter_id_index(filter_id);
776 if (filter_idx >= table->size)
778 spec = &table->spec[filter_idx];
780 filter_flags = efx_filter_id_flags(filter_id);
782 spin_lock_bh(&state->lock);
/* Only remove if the entry is live and the caller's priority matches. */
784 if (test_bit(filter_idx, table->used_bitmap) &&
785 spec->priority == priority) {
786 efx_filter_table_clear_entry(efx, table, filter_idx);
787 if (table->used == 0)
788 efx_filter_table_reset_search_depth(table);
794 spin_unlock_bh(&state->lock);
800 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
801 * @efx: NIC from which to remove the filter
802 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
803 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
804 * @spec: Buffer in which to store filter specification
806 * This function will range-check @filter_id, so it is safe to call
807 * with a value passed from userland.
809 int efx_filter_get_filter_safe(struct efx_nic *efx,
810 enum efx_filter_priority priority,
811 u32 filter_id, struct efx_filter_spec *spec_buf)
813 struct efx_filter_state *state = efx->filter_state;
814 enum efx_filter_table_id table_id;
815 struct efx_filter_table *table;
816 struct efx_filter_spec *spec;
817 unsigned int filter_idx;
/* Same untrusted-ID validation sequence as efx_filter_remove_id_safe(). */
821 table_id = efx_filter_id_table_id(filter_id);
822 if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
824 table = &state->table[table_id];
826 filter_idx = efx_filter_id_index(filter_id);
827 if (filter_idx >= table->size)
829 spec = &table->spec[filter_idx];
831 filter_flags = efx_filter_id_flags(filter_id);
833 spin_lock_bh(&state->lock);
/* Copy-out of *spec into *spec_buf happens on lines not visible here. */
835 if (test_bit(filter_idx, table->used_bitmap) &&
836 spec->priority == priority) {
843 spin_unlock_bh(&state->lock);
/* Remove every entry in one table whose priority is <= the given maximum,
 * resetting search depths once the table is empty. */
848 static void efx_filter_table_clear(struct efx_nic *efx,
849 enum efx_filter_table_id table_id,
850 enum efx_filter_priority priority)
852 struct efx_filter_state *state = efx->filter_state;
853 struct efx_filter_table *table = &state->table[table_id];
854 unsigned int filter_idx;
856 spin_lock_bh(&state->lock);
858 for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
859 if (table->spec[filter_idx].priority <= priority)
860 efx_filter_table_clear_entry(efx, table, filter_idx);
861 if (table->used == 0)
862 efx_filter_table_reset_search_depth(table);
864 spin_unlock_bh(&state->lock);
868 * efx_filter_clear_rx - remove RX filters by priority
869 * @efx: NIC from which to remove the filters
870 * @priority: Maximum priority to remove
/* Note: only the IP and MAC tables are cleared; RX default filters are
 * left alone since they must always exist. */
872 void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
874 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
875 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
/* Count live RX filters of the given priority across all RX tables
 * (IP, MAC and default).  The accumulator increment is on a line not
 * visible in this fragment. */
878 u32 efx_filter_count_rx_used(struct efx_nic *efx,
879 enum efx_filter_priority priority)
881 struct efx_filter_state *state = efx->filter_state;
882 enum efx_filter_table_id table_id;
883 struct efx_filter_table *table;
884 unsigned int filter_idx;
887 spin_lock_bh(&state->lock);
889 for (table_id = EFX_FILTER_TABLE_RX_IP;
890 table_id <= EFX_FILTER_TABLE_RX_DEF;
892 table = &state->table[table_id];
893 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
894 if (test_bit(filter_idx, table->used_bitmap) &&
895 table->spec[filter_idx].priority == priority)
900 spin_unlock_bh(&state->lock);
/* Fill buf[] with the external IDs of all live RX filters at the given
 * priority.  Iterates the RX tables in match-priority order so the output
 * satisfies RX NFC ordering requirements (see the ID-construction comment
 * above). */
905 s32 efx_filter_get_rx_ids(struct efx_nic *efx,
906 enum efx_filter_priority priority,
909 struct efx_filter_state *state = efx->filter_state;
910 enum efx_filter_table_id table_id;
911 struct efx_filter_table *table;
912 unsigned int filter_idx;
915 spin_lock_bh(&state->lock);
917 for (table_id = EFX_FILTER_TABLE_RX_IP;
918 table_id <= EFX_FILTER_TABLE_RX_DEF;
920 table = &state->table[table_id];
921 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
922 if (test_bit(filter_idx, table->used_bitmap) &&
923 table->spec[filter_idx].priority == priority) {
/* Overflow check on buf presumably precedes this on missing lines. */
928 buf[count++] = efx_filter_make_id(
929 &table->spec[filter_idx], filter_idx);
934 spin_unlock_bh(&state->lock);
939 /* Restore filter state after reset: rewrite every live entry of every
 * register-backed table to hardware, then re-push the search limits and
 * default-filter configuration. */
940 void efx_restore_filters(struct efx_nic *efx)
942 struct efx_filter_state *state = efx->filter_state;
943 enum efx_filter_table_id table_id;
944 struct efx_filter_table *table;
946 unsigned int filter_idx;
948 spin_lock_bh(&state->lock);
950 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
951 table = &state->table[table_id];
953 /* Check whether this is a regular register table */
954 if (table->step == 0)
957 for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
958 if (!test_bit(filter_idx, table->used_bitmap))
960 efx_filter_build(&filter, &table->spec[filter_idx]);
961 efx_writeo(efx, &filter,
962 table->offset + table->step * filter_idx);
966 efx_filter_push_rx_config(efx);
967 efx_filter_push_tx_limits(efx);
969 spin_unlock_bh(&state->lock);
/* Allocate and initialise the per-NIC filter state: describe which tables
 * exist for this hardware revision, allocate their bitmaps and spec arrays,
 * seed the always-present RX default filters and push the initial RX
 * configuration.  On failure, efx_remove_filters() unwinds everything. */
972 int efx_probe_filters(struct efx_nic *efx)
974 struct efx_filter_state *state;
975 struct efx_filter_table *table;
978 state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
981 efx->filter_state = state;
983 spin_lock_init(&state->lock);
/* Falcon B0 and later have the RX IP filter table (and ARFS flow ids). */
985 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
986 #ifdef CONFIG_RFS_ACCEL
987 state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
988 sizeof(*state->rps_flow_id),
990 if (!state->rps_flow_id)
993 table = &state->table[EFX_FILTER_TABLE_RX_IP];
994 table->id = EFX_FILTER_TABLE_RX_IP;
995 table->offset = FR_BZ_RX_FILTER_TBL0;
996 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
997 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
/* Siena adds the MAC tables and the register-backed default filters
 * (note RX_DEF has no offset/step: it is not a regular table). */
1000 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1001 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
1002 table->id = EFX_FILTER_TABLE_RX_MAC;
1003 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
1004 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
1005 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
1007 table = &state->table[EFX_FILTER_TABLE_RX_DEF];
1008 table->id = EFX_FILTER_TABLE_RX_DEF;
1009 table->size = EFX_FILTER_SIZE_RX_DEF;
1011 table = &state->table[EFX_FILTER_TABLE_TX_MAC];
1012 table->id = EFX_FILTER_TABLE_TX_MAC;
1013 table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
1014 table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
1015 table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
1018 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
1019 table = &state->table[table_id];
1020 if (table->size == 0)
1022 table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
1023 sizeof(unsigned long),
1025 if (!table->used_bitmap)
1027 table->spec = vzalloc(table->size * sizeof(*table->spec));
1032 if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
1033 /* RX default filters must always exist */
1035 for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
1036 efx_filter_reset_rx_def(efx, i);
1039 efx_filter_push_rx_config(efx);
/* Error-path label: free whatever was allocated before failing. */
1044 efx_remove_filters(efx);
/* Free all filter state.  Safe against partial initialisation: kfree/vfree
 * accept NULL, so this doubles as the efx_probe_filters() error path. */
1048 void efx_remove_filters(struct efx_nic *efx)
1050 struct efx_filter_state *state = efx->filter_state;
1051 enum efx_filter_table_id table_id;
1053 for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
1054 kfree(state->table[table_id].used_bitmap);
1055 vfree(state->table[table_id].spec);
1057 #ifdef CONFIG_RFS_ACCEL
1058 kfree(state->rps_flow_id);
#ifdef CONFIG_RFS_ACCEL
/* Accelerated RFS (ndo_rx_flow_steer) callback: build a full IPv4 filter
 * from the packet's 4-tuple and insert it as a low-priority hint steering
 * the flow to @rxq_index.  Returns the filter ID (>= 0) on success. */
1065 int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
1066 u16 rxq_index, u32 flow_id)
1068 struct efx_nic *efx = netdev_priv(net_dev);
1069 struct efx_channel *channel;
1070 struct efx_filter_state *state = efx->filter_state;
1071 struct efx_filter_spec *spec;
1072 const struct iphdr *ip;
1073 const __be16 *ports;
1077 nhoff = skb_network_offset(skb);
1079 if (skb->protocol != htons(ETH_P_IP))
1080 return -EPROTONOSUPPORT;
1082 /* RFS must validate the IP header length before calling us */
1083 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
1084 ip = (const struct iphdr *)(skb->data + nhoff);
/* Fragments carry no L4 ports, so they cannot be steered. */
1085 if (ip_is_fragment(ip))
1086 return -EPROTONOSUPPORT;
1087 EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
1088 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
1090 efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
1091 rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
1092 ip->daddr, ports[1], ip->saddr, ports[0]);
1096 rc = efx_filter_insert_filter(efx, &spec, true);
1100 /* Remember this so we can check whether to expire the filter later */
1101 state->rps_flow_id[rc] = flow_id;
1102 channel = efx_get_channel(efx, skb_get_rx_queue(skb));
1103 ++channel->rfs_filters_added;
1105 netif_info(efx, rx_status, efx->net_dev,
1106 "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
1107 (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
1108 &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
1109 rxq_index, flow_id, rc);
/* Scan up to @quota RX IP hint filters, expiring those the RFS core says
 * are no longer needed (rps_may_expire_flow).  Uses a rotating cursor
 * (rps_expire_index) so successive calls cover the whole table.  Takes the
 * lock opportunistically: if contended, give up and retry next period. */
1114 bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
1116 struct efx_filter_state *state = efx->filter_state;
1117 struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
1118 unsigned mask = table->size - 1;
1122 if (!spin_trylock_bh(&state->lock))
1125 index = state->rps_expire_index;
1126 stop = (index + quota) & mask;
1128 while (index != stop) {
1129 if (test_bit(index, table->used_bitmap) &&
1130 table->spec[index].priority == EFX_FILTER_PRI_HINT &&
1131 rps_may_expire_flow(efx->net_dev,
1132 table->spec[index].dmaq_id,
1133 state->rps_flow_id[index], index)) {
1134 netif_info(efx, rx_status, efx->net_dev,
1135 "expiring filter %d [flow %u]\n",
1136 index, state->rps_flow_id[index]);
1137 efx_filter_table_clear_entry(efx, table, index);
1139 index = (index + 1) & mask;
1142 state->rps_expire_index = stop;
1143 if (table->used == 0)
1144 efx_filter_table_reset_search_depth(table);
1146 spin_unlock_bh(&state->lock);
1150 #endif /* CONFIG_RFS_ACCEL */