/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/in.h>
#include <net/ip.h>
#include "efx.h"
#include "filter.h"
#include "io.h"
#include "nic.h"
#include "regs.h"

/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to pipelined implementation we need to program H/W with a value that
 * is larger than the hop limit we want.
 */
#define FILTER_CTL_SRCH_FUDGE_WILD 3
#define FILTER_CTL_SRCH_FUDGE_FULL 1
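
/* For example (illustrative numbers only): if the deepest TCP wildcard
 * filter inserted so far needed a search depth of 4, then
 * efx_filter_push_rx_config() programs the hardware limit as
 * 4 + FILTER_CTL_SRCH_FUDGE_WILD = 7.
 */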

/* Hard maximum hop limit.  Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in efx_filter_search() when the
 * table is full.
 */
#define FILTER_CTL_SRCH_MAX 200

/* Don't try very hard to find space for performance hints, as this is
 * counter-productive. */
#define FILTER_CTL_SRCH_HINT_MAX 5

enum efx_filter_table_id {
	EFX_FILTER_TABLE_RX_IP = 0,
	EFX_FILTER_TABLE_RX_MAC,
	EFX_FILTER_TABLE_RX_DEF,
	EFX_FILTER_TABLE_TX_MAC,
	EFX_FILTER_TABLE_COUNT,
};

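/* Indices into the RX default-filter table.  The final enumerator is
 * not a filter index but the number of entries; it is used as the
 * table size in efx_probe_filters().
 */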
enum efx_filter_index {
	EFX_FILTER_INDEX_UC_DEF,
	EFX_FILTER_INDEX_MC_DEF,
	EFX_FILTER_SIZE_RX_DEF,
};

struct efx_filter_table {
	enum efx_filter_table_id id;
	u32		offset;		/* address of table relative to BAR */
	unsigned	size;		/* number of entries */
	unsigned	step;		/* step between entries */
	unsigned	used;		/* number currently used */
	unsigned long	*used_bitmap;
	struct efx_filter_spec *spec;
	unsigned	search_depth[EFX_FILTER_TYPE_COUNT];
};

struct efx_filter_state {
	spinlock_t	lock;
	struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
#ifdef CONFIG_RFS_ACCEL
	u32		*rps_flow_id;
	unsigned	rps_expire_index;
#endif
};

/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
static u16 efx_filter_hash(u32 key)
{
	u16 tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}

/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash. */
static u16 efx_filter_increment(u32 key)
{
	return key * 2 - 1;
}
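
/* Illustrative sketch (not a definitive restatement of the hardware
 * behaviour) of the probe sequence used by efx_filter_search() below.
 * The increment is always odd, so with a power-of-two table size the
 * sequence eventually visits every entry:
 *
 *	filter_idx = efx_filter_hash(key) & (table->size - 1);
 *	for (depth = 1; depth <= depth_max; depth++) {
 *		... examine table->spec[filter_idx] ...
 *		filter_idx = (filter_idx + efx_filter_increment(key)) &
 *			     (table->size - 1);
 *	}
 */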

static enum efx_filter_table_id
efx_filter_spec_table_id(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
	BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
	EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
	return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
}

static struct efx_filter_table *
efx_filter_spec_table(struct efx_filter_state *state,
		      const struct efx_filter_spec *spec)
{
	if (spec->type == EFX_FILTER_UNSPEC)
		return NULL;
	else
		return &state->table[efx_filter_spec_table_id(spec)];
}

static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
{
	memset(table->search_depth, 0, sizeof(table->search_depth));
}

static void efx_filter_push_rx_config(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table;
	efx_oword_t filter_ctl;

	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);

	table = &state->table[EFX_FILTER_TABLE_RX_IP];
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_TCP_FULL] +
			    FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_TCP_WILD] +
			    FILTER_CTL_SRCH_FUDGE_WILD);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_UDP_FULL] +
			    FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
			    table->search_depth[EFX_FILTER_UDP_WILD] +
			    FILTER_CTL_SRCH_FUDGE_WILD);

	table = &state->table[EFX_FILTER_TABLE_RX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
			table->search_depth[EFX_FILTER_MAC_FULL] +
			FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
			table->search_depth[EFX_FILTER_MAC_WILD] +
			FILTER_CTL_SRCH_FUDGE_WILD);
	}

	table = &state->table[EFX_FILTER_TABLE_RX_DEF];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
			table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE,
			!!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
			   EFX_FILTER_FLAG_RX_OVERRIDE_IP));
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
			table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE,
			!!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_OVERRIDE_IP));
	}

	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}

static void efx_filter_push_tx_limits(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table;
	efx_oword_t tx_cfg;

	efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);

	table = &state->table[EFX_FILTER_TABLE_TX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
			table->search_depth[EFX_FILTER_MAC_FULL] +
			FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
			table->search_depth[EFX_FILTER_MAC_WILD] +
			FILTER_CTL_SRCH_FUDGE_WILD);
	}

	efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}

static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
					 __be32 host1, __be16 port1,
					 __be32 host2, __be16 port2)
{
	spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
	spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
	spec->data[2] = ntohl(host2);
}
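
/* Resulting spec->data[] layout, as implied by the helper above:
 *
 *	data[0]: bits 31:16 = low 16 bits of host1, bits 15:0 = port1
 *	data[1]: bits 31:16 = port2, bits 15:0 = high 16 bits of host1
 *	data[2]: host2
 *
 * where host1/port1 are the remote (source) values and host2/port2
 * the local (destination) values passed by efx_filter_set_ipv4_full().
 */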

static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
					 __be32 *host1, __be16 *port1,
					 __be32 *host2, __be16 *port2)
{
	*host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
	*port1 = htons(spec->data[0]);
	*host2 = htonl(spec->data[2]);
	*port2 = htons(spec->data[1] >> 16);
}

/**
 * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
 * @spec: Specification to initialise
 * @proto: Transport layer protocol number
 * @host: Local host address (network byte order)
 * @port: Local port (network byte order)
 */
int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
			      __be32 host, __be16 port)
{
	__be32 host1;
	__be16 port1;

	EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));

	/* This cannot currently be combined with other filtering */
	if (spec->type != EFX_FILTER_UNSPEC)
		return -EPROTONOSUPPORT;

	if (port == 0)
		return -EINVAL;

	switch (proto) {
	case IPPROTO_TCP:
		spec->type = EFX_FILTER_TCP_WILD;
		break;
	case IPPROTO_UDP:
		spec->type = EFX_FILTER_UDP_WILD;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	/* Filter is constructed in terms of source and destination,
	 * with the odd wrinkle that the ports are swapped in a UDP
	 * wildcard filter.  We need to convert from local and remote
	 * (= zero for wildcard) addresses.
	 */
	host1 = 0;
	if (proto != IPPROTO_UDP) {
		port1 = 0;
	} else {
		port1 = port;
		port = 0;
	}

	__efx_filter_set_ipv4(spec, host1, port1, host, port);
	return 0;
}
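
/* Illustrative usage sketch (not taken from any in-tree caller): steer
 * traffic for local TCP port 80 to RX queue 0.  The address variable,
 * queue number and lack of error handling are assumptions made for the
 * example only.
 *
 *	__be32 local_ip = ...;		// local IP address, network order
 *	struct efx_filter_spec spec;
 *
 *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 0);
 *	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, local_ip, htons(80));
 *	efx_filter_insert_filter(efx, &spec, false);
 */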

int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
			      u8 *proto, __be32 *host, __be16 *port)
{
	__be32 host1;
	__be16 port1;

	switch (spec->type) {
	case EFX_FILTER_TCP_WILD:
		*proto = IPPROTO_TCP;
		__efx_filter_get_ipv4(spec, &host1, &port1, host, port);
		return 0;
	case EFX_FILTER_UDP_WILD:
		*proto = IPPROTO_UDP;
		__efx_filter_get_ipv4(spec, &host1, port, host, &port1);
		return 0;
	default:
		return -EINVAL;
	}
}

/**
 * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
 * @spec: Specification to initialise
 * @proto: Transport layer protocol number
 * @host: Local host address (network byte order)
 * @port: Local port (network byte order)
 * @rhost: Remote host address (network byte order)
 * @rport: Remote port (network byte order)
 */
int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
			     __be32 host, __be16 port,
			     __be32 rhost, __be16 rport)
{
	EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));

	/* This cannot currently be combined with other filtering */
	if (spec->type != EFX_FILTER_UNSPEC)
		return -EPROTONOSUPPORT;

	if (port == 0 || rport == 0)
		return -EINVAL;

	switch (proto) {
	case IPPROTO_TCP:
		spec->type = EFX_FILTER_TCP_FULL;
		break;
	case IPPROTO_UDP:
		spec->type = EFX_FILTER_UDP_FULL;
		break;
	default:
		return -EPROTONOSUPPORT;
	}

	__efx_filter_set_ipv4(spec, rhost, rport, host, port);
	return 0;
}

int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
			     u8 *proto, __be32 *host, __be16 *port,
			     __be32 *rhost, __be16 *rport)
{
	switch (spec->type) {
	case EFX_FILTER_TCP_FULL:
		*proto = IPPROTO_TCP;
		break;
	case EFX_FILTER_UDP_FULL:
		*proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	__efx_filter_get_ipv4(spec, rhost, rport, host, port);
	return 0;
}

/**
 * efx_filter_set_eth_local - specify local Ethernet address and optional VID
 * @spec: Specification to initialise
 * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
 * @addr: Local Ethernet MAC address
 */
int efx_filter_set_eth_local(struct efx_filter_spec *spec,
			     u16 vid, const u8 *addr)
{
	EFX_BUG_ON_PARANOID(!(spec->flags &
			      (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));

	/* This cannot currently be combined with other filtering */
	if (spec->type != EFX_FILTER_UNSPEC)
		return -EPROTONOSUPPORT;

	if (vid == EFX_FILTER_VID_UNSPEC) {
		spec->type = EFX_FILTER_MAC_WILD;
		spec->data[0] = 0;
	} else {
		spec->type = EFX_FILTER_MAC_FULL;
		spec->data[0] = vid;
	}

	spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
	spec->data[2] = addr[0] << 8 | addr[1];
	return 0;
}
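
/* Note on the encoding above: data[0] holds the VLAN ID (0 for a
 * wildcard match), data[1] the last four bytes of the MAC address and
 * data[2] the first two bytes, matching the layout consumed by
 * efx_filter_build() for the MAC filter tables.
 */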

/**
 * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
 * @spec: Specification to initialise
 */
int efx_filter_set_uc_def(struct efx_filter_spec *spec)
{
	EFX_BUG_ON_PARANOID(!(spec->flags &
			      (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));

	if (spec->type != EFX_FILTER_UNSPEC)
		return -EINVAL;

	spec->type = EFX_FILTER_UC_DEF;
	memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
	return 0;
}

/**
 * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
 * @spec: Specification to initialise
 */
int efx_filter_set_mc_def(struct efx_filter_spec *spec)
{
	EFX_BUG_ON_PARANOID(!(spec->flags &
			      (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));

	if (spec->type != EFX_FILTER_UNSPEC)
		return -EINVAL;

	spec->type = EFX_FILTER_MC_DEF;
	memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
	return 0;
}

static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
	struct efx_filter_spec *spec = &table->spec[filter_idx];

	efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL,
			   EFX_FILTER_FLAG_RX_RSS, 0);
	spec->type = EFX_FILTER_UC_DEF + filter_idx;
	table->used_bitmap[0] |= 1 << filter_idx;
}

int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
			     u16 *vid, u8 *addr)
{
	switch (spec->type) {
	case EFX_FILTER_MAC_WILD:
		*vid = EFX_FILTER_VID_UNSPEC;
		break;
	case EFX_FILTER_MAC_FULL:
		*vid = spec->data[0];
		break;
	default:
		return -EINVAL;
	}

	addr[0] = spec->data[2] >> 8;
	addr[1] = spec->data[2];
	addr[2] = spec->data[1] >> 24;
	addr[3] = spec->data[1] >> 16;
	addr[4] = spec->data[1] >> 8;
	addr[5] = spec->data[1];
	return 0;
}

/* Build a filter entry and return its n-tuple key. */
static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
{
	u32 data3;

	switch (efx_filter_spec_table_id(spec)) {
	case EFX_FILTER_TABLE_RX_IP: {
		bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
			       spec->type == EFX_FILTER_UDP_WILD);
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_BZ_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_BZ_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_BZ_TCP_UDP, is_udp,
			FRF_BZ_RXQ_ID, spec->dmaq_id,
			EFX_DWORD_2, spec->data[2],
			EFX_DWORD_1, spec->data[1],
			EFX_DWORD_0, spec->data[0]);
		data3 = is_udp;
		break;
	}

	case EFX_FILTER_TABLE_RX_DEF:
		/* One filter spec per type */
		BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
		BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
			     EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
		return spec->type - EFX_FILTER_UC_DEF;

	case EFX_FILTER_TABLE_RX_MAC: {
		bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_8(
			*filter,
			FRF_CZ_RMFT_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_CZ_RMFT_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_CZ_RMFT_IP_OVERRIDE,
			!!(spec->flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP),
			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild;
		break;
	}

	case EFX_FILTER_TABLE_TX_MAC: {
		bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_5(*filter,
				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild | spec->dmaq_id << 1;
		break;
	}

	default:
		BUG();
	}

	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}

static bool efx_filter_equal(const struct efx_filter_spec *left,
			     const struct efx_filter_spec *right)
{
	if (left->type != right->type ||
	    memcmp(left->data, right->data, sizeof(left->data)))
		return false;

	if (left->flags & EFX_FILTER_FLAG_TX &&
	    left->dmaq_id != right->dmaq_id)
		return false;

	return true;
}

static int efx_filter_search(struct efx_filter_table *table,
			     struct efx_filter_spec *spec, u32 key,
			     bool for_insert, unsigned int *depth_required)
{
	unsigned hash, incr, filter_idx, depth, depth_max;

	hash = efx_filter_hash(key);
	incr = efx_filter_increment(key);

	filter_idx = hash & (table->size - 1);
	depth = 1;
	depth_max = (for_insert ?
		     (spec->priority <= EFX_FILTER_PRI_HINT ?
		      FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
		     table->search_depth[spec->type]);

	for (;;) {
		/* Return success if entry is used and matches this spec
		 * or entry is unused and we are trying to insert.
		 */
		if (test_bit(filter_idx, table->used_bitmap) ?
		    efx_filter_equal(spec, &table->spec[filter_idx]) :
		    for_insert) {
			*depth_required = depth;
			return filter_idx;
		}

		/* Return failure if we reached the maximum search depth */
		if (depth == depth_max)
			return for_insert ? -EBUSY : -ENOENT;

		filter_idx = (filter_idx + incr) & (table->size - 1);
		++depth;
	}
}

/*
 * Construct/deconstruct external filter IDs.  These must be ordered
 * by matching priority, for RX NFC semantics.
 *
 * Each RX MAC filter entry has a flag for whether it can override an
 * RX IP filter that also matches.  So we assign locations for MAC
 * filters with overriding behaviour, then for IP filters, then for
 * MAC filters without overriding behaviour.
 */

#define EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP	0
#define EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP	1
#define EFX_FILTER_MATCH_PRI_NORMAL_BASE	2

#define EFX_FILTER_INDEX_WIDTH 13
#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
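
/* Sketch of the resulting external ID layout, derived from the
 * definitions above and the helpers below (match_pri currently fits
 * in three bits):
 *
 *	bits 15:13	match priority
 *	bits 12:0	index within the hardware filter table
 *
 * so IDs sort first by match priority and then by table index.
 */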

static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id,
				     unsigned int index, u8 flags)
{
	unsigned int match_pri = EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id;

	if (flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) {
		if (table_id == EFX_FILTER_TABLE_RX_MAC)
			match_pri = EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP;
		else if (table_id == EFX_FILTER_TABLE_RX_DEF)
			match_pri = EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP;
	}

	return match_pri << EFX_FILTER_INDEX_WIDTH | index;
}

static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
{
	unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;

	switch (match_pri) {
	case EFX_FILTER_MATCH_PRI_RX_MAC_OVERRIDE_IP:
		return EFX_FILTER_TABLE_RX_MAC;
	case EFX_FILTER_MATCH_PRI_RX_DEF_OVERRIDE_IP:
		return EFX_FILTER_TABLE_RX_DEF;
	default:
		return match_pri - EFX_FILTER_MATCH_PRI_NORMAL_BASE;
	}
}

static inline unsigned int efx_filter_id_index(u32 id)
{
	return id & EFX_FILTER_INDEX_MASK;
}

static inline u8 efx_filter_id_flags(u32 id)
{
	unsigned int match_pri = id >> EFX_FILTER_INDEX_WIDTH;

	if (match_pri < EFX_FILTER_MATCH_PRI_NORMAL_BASE)
		return EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP;
	else if (match_pri <=
		 EFX_FILTER_MATCH_PRI_NORMAL_BASE + EFX_FILTER_TABLE_RX_DEF)
		return EFX_FILTER_FLAG_RX;
	else
		return EFX_FILTER_FLAG_TX;
}

u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	unsigned int table_id = EFX_FILTER_TABLE_RX_DEF;

	do {
		if (state->table[table_id].size != 0)
			return ((EFX_FILTER_MATCH_PRI_NORMAL_BASE + table_id)
				<< EFX_FILTER_INDEX_WIDTH) +
				state->table[table_id].size;
	} while (table_id--);

	return 0;
}

/**
 * efx_filter_insert_filter - add or replace a filter
 * @efx: NIC in which to insert the filter
 * @spec: Specification for the filter
 * @replace: Flag for whether the specified filter may replace a filter
 *	with an identical match expression and equal or lower priority
 *
 * On success, return the filter ID.
 * On failure, return a negative error code.
 */
s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
			     bool replace)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = efx_filter_spec_table(state, spec);
	struct efx_filter_spec *saved_spec;
	efx_oword_t filter;
	unsigned int filter_idx, depth;
	u32 key;
	int rc;

	if (!table || table->size == 0)
		return -EINVAL;

	key = efx_filter_build(&filter, spec);

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: type %d search_depth=%d", __func__, spec->type,
		   table->search_depth[spec->type]);

	spin_lock_bh(&state->lock);

	rc = efx_filter_search(table, spec, key, true, &depth);
	if (rc < 0)
		goto out;
	filter_idx = rc;
	BUG_ON(filter_idx >= table->size);
	saved_spec = &table->spec[filter_idx];

	if (test_bit(filter_idx, table->used_bitmap)) {
		/* Should we replace the existing filter? */
		if (!replace) {
			rc = -EEXIST;
			goto out;
		}
		if (spec->priority < saved_spec->priority) {
			rc = -EPERM;
			goto out;
		}
	} else {
		__set_bit(filter_idx, table->used_bitmap);
		++table->used;
	}
	*saved_spec = *spec;

	if (table->id == EFX_FILTER_TABLE_RX_DEF) {
		efx_filter_push_rx_config(efx);
	} else {
		if (table->search_depth[spec->type] < depth) {
			table->search_depth[spec->type] = depth;
			if (spec->flags & EFX_FILTER_FLAG_TX)
				efx_filter_push_tx_limits(efx);
			else
				efx_filter_push_rx_config(efx);
		}

		efx_writeo(efx, &filter,
			   table->offset + table->step * filter_idx);
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: filter type %d index %d rxq %u set",
		   __func__, spec->type, filter_idx, spec->dmaq_id);
	rc = efx_filter_make_id(table->id, filter_idx, spec->flags);

out:
	spin_unlock_bh(&state->lock);
	return rc;
}

static void efx_filter_table_clear_entry(struct efx_nic *efx,
					 struct efx_filter_table *table,
					 unsigned int filter_idx)
{
	static efx_oword_t filter;

	if (table->id == EFX_FILTER_TABLE_RX_DEF) {
		/* RX default filters must always exist */
		efx_filter_reset_rx_def(efx, filter_idx);
		efx_filter_push_rx_config(efx);
	} else if (test_bit(filter_idx, table->used_bitmap)) {
		__clear_bit(filter_idx, table->used_bitmap);
		--table->used;
		memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

		efx_writeo(efx, &filter,
			   table->offset + table->step * filter_idx);
	}
}

/**
 * efx_filter_remove_id_safe - remove a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
int efx_filter_remove_id_safe(struct efx_nic *efx,
			      enum efx_filter_priority priority,
			      u32 filter_id)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	unsigned int filter_idx;
	struct efx_filter_spec *spec;
	u8 filter_flags;
	int rc;

	table_id = efx_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	filter_flags = efx_filter_id_flags(filter_id);

	spin_lock_bh(&state->lock);

	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority &&
	    !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
		efx_filter_table_clear_entry(efx, table, filter_idx);
		if (table->used == 0)
			efx_filter_table_reset_search_depth(table);
		rc = 0;
	} else {
		rc = -ENOENT;
	}

	spin_unlock_bh(&state->lock);

	return rc;
}

/**
 * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
 * @efx: NIC from which to remove the filter
 * @priority: Priority of filter, as passed to @efx_filter_insert_filter
 * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
 * @spec: Buffer in which to store filter specification
 *
 * This function will range-check @filter_id, so it is safe to call
 * with a value passed from userland.
 */
int efx_filter_get_filter_safe(struct efx_nic *efx,
			       enum efx_filter_priority priority,
			       u32 filter_id, struct efx_filter_spec *spec_buf)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	struct efx_filter_spec *spec;
	unsigned int filter_idx;
	u8 filter_flags;
	int rc;

	table_id = efx_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	filter_flags = efx_filter_id_flags(filter_id);

	spin_lock_bh(&state->lock);

	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority &&
	    !((spec->flags ^ filter_flags) & EFX_FILTER_FLAG_RX_OVERRIDE_IP)) {
		*spec_buf = *spec;
		rc = 0;
	} else {
		rc = -ENOENT;
	}

	spin_unlock_bh(&state->lock);

	return rc;
}

static void efx_filter_table_clear(struct efx_nic *efx,
				   enum efx_filter_table_id table_id,
				   enum efx_filter_priority priority)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = &state->table[table_id];
	unsigned int filter_idx;

	spin_lock_bh(&state->lock);

	for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
		if (table->spec[filter_idx].priority <= priority)
			efx_filter_table_clear_entry(efx, table, filter_idx);
	if (table->used == 0)
		efx_filter_table_reset_search_depth(table);

	spin_unlock_bh(&state->lock);
}

/**
 * efx_filter_clear_rx - remove RX filters by priority
 * @efx: NIC from which to remove the filters
 * @priority: Maximum priority to remove
 */
void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
{
	efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
	efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
}

u32 efx_filter_count_rx_used(struct efx_nic *efx,
			     enum efx_filter_priority priority)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	unsigned int filter_idx;
	u32 count = 0;

	spin_lock_bh(&state->lock);

	for (table_id = EFX_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority)
				++count;
		}
	}

	spin_unlock_bh(&state->lock);

	return count;
}

s32 efx_filter_get_rx_ids(struct efx_nic *efx,
			  enum efx_filter_priority priority,
			  u32 *buf, u32 size)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&state->lock);

	for (table_id = EFX_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority) {
				if (count == size) {
					count = -EMSGSIZE;
					goto out;
				}
				buf[count++] = efx_filter_make_id(
					table_id, filter_idx,
					table->spec[filter_idx].flags);
			}
		}
	}
out:
	spin_unlock_bh(&state->lock);

	return count;
}

/* Restore filter state after reset */
void efx_restore_filters(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;
	struct efx_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&state->lock);

	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];

		/* Check whether this is a regular register table */
		if (table->step == 0)
			continue;

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap))
				continue;
			efx_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_filter_push_rx_config(efx);
	efx_filter_push_tx_limits(efx);

	spin_unlock_bh(&state->lock);
}

int efx_probe_filters(struct efx_nic *efx)
{
	struct efx_filter_state *state;
	struct efx_filter_table *table;
	unsigned table_id;

	state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	efx->filter_state = state;

	spin_lock_init(&state->lock);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
#ifdef CONFIG_RFS_ACCEL
		state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
					     sizeof(*state->rps_flow_id),
					     GFP_KERNEL);
		if (!state->rps_flow_id)
			goto fail;
#endif
		table = &state->table[EFX_FILTER_TABLE_RX_IP];
		table->id = EFX_FILTER_TABLE_RX_IP;
		table->offset = FR_BZ_RX_FILTER_TBL0;
		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
	}

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		table = &state->table[EFX_FILTER_TABLE_RX_MAC];
		table->id = EFX_FILTER_TABLE_RX_MAC;
		table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
		table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
		table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;

		table = &state->table[EFX_FILTER_TABLE_RX_DEF];
		table->id = EFX_FILTER_TABLE_RX_DEF;
		table->size = EFX_FILTER_SIZE_RX_DEF;

		table = &state->table[EFX_FILTER_TABLE_TX_MAC];
		table->id = EFX_FILTER_TABLE_TX_MAC;
		table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
		table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
		table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
	}

	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		if (table->size == 0)
			continue;
		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
					     sizeof(unsigned long),
					     GFP_KERNEL);
		if (!table->used_bitmap)
			goto fail;
		table->spec = vzalloc(table->size * sizeof(*table->spec));
		if (!table->spec)
			goto fail;
	}

	if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
		/* RX default filters must always exist */
		unsigned i;
		for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
			efx_filter_reset_rx_def(efx, i);
	}

	efx_filter_push_rx_config(efx);

	return 0;

fail:
	efx_remove_filters(efx);
	return -ENOMEM;
}

void efx_remove_filters(struct efx_nic *efx)
{
	struct efx_filter_state *state = efx->filter_state;
	enum efx_filter_table_id table_id;

	for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
		kfree(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);
	}
#ifdef CONFIG_RFS_ACCEL
	kfree(state->rps_flow_id);
#endif
	kfree(state);
}

#ifdef CONFIG_RFS_ACCEL

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_spec spec;
	const struct iphdr *ip;
	const __be16 *ports;
	int nhoff;
	int rc;

	nhoff = skb_network_offset(skb);

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	/* RFS must validate the IP header length before calling us */
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;
	EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
	rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
				      ip->daddr, ports[1], ip->saddr, ports[0]);
	if (rc)
		return rc;

	rc = efx_filter_insert_filter(efx, &spec, true);
	if (rc < 0)
		return rc;

	/* Remember this so we can check whether to expire the filter later */
	state->rps_flow_id[rc] = flow_id;
	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
	++channel->rfs_filters_added;

	netif_info(efx, rx_status, efx->net_dev,
		   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
		   (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
		   &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
		   rxq_index, flow_id, rc);

	return rc;
}

bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
{
	struct efx_filter_state *state = efx->filter_state;
	struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
	unsigned mask = table->size - 1;
	unsigned index;
	unsigned stop;

	if (!spin_trylock_bh(&state->lock))
		return false;

	index = state->rps_expire_index;
	stop = (index + quota) & mask;

	while (index != stop) {
		if (test_bit(index, table->used_bitmap) &&
		    table->spec[index].priority == EFX_FILTER_PRI_HINT &&
		    rps_may_expire_flow(efx->net_dev,
					table->spec[index].dmaq_id,
					state->rps_flow_id[index], index)) {
			netif_info(efx, rx_status, efx->net_dev,
				   "expiring filter %d [flow %u]\n",
				   index, state->rps_flow_id[index]);
			efx_filter_table_clear_entry(efx, table, index);
		}
		index = (index + 1) & mask;
	}

	state->rps_expire_index = stop;
	if (table->used == 0)
		efx_filter_table_reset_search_depth(table);

	spin_unlock_bh(&state->lock);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */