1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/mutex.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/vport.h>
36 #include <linux/mlx5/eswitch.h>
37
38 #include "mlx5_core.h"
39 #include "fs_core.h"
40 #include "fs_cmd.h"
41 #include "fs_ft_pool.h"
42 #include "diag/fs_tracepoint.h"
43
44 #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
45 sizeof(struct init_tree_node))
46
47 #define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
48 ...) {.type = FS_TYPE_PRIO,\
49 .min_ft_level = min_level_val,\
50 .num_levels = num_levels_val,\
51 .num_leaf_prios = num_prios_val,\
52 .caps = caps_val,\
53 .children = (struct init_tree_node[]) {__VA_ARGS__},\
54 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
55 }
56
57 #define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
58 ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
59 __VA_ARGS__)\
60
61 #define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \
62 .def_miss_action = def_miss_act,\
63 .children = (struct init_tree_node[]) {__VA_ARGS__},\
64 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
65 }
66
67 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
68 sizeof(long))
69
70 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
71
72 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
73 .caps = (long[]) {__VA_ARGS__} }
74
75 #define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
76 FS_CAP(flow_table_properties_nic_receive.modify_root), \
77 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
78 FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
79
80 #define FS_CHAINING_CAPS_EGRESS \
81 FS_REQUIRED_CAPS( \
82 FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
83 FS_CAP(flow_table_properties_nic_transmit.modify_root), \
84 FS_CAP(flow_table_properties_nic_transmit \
85 .identified_miss_table_mode), \
86 FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
87
88 #define FS_CHAINING_CAPS_RDMA_TX \
89 FS_REQUIRED_CAPS( \
90 FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
91 FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \
92 FS_CAP(flow_table_properties_nic_transmit_rdma \
93 .identified_miss_table_mode), \
94 FS_CAP(flow_table_properties_nic_transmit_rdma \
95 .flow_table_modify))
96
97 #define LEFTOVERS_NUM_LEVELS 1
98 #define LEFTOVERS_NUM_PRIOS 1
99
100 #define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
101 #define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1
102
103 #define BY_PASS_PRIO_NUM_LEVELS 1
104 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
105 LEFTOVERS_NUM_PRIOS)
106
107 #define ETHTOOL_PRIO_NUM_LEVELS 1
108 #define ETHTOOL_NUM_PRIOS 11
109 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
110 /* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
111 #define KERNEL_NIC_PRIO_NUM_LEVELS 7
112 #define KERNEL_NIC_NUM_PRIOS 1
113 /* One more level for tc */
114 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
115
116 #define KERNEL_NIC_TC_NUM_PRIOS 1
117 #define KERNEL_NIC_TC_NUM_LEVELS 3
118
119 #define ANCHOR_NUM_LEVELS 1
120 #define ANCHOR_NUM_PRIOS 1
121 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
122
123 #define OFFLOADS_MAX_FT 2
124 #define OFFLOADS_NUM_PRIOS 2
125 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
126
127 #define LAG_PRIO_NUM_LEVELS 1
128 #define LAG_NUM_PRIOS 1
129 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
130
131 #define KERNEL_TX_IPSEC_NUM_PRIOS 1
132 #define KERNEL_TX_IPSEC_NUM_LEVELS 1
133 #define KERNEL_TX_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
134
135 struct node_caps {
136 size_t arr_sz;
137 long *caps;
138 };
139
140 static struct init_tree_node {
141 enum fs_node_type type;
142 struct init_tree_node *children;
143 int ar_size;
144 struct node_caps caps;
145 int min_ft_level;
146 int num_leaf_prios;
147 int prio;
148 int num_levels;
149 enum mlx5_flow_table_miss_action def_miss_action;
150 } root_fs = {
151 .type = FS_TYPE_NAMESPACE,
152 .ar_size = 7,
153 .children = (struct init_tree_node[]){
154 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
155 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
156 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
157 BY_PASS_PRIO_NUM_LEVELS))),
158 ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
159 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
160 ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
161 LAG_PRIO_NUM_LEVELS))),
162 ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
163 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
164 ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
165 OFFLOADS_MAX_FT))),
166 ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
167 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
168 ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
169 ETHTOOL_PRIO_NUM_LEVELS))),
170 ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
171 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
172 ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
173 KERNEL_NIC_TC_NUM_LEVELS),
174 ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
175 KERNEL_NIC_PRIO_NUM_LEVELS))),
176 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
177 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
178 ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
179 LEFTOVERS_NUM_LEVELS))),
180 ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
181 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
182 ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
183 ANCHOR_NUM_LEVELS))),
184 }
185 };
186
187 static struct init_tree_node egress_root_fs = {
188 .type = FS_TYPE_NAMESPACE,
189 .ar_size = 2,
190 .children = (struct init_tree_node[]) {
191 ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
192 FS_CHAINING_CAPS_EGRESS,
193 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
194 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
195 BY_PASS_PRIO_NUM_LEVELS))),
196 ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
197 FS_CHAINING_CAPS_EGRESS,
198 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
199 ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
200 KERNEL_TX_IPSEC_NUM_LEVELS))),
201 }
202 };
203
204 enum {
205 RDMA_RX_COUNTERS_PRIO,
206 RDMA_RX_BYPASS_PRIO,
207 RDMA_RX_KERNEL_PRIO,
208 };
209
210 #define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
211 #define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
212 #define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
213
214 static struct init_tree_node rdma_rx_root_fs = {
215 .type = FS_TYPE_NAMESPACE,
216 .ar_size = 3,
217 .children = (struct init_tree_node[]) {
218 [RDMA_RX_COUNTERS_PRIO] =
219 ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
220 FS_CHAINING_CAPS,
221 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
222 ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
223 RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
224 [RDMA_RX_BYPASS_PRIO] =
225 ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
226 FS_CHAINING_CAPS,
227 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
228 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
229 BY_PASS_PRIO_NUM_LEVELS))),
230 [RDMA_RX_KERNEL_PRIO] =
231 ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
232 FS_CHAINING_CAPS,
233 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
234 ADD_MULTIPLE_PRIO(1, 1))),
235 }
236 };
237
238 enum {
239 RDMA_TX_COUNTERS_PRIO,
240 RDMA_TX_BYPASS_PRIO,
241 };
242
243 #define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
244 #define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
245
246 static struct init_tree_node rdma_tx_root_fs = {
247 .type = FS_TYPE_NAMESPACE,
248 .ar_size = 2,
249 .children = (struct init_tree_node[]) {
250 [RDMA_TX_COUNTERS_PRIO] =
251 ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
252 FS_CHAINING_CAPS,
253 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
254 ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
255 RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
256 [RDMA_TX_BYPASS_PRIO] =
257 ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
258 FS_CHAINING_CAPS_RDMA_TX,
259 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
260 ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
261 BY_PASS_PRIO_NUM_LEVELS))),
262 }
263 };
264
265 enum fs_i_lock_class {
266 FS_LOCK_GRANDPARENT,
267 FS_LOCK_PARENT,
268 FS_LOCK_CHILD
269 };
270
271 static const struct rhashtable_params rhash_fte = {
272 .key_len = sizeof_field(struct fs_fte, val),
273 .key_offset = offsetof(struct fs_fte, val),
274 .head_offset = offsetof(struct fs_fte, hash),
275 .automatic_shrinking = true,
276 .min_size = 1,
277 };
278
279 static const struct rhashtable_params rhash_fg = {
280 .key_len = sizeof_field(struct mlx5_flow_group, mask),
281 .key_offset = offsetof(struct mlx5_flow_group, mask),
282 .head_offset = offsetof(struct mlx5_flow_group, hash),
283 .automatic_shrinking = true,
284 .min_size = 1,
285
286 };
287
288 static void del_hw_flow_table(struct fs_node *node);
289 static void del_hw_flow_group(struct fs_node *node);
290 static void del_hw_fte(struct fs_node *node);
291 static void del_sw_flow_table(struct fs_node *node);
292 static void del_sw_flow_group(struct fs_node *node);
293 static void del_sw_fte(struct fs_node *node);
294 static void del_sw_prio(struct fs_node *node);
295 static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (destination) is a special case that requires
 * locking the FTE for the entire deletion process.
 */
299 static void del_sw_hw_rule(struct fs_node *node);
300 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
301 struct mlx5_flow_destination *d2);
302 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
303 static struct mlx5_flow_rule *
304 find_flow_rule(struct fs_fte *fte,
305 struct mlx5_flow_destination *dest);
306
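/* fs_node lifetime: tree_init_node() starts a node with refcount 1 and
 * registers its HW/SW destructors.  tree_add_node() sets the node's parent
 * (taking a reference on it) and inherits the parent's root namespace.
 * tree_put_node() drops a reference and, on the last put, runs del_hw_func
 * then del_sw_func, unlinks the node from the parent's child list and
 * releases the reference held on the parent.
 */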
static void tree_init_node(struct fs_node *node,
308 void (*del_hw_func)(struct fs_node *),
309 void (*del_sw_func)(struct fs_node *))
310 {
311 refcount_set(&node->refcount, 1);
312 INIT_LIST_HEAD(&node->list);
313 INIT_LIST_HEAD(&node->children);
314 init_rwsem(&node->lock);
315 node->del_hw_func = del_hw_func;
316 node->del_sw_func = del_sw_func;
317 node->active = false;
318 }
319
static void tree_add_node(struct fs_node *node, struct fs_node *parent)
321 {
322 if (parent)
323 refcount_inc(&parent->refcount);
324 node->parent = parent;
325
326 /* Parent is the root */
327 if (!parent)
328 node->root = node;
329 else
330 node->root = parent->root;
331 }
332
static int tree_get_node(struct fs_node *node)
334 {
335 return refcount_inc_not_zero(&node->refcount);
336 }
337
static void nested_down_read_ref_node(struct fs_node *node,
339 enum fs_i_lock_class class)
340 {
341 if (node) {
342 down_read_nested(&node->lock, class);
343 refcount_inc(&node->refcount);
344 }
345 }
346
static void nested_down_write_ref_node(struct fs_node *node,
348 enum fs_i_lock_class class)
349 {
350 if (node) {
351 down_write_nested(&node->lock, class);
352 refcount_inc(&node->refcount);
353 }
354 }
355
static void down_write_ref_node(struct fs_node *node, bool locked)
357 {
358 if (node) {
359 if (!locked)
360 down_write(&node->lock);
361 refcount_inc(&node->refcount);
362 }
363 }
364
static void up_read_ref_node(struct fs_node *node)
366 {
367 refcount_dec(&node->refcount);
368 up_read(&node->lock);
369 }
370
static void up_write_ref_node(struct fs_node *node, bool locked)
372 {
373 refcount_dec(&node->refcount);
374 if (!locked)
375 up_write(&node->lock);
376 }
377
static void tree_put_node(struct fs_node *node, bool locked)
379 {
380 struct fs_node *parent_node = node->parent;
381
382 if (refcount_dec_and_test(&node->refcount)) {
383 if (node->del_hw_func)
384 node->del_hw_func(node);
385 if (parent_node) {
386 down_write_ref_node(parent_node, locked);
387 list_del_init(&node->list);
388 }
389 node->del_sw_func(node);
390 if (parent_node)
391 up_write_ref_node(parent_node, locked);
392 node = NULL;
393 }
394 if (!node && parent_node)
395 tree_put_node(parent_node, locked);
396 }
397
static int tree_remove_node(struct fs_node *node, bool locked)
399 {
400 if (refcount_read(&node->refcount) > 1) {
401 refcount_dec(&node->refcount);
402 return -EEXIST;
403 }
404 tree_put_node(node, locked);
405 return 0;
406 }
407
static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
409 unsigned int prio)
410 {
411 struct fs_prio *iter_prio;
412
413 fs_for_each_prio(iter_prio, ns) {
414 if (iter_prio->prio == prio)
415 return iter_prio;
416 }
417
418 return NULL;
419 }
420
static bool is_fwd_next_action(u32 action)
422 {
423 return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
424 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
425 }
426
static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
428 {
429 return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM ||
430 type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE ||
431 type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
432 type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
433 type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
434 type == MLX5_FLOW_DESTINATION_TYPE_TIR;
435 }
436
static bool check_valid_spec(const struct mlx5_flow_spec *spec)
438 {
439 int i;
440
441 for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
442 if (spec->match_value[i] & ~spec->match_criteria[i]) {
443 pr_warn("mlx5_core: match_value differs from match_criteria\n");
444 return false;
445 }
446
447 return true;
448 }
449
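/* Return the root namespace that owns @node, or NULL (with a warning) if the
 * node's root is not a namespace, i.e. the node was never added to the tree
 * or has been corrupted.
 */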
struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
451 {
452 struct fs_node *root;
453 struct mlx5_flow_namespace *ns;
454
455 root = node->root;
456
457 if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
458 pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
459 return NULL;
460 }
461
462 ns = container_of(root, struct mlx5_flow_namespace, node);
463 return container_of(ns, struct mlx5_flow_root_namespace, ns);
464 }
465
static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
467 {
468 struct mlx5_flow_root_namespace *root = find_root(node);
469
470 if (root)
471 return root->dev->priv.steering;
472 return NULL;
473 }
474
static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
476 {
477 struct mlx5_flow_root_namespace *root = find_root(node);
478
479 if (root)
480 return root->dev;
481 return NULL;
482 }
483
static void del_sw_ns(struct fs_node *node)
485 {
486 kfree(node);
487 }
488
static void del_sw_prio(struct fs_node *node)
490 {
491 kfree(node);
492 }
493
static void del_hw_flow_table(struct fs_node *node)
495 {
496 struct mlx5_flow_root_namespace *root;
497 struct mlx5_flow_table *ft;
498 struct mlx5_core_dev *dev;
499 int err;
500
501 fs_get_obj(ft, node);
502 dev = get_dev(&ft->node);
503 root = find_root(&ft->node);
504 trace_mlx5_fs_del_ft(ft);
505
506 if (node->active) {
507 err = root->cmds->destroy_flow_table(root, ft);
508 if (err)
509 mlx5_core_warn(dev, "flow steering can't destroy ft\n");
510 }
511 }
512
static void del_sw_flow_table(struct fs_node *node)
514 {
515 struct mlx5_flow_table *ft;
516 struct fs_prio *prio;
517
518 fs_get_obj(ft, node);
519
520 rhltable_destroy(&ft->fgs_hash);
521 if (ft->node.parent) {
522 fs_get_obj(prio, ft->node.parent);
523 prio->num_ft--;
524 }
525 kfree(ft);
526 }
527
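/* Push the FTE's accumulated modify_mask to the device via update_fte() and
 * clear the mask, so the hardware entry stays in sync with the remaining
 * destinations and actions.
 */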
static void modify_fte(struct fs_fte *fte)
529 {
530 struct mlx5_flow_root_namespace *root;
531 struct mlx5_flow_table *ft;
532 struct mlx5_flow_group *fg;
533 struct mlx5_core_dev *dev;
534 int err;
535
536 fs_get_obj(fg, fte->node.parent);
537 fs_get_obj(ft, fg->node.parent);
538 dev = get_dev(&fte->node);
539
540 root = find_root(&ft->node);
541 err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
542 if (err)
543 mlx5_core_warn(dev,
544 "%s can't del rule fg id=%d fte_index=%d\n",
545 __func__, fg->id, fte->index);
546 fte->modify_mask = 0;
547 }
548
static void del_sw_hw_rule(struct fs_node *node)
550 {
551 struct mlx5_flow_rule *rule;
552 struct fs_fte *fte;
553
554 fs_get_obj(rule, node);
555 fs_get_obj(fte, rule->node.parent);
556 trace_mlx5_fs_del_rule(rule);
557 if (is_fwd_next_action(rule->sw_action)) {
558 mutex_lock(&rule->dest_attr.ft->lock);
559 list_del(&rule->next_ft);
560 mutex_unlock(&rule->dest_attr.ft->lock);
561 }
562
563 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
564 --fte->dests_size;
565 fte->modify_mask |=
566 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
567 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
568 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
569 goto out;
570 }
571
572 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
573 --fte->dests_size;
574 fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
575 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
576 goto out;
577 }
578
579 if (is_fwd_dest_type(rule->dest_attr.type)) {
580 --fte->dests_size;
581 --fte->fwd_dests;
582
583 if (!fte->fwd_dests)
584 fte->action.action &=
585 ~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
586 fte->modify_mask |=
587 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
588 goto out;
589 }
590 out:
591 kfree(rule);
592 }
593
static void del_hw_fte(struct fs_node *node)
595 {
596 struct mlx5_flow_root_namespace *root;
597 struct mlx5_flow_table *ft;
598 struct mlx5_flow_group *fg;
599 struct mlx5_core_dev *dev;
600 struct fs_fte *fte;
601 int err;
602
603 fs_get_obj(fte, node);
604 fs_get_obj(fg, fte->node.parent);
605 fs_get_obj(ft, fg->node.parent);
606
607 trace_mlx5_fs_del_fte(fte);
608 WARN_ON(fte->dests_size);
609 dev = get_dev(&ft->node);
610 root = find_root(&ft->node);
611 if (node->active) {
612 err = root->cmds->delete_fte(root, ft, fte);
613 if (err)
614 mlx5_core_warn(dev,
615 "flow steering can't delete fte in index %d of flow group id %d\n",
616 fte->index, fg->id);
617 node->active = false;
618 }
619 }
620
static void del_sw_fte(struct fs_node *node)
622 {
623 struct mlx5_flow_steering *steering = get_steering(node);
624 struct mlx5_flow_group *fg;
625 struct fs_fte *fte;
626 int err;
627
628 fs_get_obj(fte, node);
629 fs_get_obj(fg, fte->node.parent);
630
631 err = rhashtable_remove_fast(&fg->ftes_hash,
632 &fte->hash,
633 rhash_fte);
634 WARN_ON(err);
635 ida_free(&fg->fte_allocator, fte->index - fg->start_index);
636 kmem_cache_free(steering->ftes_cache, fte);
637 }
638
static void del_hw_flow_group(struct fs_node *node)
640 {
641 struct mlx5_flow_root_namespace *root;
642 struct mlx5_flow_group *fg;
643 struct mlx5_flow_table *ft;
644 struct mlx5_core_dev *dev;
645
646 fs_get_obj(fg, node);
647 fs_get_obj(ft, fg->node.parent);
648 dev = get_dev(&ft->node);
649 trace_mlx5_fs_del_fg(fg);
650
651 root = find_root(&ft->node);
652 if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
653 mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
654 fg->id, ft->id);
655 }
656
static void del_sw_flow_group(struct fs_node *node)
658 {
659 struct mlx5_flow_steering *steering = get_steering(node);
660 struct mlx5_flow_group *fg;
661 struct mlx5_flow_table *ft;
662 int err;
663
664 fs_get_obj(fg, node);
665 fs_get_obj(ft, fg->node.parent);
666
667 rhashtable_destroy(&fg->ftes_hash);
668 ida_destroy(&fg->fte_allocator);
669 if (ft->autogroup.active &&
670 fg->max_ftes == ft->autogroup.group_size &&
671 fg->start_index < ft->autogroup.max_fte)
672 ft->autogroup.num_groups--;
673 err = rhltable_remove(&ft->fgs_hash,
674 &fg->hash,
675 rhash_fg);
676 WARN_ON(err);
677 kmem_cache_free(steering->fgs_cache, fg);
678 }
679
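/* Allocate an index for @fte inside @fg's range, add it to the group's FTE
 * hash table and link it as a child of the group node.
 */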
static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
681 {
682 int index;
683 int ret;
684
685 index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
686 if (index < 0)
687 return index;
688
689 fte->index = index + fg->start_index;
690 ret = rhashtable_insert_fast(&fg->ftes_hash,
691 &fte->hash,
692 rhash_fte);
693 if (ret)
694 goto err_ida_remove;
695
696 tree_add_node(&fte->node, &fg->node);
697 list_add_tail(&fte->node.list, &fg->node.children);
698 return 0;
699
700 err_ida_remove:
701 ida_free(&fg->fte_allocator, index);
702 return ret;
703 }
704
static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
706 const struct mlx5_flow_spec *spec,
707 struct mlx5_flow_act *flow_act)
708 {
709 struct mlx5_flow_steering *steering = get_steering(&ft->node);
710 struct fs_fte *fte;
711
712 fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
713 if (!fte)
714 return ERR_PTR(-ENOMEM);
715
716 memcpy(fte->val, &spec->match_value, sizeof(fte->val));
717 fte->node.type = FS_TYPE_FLOW_ENTRY;
718 fte->action = *flow_act;
719 fte->flow_context = spec->flow_context;
720
721 tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
722
723 return fte;
724 }
725
static void dealloc_flow_group(struct mlx5_flow_steering *steering,
727 struct mlx5_flow_group *fg)
728 {
729 rhashtable_destroy(&fg->ftes_hash);
730 kmem_cache_free(steering->fgs_cache, fg);
731 }
732
static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
734 u8 match_criteria_enable,
735 const void *match_criteria,
736 int start_index,
737 int end_index)
738 {
739 struct mlx5_flow_group *fg;
740 int ret;
741
742 fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
743 if (!fg)
744 return ERR_PTR(-ENOMEM);
745
746 ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
747 if (ret) {
748 kmem_cache_free(steering->fgs_cache, fg);
749 return ERR_PTR(ret);
750 }
751
752 ida_init(&fg->fte_allocator);
753 fg->mask.match_criteria_enable = match_criteria_enable;
754 memcpy(&fg->mask.match_criteria, match_criteria,
755 sizeof(fg->mask.match_criteria));
756 fg->node.type = FS_TYPE_FLOW_GROUP;
757 fg->start_index = start_index;
758 fg->max_ftes = end_index - start_index + 1;
759
760 return fg;
761 }
762
static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
764 u8 match_criteria_enable,
765 const void *match_criteria,
766 int start_index,
767 int end_index,
768 struct list_head *prev)
769 {
770 struct mlx5_flow_steering *steering = get_steering(&ft->node);
771 struct mlx5_flow_group *fg;
772 int ret;
773
774 fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
775 start_index, end_index);
776 if (IS_ERR(fg))
777 return fg;
778
779 /* initialize refcnt, add to parent list */
780 ret = rhltable_insert(&ft->fgs_hash,
781 &fg->hash,
782 rhash_fg);
783 if (ret) {
784 dealloc_flow_group(steering, fg);
785 return ERR_PTR(ret);
786 }
787
788 tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
789 tree_add_node(&fg->node, &ft->node);
790 /* Add node to group list */
791 list_add(&fg->node.list, prev);
792 atomic_inc(&ft->node.version);
793
794 return fg;
795 }
796
static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
798 enum fs_flow_table_type table_type,
799 enum fs_flow_table_op_mod op_mod,
800 u32 flags)
801 {
802 struct mlx5_flow_table *ft;
803 int ret;
804
805 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
806 if (!ft)
807 return ERR_PTR(-ENOMEM);
808
809 ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
810 if (ret) {
811 kfree(ft);
812 return ERR_PTR(ret);
813 }
814
815 ft->level = level;
816 ft->node.type = FS_TYPE_FLOW_TABLE;
817 ft->op_mod = op_mod;
818 ft->type = table_type;
819 ft->vport = vport;
820 ft->flags = flags;
821 INIT_LIST_HEAD(&ft->fwd_rules);
822 mutex_init(&ft->lock);
823
824 return ft;
825 }
826
/* If reverse is false, search for the first flow table in the root sub-tree
 * after start (closest from the right); otherwise search for the last flow
 * table in the root sub-tree up to start (closest from the left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
832 struct list_head *start,
833 bool reverse)
834 {
835 #define list_advance_entry(pos, reverse) \
836 ((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
837
838 #define list_for_each_advance_continue(pos, head, reverse) \
839 for (pos = list_advance_entry(pos, reverse); \
840 &pos->list != (head); \
841 pos = list_advance_entry(pos, reverse))
842
843 struct fs_node *iter = list_entry(start, struct fs_node, list);
844 struct mlx5_flow_table *ft = NULL;
845
846 if (!root || root->type == FS_TYPE_PRIO_CHAINS)
847 return NULL;
848
849 list_for_each_advance_continue(iter, &root->children, reverse) {
850 if (iter->type == FS_TYPE_FLOW_TABLE) {
851 fs_get_obj(ft, iter);
852 return ft;
853 }
854 ft = find_closest_ft_recursive(iter, &iter->children, reverse);
855 if (ft)
856 return ft;
857 }
858
859 return ft;
860 }
861
/* If reverse is false, return the first flow table in the next priority of
 * prio in the tree; otherwise return the last flow table in the previous
 * priority of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
867 {
868 struct mlx5_flow_table *ft = NULL;
869 struct fs_node *curr_node;
870 struct fs_node *parent;
871
872 parent = prio->node.parent;
873 curr_node = &prio->node;
874 while (!ft && parent) {
875 ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
876 curr_node = parent;
877 parent = curr_node->parent;
878 }
879 return ft;
880 }
881
882 /* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
884 {
885 return find_closest_ft(prio, false);
886 }
887
888 /* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
890 {
891 return find_closest_ft(prio, true);
892 }
893
static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
895 struct mlx5_flow_act *flow_act)
896 {
897 struct fs_prio *prio;
898 bool next_ns;
899
900 next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
901 fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
902
903 return find_next_chained_ft(prio);
904 }
905
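/* Re-point every flow table in @prio to @ft as its next (miss) table.  Any
 * failure leaves the driver out of sync with the firmware.
 */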
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
907 struct fs_prio *prio,
908 struct mlx5_flow_table *ft)
909 {
910 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
911 struct mlx5_flow_table *iter;
912 int err;
913
914 fs_for_each_ft(iter, prio) {
915 err = root->cmds->modify_flow_table(root, iter, ft);
916 if (err) {
917 mlx5_core_err(dev,
918 "Failed to modify flow table id %d, type %d, err %d\n",
919 iter->id, iter->type, err);
920 /* The driver is out of sync with the FW */
921 return err;
922 }
923 }
924 return 0;
925 }
926
927 /* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
929 struct mlx5_flow_table *ft,
930 struct fs_prio *prio)
931 {
932 struct mlx5_flow_table *prev_ft;
933
934 prev_ft = find_prev_chained_ft(prio);
935 if (prev_ft) {
936 struct fs_prio *prev_prio;
937
938 fs_get_obj(prev_prio, prev_ft->node.parent);
939 return connect_fts_in_prio(dev, prev_prio, ft);
940 }
941 return 0;
942 }
943
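/* If @ft sits at a lower level than the current root flow table, make it the
 * new root: issue update_root_ft for each underlay QPN (or once with QPN 0
 * when the list is empty) and record it in root->root_ft on success.
 */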
static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
945 *prio)
946 {
947 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
948 struct mlx5_ft_underlay_qp *uqp;
949 int min_level = INT_MAX;
950 int err = 0;
951 u32 qpn;
952
953 if (root->root_ft)
954 min_level = root->root_ft->level;
955
956 if (ft->level >= min_level)
957 return 0;
958
959 if (list_empty(&root->underlay_qpns)) {
960 /* Don't set any QPN (zero) in case QPN list is empty */
961 qpn = 0;
962 err = root->cmds->update_root_ft(root, ft, qpn, false);
963 } else {
964 list_for_each_entry(uqp, &root->underlay_qpns, list) {
965 qpn = uqp->qpn;
966 err = root->cmds->update_root_ft(root, ft,
967 qpn, false);
968 if (err)
969 break;
970 }
971 }
972
973 if (err)
974 mlx5_core_warn(root->dev,
975 "Update root flow table of id(%u) qpn(%d) failed\n",
976 ft->id, qpn);
977 else
978 root->root_ft = ft;
979
980 return err;
981 }
982
static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
984 struct mlx5_flow_destination *dest)
985 {
986 struct mlx5_flow_root_namespace *root;
987 struct mlx5_flow_table *ft;
988 struct mlx5_flow_group *fg;
989 struct fs_fte *fte;
990 int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
991 int err = 0;
992
993 fs_get_obj(fte, rule->node.parent);
994 if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
995 return -EINVAL;
996 down_write_ref_node(&fte->node, false);
997 fs_get_obj(fg, fte->node.parent);
998 fs_get_obj(ft, fg->node.parent);
999
1000 memcpy(&rule->dest_attr, dest, sizeof(*dest));
1001 root = find_root(&ft->node);
1002 err = root->cmds->update_fte(root, ft, fg,
1003 modify_mask, fte);
1004 up_write_ref_node(&fte->node, false);
1005
1006 return err;
1007 }
1008
int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
1010 struct mlx5_flow_destination *new_dest,
1011 struct mlx5_flow_destination *old_dest)
1012 {
1013 int i;
1014
1015 if (!old_dest) {
1016 if (handle->num_rules != 1)
1017 return -EINVAL;
1018 return _mlx5_modify_rule_destination(handle->rule[0],
1019 new_dest);
1020 }
1021
1022 for (i = 0; i < handle->num_rules; i++) {
1023 if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
1024 return _mlx5_modify_rule_destination(handle->rule[i],
1025 new_dest);
1026 }
1027
1028 return -EINVAL;
1029 }
1030
1031 /* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
1033 struct mlx5_flow_table *new_next_ft,
1034 struct mlx5_flow_table *old_next_ft)
1035 {
1036 struct mlx5_flow_destination dest = {};
1037 struct mlx5_flow_rule *iter;
1038 int err = 0;
1039
1040 /* new_next_ft and old_next_ft could be NULL only
1041 * when we create/destroy the anchor flow table.
1042 */
1043 if (!new_next_ft || !old_next_ft)
1044 return 0;
1045
1046 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1047 dest.ft = new_next_ft;
1048
1049 mutex_lock(&old_next_ft->lock);
1050 list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
1051 mutex_unlock(&old_next_ft->lock);
1052 list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
1053 if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
1054 iter->ft->ns == new_next_ft->ns)
1055 continue;
1056
1057 err = _mlx5_modify_rule_destination(iter, &dest);
1058 if (err)
1059 pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
1060 new_next_ft->id);
1061 }
1062 return 0;
1063 }
1064
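/* Wire a newly created @ft into the steering chain: connect the flow tables
 * of the previous priority to it, re-point FWD rules that targeted the old
 * next table, and, when the device supports modify_root, update the root
 * flow table if needed.
 */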
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
1066 struct fs_prio *prio)
1067 {
1068 struct mlx5_flow_table *next_ft, *first_ft;
1069 int err = 0;
1070
1071 /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
1072
1073 first_ft = list_first_entry_or_null(&prio->node.children,
1074 struct mlx5_flow_table, node.list);
1075 if (!first_ft || first_ft->level > ft->level) {
1076 err = connect_prev_fts(dev, ft, prio);
1077 if (err)
1078 return err;
1079
1080 next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
1081 err = connect_fwd_rules(dev, ft, next_ft);
1082 if (err)
1083 return err;
1084 }
1085
1086 if (MLX5_CAP_FLOWTABLE(dev,
1087 flow_table_properties_nic_receive.modify_root))
1088 err = update_root_ft_create(ft, prio);
1089 return err;
1090 }
1091
static void list_add_flow_table(struct mlx5_flow_table *ft,
1093 struct fs_prio *prio)
1094 {
1095 struct list_head *prev = &prio->node.children;
1096 struct mlx5_flow_table *iter;
1097
1098 fs_for_each_ft(iter, prio) {
1099 if (iter->level > ft->level)
1100 break;
1101 prev = &iter->node.list;
1102 }
1103 list_add(&ft->node.list, prev);
1104 }
1105
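/* Common flow table creation path: resolve the prio, translate the caller's
 * level into the prio's level range (for managed tables), create the table
 * in firmware and connect it into the tree, all under root->chain_lock.
 */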
static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1107 struct mlx5_flow_table_attr *ft_attr,
1108 enum fs_flow_table_op_mod op_mod,
1109 u16 vport)
1110 {
1111 struct mlx5_flow_root_namespace *root = find_root(&ns->node);
1112 bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
1113 struct mlx5_flow_table *next_ft;
1114 struct fs_prio *fs_prio = NULL;
1115 struct mlx5_flow_table *ft;
1116 int err;
1117
1118 if (!root) {
1119 pr_err("mlx5: flow steering failed to find root of namespace\n");
1120 return ERR_PTR(-ENODEV);
1121 }
1122
1123 mutex_lock(&root->chain_lock);
1124 fs_prio = find_prio(ns, ft_attr->prio);
1125 if (!fs_prio) {
1126 err = -EINVAL;
1127 goto unlock_root;
1128 }
1129 if (!unmanaged) {
1130 /* The level is related to the
1131 * priority level range.
1132 */
1133 if (ft_attr->level >= fs_prio->num_levels) {
1134 err = -ENOSPC;
1135 goto unlock_root;
1136 }
1137
1138 ft_attr->level += fs_prio->start_level;
1139 }
1140
1141 /* The level is related to the
1142 * priority level range.
1143 */
1144 ft = alloc_flow_table(ft_attr->level,
1145 vport,
1146 root->table_type,
1147 op_mod, ft_attr->flags);
1148 if (IS_ERR(ft)) {
1149 err = PTR_ERR(ft);
1150 goto unlock_root;
1151 }
1152
1153 tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
1154 next_ft = unmanaged ? ft_attr->next_ft :
1155 find_next_chained_ft(fs_prio);
1156 ft->def_miss_action = ns->def_miss_action;
1157 ft->ns = ns;
1158 err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
1159 if (err)
1160 goto free_ft;
1161
1162 if (!unmanaged) {
1163 err = connect_flow_table(root->dev, ft, fs_prio);
1164 if (err)
1165 goto destroy_ft;
1166 }
1167
1168 ft->node.active = true;
1169 down_write_ref_node(&fs_prio->node, false);
1170 if (!unmanaged) {
1171 tree_add_node(&ft->node, &fs_prio->node);
1172 list_add_flow_table(ft, fs_prio);
1173 } else {
1174 ft->node.root = fs_prio->node.root;
1175 }
1176 fs_prio->num_ft++;
1177 up_write_ref_node(&fs_prio->node, false);
1178 mutex_unlock(&root->chain_lock);
1179 trace_mlx5_fs_add_ft(ft);
1180 return ft;
1181 destroy_ft:
1182 root->cmds->destroy_flow_table(root, ft);
1183 free_ft:
1184 rhltable_destroy(&ft->fgs_hash);
1185 kfree(ft);
1186 unlock_root:
1187 mutex_unlock(&root->chain_lock);
1188 return ERR_PTR(err);
1189 }
1190
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1192 struct mlx5_flow_table_attr *ft_attr)
1193 {
1194 return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
1195 }
1196 EXPORT_SYMBOL(mlx5_create_flow_table);
1197
1198 struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
1200 struct mlx5_flow_table_attr *ft_attr, u16 vport)
1201 {
1202 return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
1203 }
1204
1205 struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
1207 int prio, u32 level)
1208 {
1209 struct mlx5_flow_table_attr ft_attr = {};
1210
1211 ft_attr.level = level;
1212 ft_attr.prio = prio;
1213 ft_attr.max_fte = 1;
1214
1215 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
1216 }
1217 EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
1218
1219 #define MAX_FLOW_GROUP_SIZE BIT(24)
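/* Create a flow table whose flow groups are managed automatically: the usable
 * entries (max_fte minus num_reserved_entries) are split into
 * max_num_groups + 1 equal slices, and groups of that size are created on
 * demand as rules with new match criteria are added.
 *
 * Minimal usage sketch (illustrative only; assumes the namespace was obtained
 * elsewhere, e.g. via mlx5_get_flow_namespace()):
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *	struct mlx5_flow_table *ft;
 *
 *	ft_attr.prio = 0;
 *	ft_attr.max_fte = 1024;
 *	ft_attr.autogroup.max_num_groups = 4;
 *	ft_attr.autogroup.num_reserved_entries = 1;
 *	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
 */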
1220 struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
1222 struct mlx5_flow_table_attr *ft_attr)
1223 {
1224 int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
1225 int max_num_groups = ft_attr->autogroup.max_num_groups;
1226 struct mlx5_flow_table *ft;
1227 int autogroups_max_fte;
1228
1229 ft = mlx5_create_flow_table(ns, ft_attr);
1230 if (IS_ERR(ft))
1231 return ft;
1232
1233 autogroups_max_fte = ft->max_fte - num_reserved_entries;
1234 if (max_num_groups > autogroups_max_fte)
1235 goto err_validate;
1236 if (num_reserved_entries > ft->max_fte)
1237 goto err_validate;
1238
1239 /* Align the number of groups according to the largest group size */
1240 if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
1241 max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;
1242
1243 ft->autogroup.active = true;
1244 ft->autogroup.required_groups = max_num_groups;
1245 ft->autogroup.max_fte = autogroups_max_fte;
/* Reserve room for one flow group beyond the requested maximum */
1247 ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
1248
1249 return ft;
1250
1251 err_validate:
1252 mlx5_destroy_flow_table(ft);
1253 return ERR_PTR(-ENOSPC);
1254 }
1255 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
1256
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
1258 u32 *fg_in)
1259 {
1260 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1261 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1262 fg_in, match_criteria);
1263 u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
1264 fg_in,
1265 match_criteria_enable);
1266 int start_index = MLX5_GET(create_flow_group_in, fg_in,
1267 start_flow_index);
1268 int end_index = MLX5_GET(create_flow_group_in, fg_in,
1269 end_flow_index);
1270 struct mlx5_flow_group *fg;
1271 int err;
1272
1273 if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
1274 return ERR_PTR(-EPERM);
1275
1276 down_write_ref_node(&ft->node, false);
1277 fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
1278 start_index, end_index,
1279 ft->node.children.prev);
1280 up_write_ref_node(&ft->node, false);
1281 if (IS_ERR(fg))
1282 return fg;
1283
1284 err = root->cmds->create_flow_group(root, ft, fg_in, fg);
1285 if (err) {
1286 tree_put_node(&fg->node, false);
1287 return ERR_PTR(err);
1288 }
1289 trace_mlx5_fs_add_fg(fg);
1290 fg->node.active = true;
1291
1292 return fg;
1293 }
1294 EXPORT_SYMBOL(mlx5_create_flow_group);
1295
static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
1297 {
1298 struct mlx5_flow_rule *rule;
1299
1300 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1301 if (!rule)
1302 return NULL;
1303
1304 INIT_LIST_HEAD(&rule->next_ft);
1305 rule->node.type = FS_TYPE_FLOW_DEST;
1306 if (dest)
1307 memcpy(&rule->dest_attr, dest, sizeof(*dest));
1308 else
1309 rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE;
1310
1311 return rule;
1312 }
1313
static struct mlx5_flow_handle *alloc_handle(int num_rules)
1315 {
1316 struct mlx5_flow_handle *handle;
1317
1318 handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
1319 if (!handle)
1320 return NULL;
1321
1322 handle->num_rules = num_rules;
1323
1324 return handle;
1325 }
1326
static void destroy_flow_handle(struct fs_fte *fte,
1328 struct mlx5_flow_handle *handle,
1329 struct mlx5_flow_destination *dest,
1330 int i)
1331 {
1332 for (; --i >= 0;) {
1333 if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
1334 fte->dests_size--;
1335 list_del(&handle->rule[i]->node.list);
1336 kfree(handle->rule[i]);
1337 }
1338 }
1339 kfree(handle);
1340 }
1341
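/* Build a handle with one mlx5_flow_rule per destination.  Existing rules
 * with an identical destination are reused (their refcount is bumped);
 * otherwise a new rule is allocated, linked under the FTE and the relevant
 * bits are added to @modify_mask.
 */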
1342 static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
1344 struct mlx5_flow_destination *dest,
1345 int dest_num,
1346 int *modify_mask,
1347 bool *new_rule)
1348 {
1349 struct mlx5_flow_handle *handle;
1350 struct mlx5_flow_rule *rule = NULL;
1351 static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
1352 static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
1353 int type;
1354 int i = 0;
1355
1356 handle = alloc_handle((dest_num) ? dest_num : 1);
1357 if (!handle)
1358 return ERR_PTR(-ENOMEM);
1359
1360 do {
1361 if (dest) {
1362 rule = find_flow_rule(fte, dest + i);
1363 if (rule) {
1364 refcount_inc(&rule->node.refcount);
1365 goto rule_found;
1366 }
1367 }
1368
1369 *new_rule = true;
1370 rule = alloc_rule(dest + i);
1371 if (!rule)
1372 goto free_rules;
1373
/* Add dest to the dests list - flow tables must be at the end of
 * the list so that forward-to-next-prio rules are handled last.
 */
1377 tree_init_node(&rule->node, NULL, del_sw_hw_rule);
1378 if (dest &&
1379 dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
1380 list_add(&rule->node.list, &fte->node.children);
1381 else
1382 list_add_tail(&rule->node.list, &fte->node.children);
1383 if (dest) {
1384 fte->dests_size++;
1385
1386 if (is_fwd_dest_type(dest[i].type))
1387 fte->fwd_dests++;
1388
1389 type = dest[i].type ==
1390 MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1391 *modify_mask |= type ? count : dst;
1392 }
1393 rule_found:
1394 handle->rule[i] = rule;
1395 } while (++i < dest_num);
1396
1397 return handle;
1398
1399 free_rules:
1400 destroy_flow_handle(fte, handle, dest, i);
1401 return ERR_PTR(-ENOMEM);
1402 }
1403
1404 /* fte should not be deleted while calling this function */
1405 static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
1407 struct mlx5_flow_group *fg,
1408 struct mlx5_flow_destination *dest,
1409 int dest_num,
1410 bool update_action)
1411 {
1412 struct mlx5_flow_root_namespace *root;
1413 struct mlx5_flow_handle *handle;
1414 struct mlx5_flow_table *ft;
1415 int modify_mask = 0;
1416 int err;
1417 bool new_rule = false;
1418
1419 handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
1420 &new_rule);
1421 if (IS_ERR(handle) || !new_rule)
1422 goto out;
1423
1424 if (update_action)
1425 modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
1426
1427 fs_get_obj(ft, fg->node.parent);
1428 root = find_root(&fg->node);
1429 if (!(fte->status & FS_FTE_STATUS_EXISTING))
1430 err = root->cmds->create_fte(root, ft, fg, fte);
1431 else
1432 err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
1433 if (err)
1434 goto free_handle;
1435
1436 fte->node.active = true;
1437 fte->status |= FS_FTE_STATUS_EXISTING;
1438 atomic_inc(&fg->node.version);
1439
1440 out:
1441 return handle;
1442
1443 free_handle:
1444 destroy_flow_handle(fte, handle, dest, handle->num_rules);
1445 return ERR_PTR(err);
1446 }
1447
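/* Find a free index range inside an autogrouped table (the group list is
 * kept sorted by start_index) and insert a new flow group for @spec's match
 * criteria there.  Once the required number of groups exists, new groups
 * shrink to a single entry.
 */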
static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
1449 const struct mlx5_flow_spec *spec)
1450 {
1451 struct list_head *prev = &ft->node.children;
1452 u32 max_fte = ft->autogroup.max_fte;
1453 unsigned int candidate_index = 0;
1454 unsigned int group_size = 0;
1455 struct mlx5_flow_group *fg;
1456
1457 if (!ft->autogroup.active)
1458 return ERR_PTR(-ENOENT);
1459
1460 if (ft->autogroup.num_groups < ft->autogroup.required_groups)
1461 group_size = ft->autogroup.group_size;
1462
1463 /* max_fte == ft->autogroup.max_types */
1464 if (group_size == 0)
1465 group_size = 1;
1466
1467 /* sorted by start_index */
1468 fs_for_each_fg(fg, ft) {
1469 if (candidate_index + group_size > fg->start_index)
1470 candidate_index = fg->start_index + fg->max_ftes;
1471 else
1472 break;
1473 prev = &fg->node.list;
1474 }
1475
1476 if (candidate_index + group_size > max_fte)
1477 return ERR_PTR(-ENOSPC);
1478
1479 fg = alloc_insert_flow_group(ft,
1480 spec->match_criteria_enable,
1481 spec->match_criteria,
1482 candidate_index,
1483 candidate_index + group_size - 1,
1484 prev);
1485 if (IS_ERR(fg))
1486 goto out;
1487
1488 if (group_size == ft->autogroup.group_size)
1489 ft->autogroup.num_groups++;
1490
1491 out:
1492 return fg;
1493 }
1494
static int create_auto_flow_group(struct mlx5_flow_table *ft,
1496 struct mlx5_flow_group *fg)
1497 {
1498 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1499 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1500 void *match_criteria_addr;
1501 u8 src_esw_owner_mask_on;
1502 void *misc;
1503 int err;
1504 u32 *in;
1505
1506 in = kvzalloc(inlen, GFP_KERNEL);
1507 if (!in)
1508 return -ENOMEM;
1509
1510 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
1511 fg->mask.match_criteria_enable);
1512 MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
1513 MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
1514 fg->max_ftes - 1);
1515
1516 misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
1517 misc_parameters);
1518 src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
1519 source_eswitch_owner_vhca_id);
1520 MLX5_SET(create_flow_group_in, in,
1521 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);
1522
1523 match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
1524 in, match_criteria);
1525 memcpy(match_criteria_addr, fg->mask.match_criteria,
1526 sizeof(fg->mask.match_criteria));
1527
1528 err = root->cmds->create_flow_group(root, ft, in, fg);
1529 if (!err) {
1530 fg->node.active = true;
1531 trace_mlx5_fs_add_fg(fg);
1532 }
1533
1534 kvfree(in);
1535 return err;
1536 }
1537
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1539 struct mlx5_flow_destination *d2)
1540 {
1541 if (d1->type == d2->type) {
1542 if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
1543 d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
1544 d1->vport.num == d2->vport.num &&
1545 d1->vport.flags == d2->vport.flags &&
1546 ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
1547 (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
1548 ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
1549 (d1->vport.pkt_reformat->id ==
1550 d2->vport.pkt_reformat->id) : true)) ||
1551 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1552 d1->ft == d2->ft) ||
1553 (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
1554 d1->tir_num == d2->tir_num) ||
1555 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
1556 d1->ft_num == d2->ft_num) ||
1557 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
1558 d1->sampler_id == d2->sampler_id))
1559 return true;
1560 }
1561
1562 return false;
1563 }
1564
static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1566 struct mlx5_flow_destination *dest)
1567 {
1568 struct mlx5_flow_rule *rule;
1569
1570 list_for_each_entry(rule, &fte->node.children, node.list) {
1571 if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1572 return rule;
1573 }
1574 return NULL;
1575 }
1576
static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
1578 const struct mlx5_fs_vlan *vlan1)
1579 {
1580 return vlan0->ethtype != vlan1->ethtype ||
1581 vlan0->vid != vlan1->vid ||
1582 vlan0->prio != vlan1->prio;
1583 }
1584
static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
1586 const struct mlx5_flow_act *act2)
1587 {
1588 u32 action1 = act1->action;
1589 u32 action2 = act2->action;
1590 u32 xored_actions;
1591
1592 xored_actions = action1 ^ action2;
1593
1594 /* if one rule only wants to count, it's ok */
1595 if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
1596 action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
1597 return false;
1598
1599 if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1600 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1601 MLX5_FLOW_CONTEXT_ACTION_DECAP |
1602 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1603 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1604 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1605 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
1606 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
1607 return true;
1608
1609 if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
1610 act1->pkt_reformat != act2->pkt_reformat)
1611 return true;
1612
1613 if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
1614 act1->modify_hdr != act2->modify_hdr)
1615 return true;
1616
1617 if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
1618 check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
1619 return true;
1620
1621 if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
1622 check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
1623 return true;
1624
1625 return false;
1626 }
1627
static int check_conflicting_ftes(struct fs_fte *fte,
1629 const struct mlx5_flow_context *flow_context,
1630 const struct mlx5_flow_act *flow_act)
1631 {
1632 if (check_conflicting_actions(flow_act, &fte->action)) {
1633 mlx5_core_warn(get_dev(&fte->node),
1634 "Found two FTEs with conflicting actions\n");
1635 return -EEXIST;
1636 }
1637
1638 if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
1639 fte->flow_context.flow_tag != flow_context->flow_tag) {
1640 mlx5_core_warn(get_dev(&fte->node),
1641 "FTE flow tag %u already exists with different flow tag %u\n",
1642 fte->flow_context.flow_tag,
1643 flow_context->flow_tag);
1644 return -EEXIST;
1645 }
1646
1647 return 0;
1648 }
1649
static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1651 const struct mlx5_flow_spec *spec,
1652 struct mlx5_flow_act *flow_act,
1653 struct mlx5_flow_destination *dest,
1654 int dest_num,
1655 struct fs_fte *fte)
1656 {
1657 struct mlx5_flow_handle *handle;
1658 int old_action;
1659 int i;
1660 int ret;
1661
1662 ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
1663 if (ret)
1664 return ERR_PTR(ret);
1665
1666 old_action = fte->action.action;
1667 fte->action.action |= flow_act->action;
1668 handle = add_rule_fte(fte, fg, dest, dest_num,
1669 old_action != flow_act->action);
1670 if (IS_ERR(handle)) {
1671 fte->action.action = old_action;
1672 return handle;
1673 }
1674 trace_mlx5_fs_set_fte(fte, false);
1675
1676 for (i = 0; i < handle->num_rules; i++) {
1677 if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
1678 tree_add_node(&handle->rule[i]->node, &fte->node);
1679 trace_mlx5_fs_add_rule(handle->rule[i]);
1680 }
1681 }
1682 return handle;
1683 }
1684
static bool counter_is_valid(u32 action)
1686 {
1687 return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1688 MLX5_FLOW_CONTEXT_ACTION_ALLOW |
1689 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
1690 }
1691
static bool dest_is_valid(struct mlx5_flow_destination *dest,
1693 struct mlx5_flow_act *flow_act,
1694 struct mlx5_flow_table *ft)
1695 {
1696 bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
1697 u32 action = flow_act->action;
1698
1699 if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1700 return counter_is_valid(action);
1701
1702 if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1703 return true;
1704
1705 if (ignore_level) {
1706 if (ft->type != FS_FT_FDB &&
1707 ft->type != FS_FT_NIC_RX)
1708 return false;
1709
1710 if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1711 ft->type != dest->ft->type)
1712 return false;
1713 }
1714
1715 if (!dest || ((dest->type ==
1716 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1717 (dest->ft->level <= ft->level && !ignore_level)))
1718 return false;
1719 return true;
1720 }
1721
1722 struct match_list {
1723 struct list_head list;
1724 struct mlx5_flow_group *g;
1725 };
1726
static void free_match_list(struct match_list *head, bool ft_locked)
1728 {
1729 struct match_list *iter, *match_tmp;
1730
1731 list_for_each_entry_safe(iter, match_tmp, &head->list,
1732 list) {
1733 tree_put_node(&iter->g->node, ft_locked);
1734 list_del(&iter->list);
1735 kfree(iter);
1736 }
1737 }
1738
static int build_match_list(struct match_list *match_head,
1740 struct mlx5_flow_table *ft,
1741 const struct mlx5_flow_spec *spec,
1742 struct mlx5_flow_group *fg,
1743 bool ft_locked)
1744 {
1745 struct rhlist_head *tmp, *list;
1746 struct mlx5_flow_group *g;
1747 int err = 0;
1748
1749 rcu_read_lock();
1750 INIT_LIST_HEAD(&match_head->list);
1751 /* Collect all fgs which have a matching match_criteria */
1752 list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
1753 /* We are in an RCU read-side (atomic) section, so we can't execute FW commands here */
1754 rhl_for_each_entry_rcu(g, tmp, list, hash) {
1755 struct match_list *curr_match;
1756
1757 if (fg && fg != g)
1758 continue;
1759
1760 if (unlikely(!tree_get_node(&g->node)))
1761 continue;
1762
1763 curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
1764 if (!curr_match) {
1765 rcu_read_unlock();
1766 free_match_list(match_head, ft_locked);
1767 return -ENOMEM;
1768 }
1769 curr_match->g = g;
1770 list_add_tail(&curr_match->list, &match_head->list);
1771 }
1772 rcu_read_unlock();
1773 return err;
1774 }
1775
1776 static u64 matched_fgs_get_version(struct list_head *match_head)
1777 {
1778 struct match_list *iter;
1779 u64 version = 0;
1780
1781 list_for_each_entry(iter, match_head, list)
1782 version += (u64)atomic_read(&iter->g->node.version);
1783 return version;
1784 }
1785
1786 static struct fs_fte *
1787 lookup_fte_locked(struct mlx5_flow_group *g,
1788 const u32 *match_value,
1789 bool take_write)
1790 {
1791 struct fs_fte *fte_tmp;
1792
1793 if (take_write)
1794 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1795 else
1796 nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
1797 fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
1798 rhash_fte);
1799 if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
1800 fte_tmp = NULL;
1801 goto out;
1802 }
1803 if (!fte_tmp->node.active) {
1804 tree_put_node(&fte_tmp->node, false);
1805 fte_tmp = NULL;
1806 goto out;
1807 }
1808
1809 nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1810 out:
1811 if (take_write)
1812 up_write_ref_node(&g->node, false);
1813 else
1814 up_read_ref_node(&g->node);
1815 return fte_tmp;
1816 }
1817
1818 static struct mlx5_flow_handle *
1819 try_add_to_existing_fg(struct mlx5_flow_table *ft,
1820 struct list_head *match_head,
1821 const struct mlx5_flow_spec *spec,
1822 struct mlx5_flow_act *flow_act,
1823 struct mlx5_flow_destination *dest,
1824 int dest_num,
1825 int ft_version)
1826 {
1827 struct mlx5_flow_steering *steering = get_steering(&ft->node);
1828 struct mlx5_flow_group *g;
1829 struct mlx5_flow_handle *rule;
1830 struct match_list *iter;
1831 bool take_write = false;
1832 struct fs_fte *fte;
1833 u64 version = 0;
1834 int err;
1835
1836 fte = alloc_fte(ft, spec, flow_act);
1837 if (IS_ERR(fte))
1838 return ERR_PTR(-ENOMEM);
1839
1840 search_again_locked:
1841 if (flow_act->flags & FLOW_ACT_NO_APPEND)
1842 goto skip_search;
1843 version = matched_fgs_get_version(match_head);
1844 /* Try to find an fte with an identical match value and attempt to
1845 * update its action.
1846 */
1847 list_for_each_entry(iter, match_head, list) {
1848 struct fs_fte *fte_tmp;
1849
1850 g = iter->g;
1851 fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1852 if (!fte_tmp)
1853 continue;
1854 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
1855 /* No error check needed here: fte_tmp already existed, so there is no insert_fte() to undo */
1856 up_write_ref_node(&fte_tmp->node, false);
1857 tree_put_node(&fte_tmp->node, false);
1858 kmem_cache_free(steering->ftes_cache, fte);
1859 return rule;
1860 }
1861
1862 skip_search:
1863 /* No group with matching fte found, or we skipped the search.
1864 * Try to add a new fte to any matching fg.
1865 */
1866
1867 /* Check the ft version, in case a new flow group
1868 * was added while the fgs weren't locked
1869 */
1870 if (atomic_read(&ft->node.version) != ft_version) {
1871 rule = ERR_PTR(-EAGAIN);
1872 goto out;
1873 }
1874
1875 /* Check the fgs version. If the version has changed, it could be that an
1876 * FTE with the same match value was added while the fgs weren't
1877 * locked.
1878 */
1879 if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1880 version != matched_fgs_get_version(match_head)) {
1881 take_write = true;
1882 goto search_again_locked;
1883 }
1884
1885 list_for_each_entry(iter, match_head, list) {
1886 g = iter->g;
1887
1888 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1889
1890 if (!g->node.active) {
1891 up_write_ref_node(&g->node, false);
1892 continue;
1893 }
1894
1895 err = insert_fte(g, fte);
1896 if (err) {
1897 up_write_ref_node(&g->node, false);
1898 if (err == -ENOSPC)
1899 continue;
1900 kmem_cache_free(steering->ftes_cache, fte);
1901 return ERR_PTR(err);
1902 }
1903
1904 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1905 up_write_ref_node(&g->node, false);
1906 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1907 up_write_ref_node(&fte->node, false);
1908 if (IS_ERR(rule))
1909 tree_put_node(&fte->node, false);
1910 return rule;
1911 }
1912 rule = ERR_PTR(-ENOENT);
1913 out:
1914 kmem_cache_free(steering->ftes_cache, fte);
1915 return rule;
1916 }
1917
1918 static struct mlx5_flow_handle *
1919 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1920 const struct mlx5_flow_spec *spec,
1921 struct mlx5_flow_act *flow_act,
1922 struct mlx5_flow_destination *dest,
1923 int dest_num)
1924
1925 {
1926 struct mlx5_flow_steering *steering = get_steering(&ft->node);
1927 struct mlx5_flow_handle *rule;
1928 struct match_list match_head;
1929 struct mlx5_flow_group *g;
1930 bool take_write = false;
1931 struct fs_fte *fte;
1932 int version;
1933 int err;
1934 int i;
1935
1936 if (!check_valid_spec(spec))
1937 return ERR_PTR(-EINVAL);
1938
1939 if (flow_act->fg && ft->autogroup.active)
1940 return ERR_PTR(-EINVAL);
1941
1942 for (i = 0; i < dest_num; i++) {
1943 if (!dest_is_valid(&dest[i], flow_act, ft))
1944 return ERR_PTR(-EINVAL);
1945 }
1946 nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1947 search_again_locked:
1948 version = atomic_read(&ft->node.version);
1949
1950 /* Collect all fgs which have a matching match_criteria */
1951 err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
1952 if (err) {
1953 if (take_write)
1954 up_write_ref_node(&ft->node, false);
1955 else
1956 up_read_ref_node(&ft->node);
1957 return ERR_PTR(err);
1958 }
1959
1960 if (!take_write)
1961 up_read_ref_node(&ft->node);
1962
1963 rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
1964 dest_num, version);
1965 free_match_list(&match_head, take_write);
1966 if (!IS_ERR(rule) ||
1967 (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
1968 if (take_write)
1969 up_write_ref_node(&ft->node, false);
1970 return rule;
1971 }
1972
1973 if (!take_write) {
1974 nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1975 take_write = true;
1976 }
1977
1978 if (PTR_ERR(rule) == -EAGAIN ||
1979 version != atomic_read(&ft->node.version))
1980 goto search_again_locked;
1981
1982 g = alloc_auto_flow_group(ft, spec);
1983 if (IS_ERR(g)) {
1984 rule = ERR_CAST(g);
1985 up_write_ref_node(&ft->node, false);
1986 return rule;
1987 }
1988
1989 fte = alloc_fte(ft, spec, flow_act);
1990 if (IS_ERR(fte)) {
1991 up_write_ref_node(&ft->node, false);
1992 err = PTR_ERR(fte);
1993 goto err_alloc_fte;
1994 }
1995
1996 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1997 up_write_ref_node(&ft->node, false);
1998
1999 err = create_auto_flow_group(ft, g);
2000 if (err)
2001 goto err_release_fg;
2002
2003 err = insert_fte(g, fte);
2004 if (err)
2005 goto err_release_fg;
2006
2007 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
2008 up_write_ref_node(&g->node, false);
2009 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
2010 up_write_ref_node(&fte->node, false);
2011 if (IS_ERR(rule))
2012 tree_put_node(&fte->node, false);
2013 tree_put_node(&g->node, false);
2014 return rule;
2015
2016 err_release_fg:
2017 up_write_ref_node(&g->node, false);
2018 kmem_cache_free(steering->ftes_cache, fte);
2019 err_alloc_fte:
2020 tree_put_node(&g->node, false);
2021 return ERR_PTR(err);
2022 }
2023
2024 static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
2025 {
2026 return ((ft->type == FS_FT_NIC_RX) &&
2027 (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
2028 }
2029
2030 struct mlx5_flow_handle *
2031 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
2032 const struct mlx5_flow_spec *spec,
2033 struct mlx5_flow_act *flow_act,
2034 struct mlx5_flow_destination *dest,
2035 int num_dest)
2036 {
2037 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2038 static const struct mlx5_flow_spec zero_spec = {};
2039 struct mlx5_flow_destination *gen_dest = NULL;
2040 struct mlx5_flow_table *next_ft = NULL;
2041 struct mlx5_flow_handle *handle = NULL;
2042 u32 sw_action = flow_act->action;
2043 int i;
2044
2045 if (!spec)
2046 spec = &zero_spec;
2047
2048 if (!is_fwd_next_action(sw_action))
2049 return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2050
2051 if (!fwd_next_prio_supported(ft))
2052 return ERR_PTR(-EOPNOTSUPP);
2053
2054 mutex_lock(&root->chain_lock);
2055 next_ft = find_next_fwd_ft(ft, flow_act);
2056 if (!next_ft) {
2057 handle = ERR_PTR(-EOPNOTSUPP);
2058 goto unlock;
2059 }
2060
2061 gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
2062 GFP_KERNEL);
2063 if (!gen_dest) {
2064 handle = ERR_PTR(-ENOMEM);
2065 goto unlock;
2066 }
2067 for (i = 0; i < num_dest; i++)
2068 gen_dest[i] = dest[i];
2069 gen_dest[i].type =
2070 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2071 gen_dest[i].ft = next_ft;
2072 dest = gen_dest;
2073 num_dest++;
2074 flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
2075 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
2076 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2077 handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
2078 if (IS_ERR(handle))
2079 goto unlock;
2080
2081 if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
2082 mutex_lock(&next_ft->lock);
2083 list_add(&handle->rule[num_dest - 1]->next_ft,
2084 &next_ft->fwd_rules);
2085 mutex_unlock(&next_ft->lock);
2086 handle->rule[num_dest - 1]->sw_action = sw_action;
2087 handle->rule[num_dest - 1]->ft = ft;
2088 }
2089 unlock:
2090 mutex_unlock(&root->chain_lock);
2091 kfree(gen_dest);
2092 return handle;
2093 }
2094 EXPORT_SYMBOL(mlx5_add_flow_rules);
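/*
 * Usage sketch (illustrative only, not used by the driver): a caller that
 * owns a flow table "ft" created with autogrouping enabled could install a
 * catch-all drop rule and later remove it roughly as below; the surrounding
 * table setup and error handling are assumptions made for the example.
 *
 *	struct mlx5_flow_act flow_act = {
 *		.action = MLX5_FLOW_CONTEXT_ACTION_DROP,
 *	};
 *	struct mlx5_flow_handle *handle;
 *
 *	handle = mlx5_add_flow_rules(ft, NULL, &flow_act, NULL, 0);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	mlx5_del_flow_rules(handle);
 *
 * A NULL spec is treated as an all-zero (match-all) spec by
 * mlx5_add_flow_rules() above.
 */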
2095
2096 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
2097 {
2098 struct fs_fte *fte;
2099 int i;
2100
2101 /* To consolidate the HW changes, we lock the FTE against other
2102 * changes and take an extra reference on it so that the FTE's
2103 * "del" callbacks are not invoked; we handle them here instead.
2104 * The removal of the rules is done under the locked FTE.
2105 * After removing all of the handle's rules, if there are remaining
2106 * rules, it means we only need to modify the FTE in FW, then
2107 * unlock it and drop the reference we took above.
2108 * Otherwise, it means the FTE should be deleted. First delete the
2109 * FTE in FW. Then unlock the FTE and call tree_put_node() on it,
2110 * which handles the last decrease of the refcount, as well as the
2111 * required handling of its parent.
2112 */
2113 fs_get_obj(fte, handle->rule[0]->node.parent);
2114 down_write_ref_node(&fte->node, false);
2115 for (i = handle->num_rules - 1; i >= 0; i--)
2116 tree_remove_node(&handle->rule[i]->node, true);
2117 if (list_empty(&fte->node.children)) {
2118 fte->node.del_hw_func(&fte->node);
2119 /* Avoid double call to del_hw_fte */
2120 fte->node.del_hw_func = NULL;
2121 up_write_ref_node(&fte->node, false);
2122 tree_put_node(&fte->node, false);
2123 } else if (fte->dests_size) {
2124 if (fte->modify_mask)
2125 modify_fte(fte);
2126 up_write_ref_node(&fte->node, false);
2127 } else {
2128 up_write_ref_node(&fte->node, false);
2129 }
2130 kfree(handle);
2131 }
2132 EXPORT_SYMBOL(mlx5_del_flow_rules);
2133
2134 /* Assuming prio->node.children (flow tables) are sorted by level */
2135 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
2136 {
2137 struct fs_prio *prio;
2138
2139 fs_get_obj(prio, ft->node.parent);
2140
2141 if (!list_is_last(&ft->node.list, &prio->node.children))
2142 return list_next_entry(ft, node.list);
2143 return find_next_chained_ft(prio);
2144 }
2145
2146 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
2147 {
2148 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2149 struct mlx5_ft_underlay_qp *uqp;
2150 struct mlx5_flow_table *new_root_ft = NULL;
2151 int err = 0;
2152 u32 qpn;
2153
2154 if (root->root_ft != ft)
2155 return 0;
2156
2157 new_root_ft = find_next_ft(ft);
2158 if (!new_root_ft) {
2159 root->root_ft = NULL;
2160 return 0;
2161 }
2162
2163 if (list_empty(&root->underlay_qpns)) {
2164 /* Don't set any QPN (zero) in case QPN list is empty */
2165 qpn = 0;
2166 err = root->cmds->update_root_ft(root, new_root_ft,
2167 qpn, false);
2168 } else {
2169 list_for_each_entry(uqp, &root->underlay_qpns, list) {
2170 qpn = uqp->qpn;
2171 err = root->cmds->update_root_ft(root,
2172 new_root_ft, qpn,
2173 false);
2174 if (err)
2175 break;
2176 }
2177 }
2178
2179 if (err)
2180 mlx5_core_warn(root->dev,
2181 "Update root flow table of id(%u) qpn(%d) failed\n",
2182 ft->id, qpn);
2183 else
2184 root->root_ft = new_root_ft;
2185
2186 return 0;
2187 }
2188
2189 /* Connect flow table from previous priority to
2190 * the next flow table.
2191 */
2192 static int disconnect_flow_table(struct mlx5_flow_table *ft)
2193 {
2194 struct mlx5_core_dev *dev = get_dev(&ft->node);
2195 struct mlx5_flow_table *next_ft;
2196 struct fs_prio *prio;
2197 int err = 0;
2198
2199 err = update_root_ft_destroy(ft);
2200 if (err)
2201 return err;
2202
2203 fs_get_obj(prio, ft->node.parent);
2204 if (!(list_first_entry(&prio->node.children,
2205 struct mlx5_flow_table,
2206 node.list) == ft))
2207 return 0;
2208
2209 next_ft = find_next_ft(ft);
2210 err = connect_fwd_rules(dev, next_ft, ft);
2211 if (err)
2212 return err;
2213
2214 err = connect_prev_fts(dev, next_ft, prio);
2215 if (err)
2216 mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
2217 ft->id);
2218 return err;
2219 }
2220
2221 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
2222 {
2223 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2224 int err = 0;
2225
2226 mutex_lock(&root->chain_lock);
2227 if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
2228 err = disconnect_flow_table(ft);
2229 if (err) {
2230 mutex_unlock(&root->chain_lock);
2231 return err;
2232 }
2233 if (tree_remove_node(&ft->node, false))
2234 mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
2235 ft->id);
2236 mutex_unlock(&root->chain_lock);
2237
2238 return err;
2239 }
2240 EXPORT_SYMBOL(mlx5_destroy_flow_table);
2241
2242 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
2243 {
2244 if (tree_remove_node(&fg->node, false))
2245 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
2246 fg->id);
2247 }
2248 EXPORT_SYMBOL(mlx5_destroy_flow_group);
2249
2250 struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2251 int n)
2252 {
2253 struct mlx5_flow_steering *steering = dev->priv.steering;
2254
2255 if (!steering || !steering->fdb_sub_ns)
2256 return NULL;
2257
2258 return steering->fdb_sub_ns[n];
2259 }
2260 EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
2261
2262 static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
2263 {
2264 switch (type) {
2265 case MLX5_FLOW_NAMESPACE_BYPASS:
2266 case MLX5_FLOW_NAMESPACE_LAG:
2267 case MLX5_FLOW_NAMESPACE_OFFLOADS:
2268 case MLX5_FLOW_NAMESPACE_ETHTOOL:
2269 case MLX5_FLOW_NAMESPACE_KERNEL:
2270 case MLX5_FLOW_NAMESPACE_LEFTOVERS:
2271 case MLX5_FLOW_NAMESPACE_ANCHOR:
2272 return true;
2273 default:
2274 return false;
2275 }
2276 }
2277
2278 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2279 enum mlx5_flow_namespace_type type)
2280 {
2281 struct mlx5_flow_steering *steering = dev->priv.steering;
2282 struct mlx5_flow_root_namespace *root_ns;
2283 int prio = 0;
2284 struct fs_prio *fs_prio;
2285 struct mlx5_flow_namespace *ns;
2286
2287 if (!steering)
2288 return NULL;
2289
2290 switch (type) {
2291 case MLX5_FLOW_NAMESPACE_FDB:
2292 if (steering->fdb_root_ns)
2293 return &steering->fdb_root_ns->ns;
2294 return NULL;
2295 case MLX5_FLOW_NAMESPACE_PORT_SEL:
2296 if (steering->port_sel_root_ns)
2297 return &steering->port_sel_root_ns->ns;
2298 return NULL;
2299 case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2300 if (steering->sniffer_rx_root_ns)
2301 return &steering->sniffer_rx_root_ns->ns;
2302 return NULL;
2303 case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2304 if (steering->sniffer_tx_root_ns)
2305 return &steering->sniffer_tx_root_ns->ns;
2306 return NULL;
2307 case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
2308 root_ns = steering->fdb_root_ns;
2309 prio = FDB_BYPASS_PATH;
2310 break;
2311 case MLX5_FLOW_NAMESPACE_EGRESS:
2312 case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
2313 root_ns = steering->egress_root_ns;
2314 prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
2315 break;
2316 case MLX5_FLOW_NAMESPACE_RDMA_RX:
2317 root_ns = steering->rdma_rx_root_ns;
2318 prio = RDMA_RX_BYPASS_PRIO;
2319 break;
2320 case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
2321 root_ns = steering->rdma_rx_root_ns;
2322 prio = RDMA_RX_KERNEL_PRIO;
2323 break;
2324 case MLX5_FLOW_NAMESPACE_RDMA_TX:
2325 root_ns = steering->rdma_tx_root_ns;
2326 break;
2327 case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
2328 root_ns = steering->rdma_rx_root_ns;
2329 prio = RDMA_RX_COUNTERS_PRIO;
2330 break;
2331 case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
2332 root_ns = steering->rdma_tx_root_ns;
2333 prio = RDMA_TX_COUNTERS_PRIO;
2334 break;
2335 default: /* Must be NIC RX */
2336 WARN_ON(!is_nic_rx_ns(type));
2337 root_ns = steering->root_ns;
2338 prio = type;
2339 break;
2340 }
2341
2342 if (!root_ns)
2343 return NULL;
2344
2345 fs_prio = find_prio(&root_ns->ns, prio);
2346 if (!fs_prio)
2347 return NULL;
2348
2349 ns = list_first_entry(&fs_prio->node.children,
2350 typeof(*ns),
2351 node.list);
2352
2353 return ns;
2354 }
2355 EXPORT_SYMBOL(mlx5_get_flow_namespace);
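/*
 * Usage sketch (illustrative only): look up the kernel NIC RX namespace and
 * create a small table in it. The prio and size values below are arbitrary
 * assumptions made for the example.
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *	struct mlx5_flow_namespace *ns;
 *	struct mlx5_flow_table *ft;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *
 *	ft_attr.prio = 0;
 *	ft_attr.max_fte = 16;
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 */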
2356
2357 struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2358 enum mlx5_flow_namespace_type type,
2359 int vport)
2360 {
2361 struct mlx5_flow_steering *steering = dev->priv.steering;
2362
2363 if (!steering)
2364 return NULL;
2365
2366 switch (type) {
2367 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2368 if (vport >= steering->esw_egress_acl_vports)
2369 return NULL;
2370 if (steering->esw_egress_root_ns &&
2371 steering->esw_egress_root_ns[vport])
2372 return &steering->esw_egress_root_ns[vport]->ns;
2373 else
2374 return NULL;
2375 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2376 if (vport >= steering->esw_ingress_acl_vports)
2377 return NULL;
2378 if (steering->esw_ingress_root_ns &&
2379 steering->esw_ingress_root_ns[vport])
2380 return &steering->esw_ingress_root_ns[vport]->ns;
2381 else
2382 return NULL;
2383 default:
2384 return NULL;
2385 }
2386 }
2387
2388 static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2389 unsigned int prio,
2390 int num_levels,
2391 enum fs_node_type type)
2392 {
2393 struct fs_prio *fs_prio;
2394
2395 fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2396 if (!fs_prio)
2397 return ERR_PTR(-ENOMEM);
2398
2399 fs_prio->node.type = type;
2400 tree_init_node(&fs_prio->node, NULL, del_sw_prio);
2401 tree_add_node(&fs_prio->node, &ns->node);
2402 fs_prio->num_levels = num_levels;
2403 fs_prio->prio = prio;
2404 list_add_tail(&fs_prio->node.list, &ns->node.children);
2405
2406 return fs_prio;
2407 }
2408
2409 static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2410 unsigned int prio,
2411 int num_levels)
2412 {
2413 return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2414 }
2415
2416 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2417 unsigned int prio, int num_levels)
2418 {
2419 return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
2420 }
2421
2422 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
2423 *ns)
2424 {
2425 ns->node.type = FS_TYPE_NAMESPACE;
2426
2427 return ns;
2428 }
2429
2430 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2431 int def_miss_act)
2432 {
2433 struct mlx5_flow_namespace *ns;
2434
2435 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2436 if (!ns)
2437 return ERR_PTR(-ENOMEM);
2438
2439 fs_init_namespace(ns);
2440 ns->def_miss_action = def_miss_act;
2441 tree_init_node(&ns->node, NULL, del_sw_ns);
2442 tree_add_node(&ns->node, &prio->node);
2443 list_add_tail(&ns->node.list, &prio->node.children);
2444
2445 return ns;
2446 }
2447
2448 static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2449 struct init_tree_node *prio_metadata)
2450 {
2451 struct fs_prio *fs_prio;
2452 int i;
2453
2454 for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
2455 fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
2456 if (IS_ERR(fs_prio))
2457 return PTR_ERR(fs_prio);
2458 }
2459 return 0;
2460 }
2461
2462 #define FLOW_TABLE_BIT_SZ 1
2463 #define GET_FLOW_TABLE_CAP(dev, offset) \
2464 ((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) + \
2465 offset / 32)) >> \
2466 (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
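/*
 * Worked example of the extraction above (numbers are hypothetical): a
 * capability at bit offset 35 lands in the second 32-bit word of the
 * capability area (35 / 32 == 1), at bit 3 counted from that word's most
 * significant bit (35 & 0x1f). After be32_to_cpu(), the macro shifts the
 * word right by 32 - 1 - 3 == 28 bits and masks with 1, which yields that
 * single capability bit.
 */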
2467 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2468 {
2469 int i;
2470
2471 for (i = 0; i < caps->arr_sz; i++) {
2472 if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
2473 return false;
2474 }
2475 return true;
2476 }
2477
2478 static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
2479 struct init_tree_node *init_node,
2480 struct fs_node *fs_parent_node,
2481 struct init_tree_node *init_parent_node,
2482 int prio)
2483 {
2484 int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
2485 flow_table_properties_nic_receive.
2486 max_ft_level);
2487 struct mlx5_flow_namespace *fs_ns;
2488 struct fs_prio *fs_prio;
2489 struct fs_node *base;
2490 int i;
2491 int err;
2492
2493 if (init_node->type == FS_TYPE_PRIO) {
2494 if ((init_node->min_ft_level > max_ft_level) ||
2495 !has_required_caps(steering->dev, &init_node->caps))
2496 return 0;
2497
2498 fs_get_obj(fs_ns, fs_parent_node);
2499 if (init_node->num_leaf_prios)
2500 return create_leaf_prios(fs_ns, prio, init_node);
2501 fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
2502 if (IS_ERR(fs_prio))
2503 return PTR_ERR(fs_prio);
2504 base = &fs_prio->node;
2505 } else if (init_node->type == FS_TYPE_NAMESPACE) {
2506 fs_get_obj(fs_prio, fs_parent_node);
2507 fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
2508 if (IS_ERR(fs_ns))
2509 return PTR_ERR(fs_ns);
2510 base = &fs_ns->node;
2511 } else {
2512 return -EINVAL;
2513 }
2514 prio = 0;
2515 for (i = 0; i < init_node->ar_size; i++) {
2516 err = init_root_tree_recursive(steering, &init_node->children[i],
2517 base, init_node, prio);
2518 if (err)
2519 return err;
2520 if (init_node->children[i].type == FS_TYPE_PRIO &&
2521 init_node->children[i].num_leaf_prios) {
2522 prio += init_node->children[i].num_leaf_prios;
2523 }
2524 }
2525
2526 return 0;
2527 }
2528
2529 static int init_root_tree(struct mlx5_flow_steering *steering,
2530 struct init_tree_node *init_node,
2531 struct fs_node *fs_parent_node)
2532 {
2533 int err;
2534 int i;
2535
2536 for (i = 0; i < init_node->ar_size; i++) {
2537 err = init_root_tree_recursive(steering, &init_node->children[i],
2538 fs_parent_node,
2539 init_node, i);
2540 if (err)
2541 return err;
2542 }
2543 return 0;
2544 }
2545
2546 static void del_sw_root_ns(struct fs_node *node)
2547 {
2548 struct mlx5_flow_root_namespace *root_ns;
2549 struct mlx5_flow_namespace *ns;
2550
2551 fs_get_obj(ns, node);
2552 root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
2553 mutex_destroy(&root_ns->chain_lock);
2554 kfree(node);
2555 }
2556
2557 static struct mlx5_flow_root_namespace
2558 *create_root_ns(struct mlx5_flow_steering *steering,
2559 enum fs_flow_table_type table_type)
2560 {
2561 const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
2562 struct mlx5_flow_root_namespace *root_ns;
2563 struct mlx5_flow_namespace *ns;
2564
2565 /* Create the root namespace */
2566 root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
2567 if (!root_ns)
2568 return NULL;
2569
2570 root_ns->dev = steering->dev;
2571 root_ns->table_type = table_type;
2572 root_ns->cmds = cmds;
2573
2574 INIT_LIST_HEAD(&root_ns->underlay_qpns);
2575
2576 ns = &root_ns->ns;
2577 fs_init_namespace(ns);
2578 mutex_init(&root_ns->chain_lock);
2579 tree_init_node(&ns->node, NULL, del_sw_root_ns);
2580 tree_add_node(&ns->node, NULL);
2581
2582 return root_ns;
2583 }
2584
2585 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
2586
2587 static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
2588 {
2589 struct fs_prio *prio;
2590
2591 fs_for_each_prio(prio, ns) {
2592 /* This updates prio start_level and num_levels */
2593 set_prio_attrs_in_prio(prio, acc_level);
2594 acc_level += prio->num_levels;
2595 }
2596 return acc_level;
2597 }
2598
2599 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
2600 {
2601 struct mlx5_flow_namespace *ns;
2602 int acc_level_ns = acc_level;
2603
2604 prio->start_level = acc_level;
2605 fs_for_each_ns(ns, prio) {
2606 /* This updates start_level and num_levels of ns's priority descendants */
2607 acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
2608
2609 /* If this is a prio with chains, we can jump from one chain
2610 * (namespace) to another, so we accumulate the levels.
2611 */
2612 if (prio->node.type == FS_TYPE_PRIO_CHAINS)
2613 acc_level = acc_level_ns;
2614 }
2615
2616 if (!prio->num_levels)
2617 prio->num_levels = acc_level_ns - prio->start_level;
2618 WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
2619 }
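/*
 * Worked example (hypothetical numbers): take a prio that starts at
 * acc_level 10 and holds two namespaces whose own prios consume 4 levels
 * each. If the prio is FS_TYPE_PRIO_CHAINS, acc_level advances between
 * the namespaces, so (when num_levels was not preset) it ends up with
 * start_level == 10 and num_levels == 18 - 10 == 8. If it is a plain
 * FS_TYPE_PRIO, acc_level is not advanced, the two namespaces share
 * levels 10..13, and num_levels becomes 4.
 */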
2620
2621 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
2622 {
2623 struct mlx5_flow_namespace *ns = &root_ns->ns;
2624 struct fs_prio *prio;
2625 int start_level = 0;
2626
2627 fs_for_each_prio(prio, ns) {
2628 set_prio_attrs_in_prio(prio, start_level);
2629 start_level += prio->num_levels;
2630 }
2631 }
2632
2633 #define ANCHOR_PRIO 0
2634 #define ANCHOR_SIZE 1
2635 #define ANCHOR_LEVEL 0
2636 static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
2637 {
2638 struct mlx5_flow_namespace *ns = NULL;
2639 struct mlx5_flow_table_attr ft_attr = {};
2640 struct mlx5_flow_table *ft;
2641
2642 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
2643 if (WARN_ON(!ns))
2644 return -EINVAL;
2645
2646 ft_attr.max_fte = ANCHOR_SIZE;
2647 ft_attr.level = ANCHOR_LEVEL;
2648 ft_attr.prio = ANCHOR_PRIO;
2649
2650 ft = mlx5_create_flow_table(ns, &ft_attr);
2651 if (IS_ERR(ft)) {
2652 mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
2653 return PTR_ERR(ft);
2654 }
2655 return 0;
2656 }
2657
2658 static int init_root_ns(struct mlx5_flow_steering *steering)
2659 {
2660 int err;
2661
2662 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
2663 if (!steering->root_ns)
2664 return -ENOMEM;
2665
2666 err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2667 if (err)
2668 goto out_err;
2669
2670 set_prio_attrs(steering->root_ns);
2671 err = create_anchor_flow_table(steering);
2672 if (err)
2673 goto out_err;
2674
2675 return 0;
2676
2677 out_err:
2678 cleanup_root_ns(steering->root_ns);
2679 steering->root_ns = NULL;
2680 return err;
2681 }
2682
2683 static void clean_tree(struct fs_node *node)
2684 {
2685 if (node) {
2686 struct fs_node *iter;
2687 struct fs_node *temp;
2688
2689 tree_get_node(node);
2690 list_for_each_entry_safe(iter, temp, &node->children, list)
2691 clean_tree(iter);
2692 tree_put_node(node, false);
2693 tree_remove_node(node, false);
2694 }
2695 }
2696
2697 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
2698 {
2699 if (!root_ns)
2700 return;
2701
2702 clean_tree(&root_ns->ns.node);
2703 }
2704
2705 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2706 {
2707 struct fs_prio *prio;
2708
2709 steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2710 if (!steering->sniffer_tx_root_ns)
2711 return -ENOMEM;
2712
2713 /* Create single prio */
2714 prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2715 return PTR_ERR_OR_ZERO(prio);
2716 }
2717
2718 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2719 {
2720 struct fs_prio *prio;
2721
2722 steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2723 if (!steering->sniffer_rx_root_ns)
2724 return -ENOMEM;
2725
2726 /* Create single prio */
2727 prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2728 return PTR_ERR_OR_ZERO(prio);
2729 }
2730
2731 #define PORT_SEL_NUM_LEVELS 3
2732 static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
2733 {
2734 struct fs_prio *prio;
2735
2736 steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
2737 if (!steering->port_sel_root_ns)
2738 return -ENOMEM;
2739
2740 /* Create single prio */
2741 prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
2742 PORT_SEL_NUM_LEVELS);
2743 return PTR_ERR_OR_ZERO(prio);
2744 }
2745
2746 static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
2747 {
2748 int err;
2749
2750 steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
2751 if (!steering->rdma_rx_root_ns)
2752 return -ENOMEM;
2753
2754 err = init_root_tree(steering, &rdma_rx_root_fs,
2755 &steering->rdma_rx_root_ns->ns.node);
2756 if (err)
2757 goto out_err;
2758
2759 set_prio_attrs(steering->rdma_rx_root_ns);
2760
2761 return 0;
2762
2763 out_err:
2764 cleanup_root_ns(steering->rdma_rx_root_ns);
2765 steering->rdma_rx_root_ns = NULL;
2766 return err;
2767 }
2768
2769 static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
2770 {
2771 int err;
2772
2773 steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
2774 if (!steering->rdma_tx_root_ns)
2775 return -ENOMEM;
2776
2777 err = init_root_tree(steering, &rdma_tx_root_fs,
2778 &steering->rdma_tx_root_ns->ns.node);
2779 if (err)
2780 goto out_err;
2781
2782 set_prio_attrs(steering->rdma_tx_root_ns);
2783
2784 return 0;
2785
2786 out_err:
2787 cleanup_root_ns(steering->rdma_tx_root_ns);
2788 steering->rdma_tx_root_ns = NULL;
2789 return err;
2790 }
2791
2792 /* FT and tc chains are stored in the same array so we can re-use the
2793 * mlx5_get_fdb_sub_ns() and tc api for FT chains.
2794 * When creating a new ns for each chain, store it in the first available slot.
2795 * Assume tc chains are created and stored first and only then the FT chain.
2796 */
2797 static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2798 struct mlx5_flow_namespace *ns)
2799 {
2800 int chain = 0;
2801
2802 while (steering->fdb_sub_ns[chain])
2803 ++chain;
2804
2805 steering->fdb_sub_ns[chain] = ns;
2806 }
2807
2808 static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2809 struct fs_prio *maj_prio)
2810 {
2811 struct mlx5_flow_namespace *ns;
2812 struct fs_prio *min_prio;
2813 int prio;
2814
2815 ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2816 if (IS_ERR(ns))
2817 return PTR_ERR(ns);
2818
2819 for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
2820 min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
2821 if (IS_ERR(min_prio))
2822 return PTR_ERR(min_prio);
2823 }
2824
2825 store_fdb_sub_ns_prio_chain(steering, ns);
2826
2827 return 0;
2828 }
2829
2830 static int create_fdb_chains(struct mlx5_flow_steering *steering,
2831 int fs_prio,
2832 int chains)
2833 {
2834 struct fs_prio *maj_prio;
2835 int levels;
2836 int chain;
2837 int err;
2838
2839 levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
2840 maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
2841 fs_prio,
2842 levels);
2843 if (IS_ERR(maj_prio))
2844 return PTR_ERR(maj_prio);
2845
2846 for (chain = 0; chain < chains; chain++) {
2847 err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
2848 if (err)
2849 return err;
2850 }
2851
2852 return 0;
2853 }
2854
2855 static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
2856 {
2857 int err;
2858
2859 steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
2860 sizeof(*steering->fdb_sub_ns),
2861 GFP_KERNEL);
2862 if (!steering->fdb_sub_ns)
2863 return -ENOMEM;
2864
2865 err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
2866 if (err)
2867 return err;
2868
2869 err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
2870 if (err)
2871 return err;
2872
2873 return 0;
2874 }
2875
2876 static int create_fdb_bypass(struct mlx5_flow_steering *steering)
2877 {
2878 struct mlx5_flow_namespace *ns;
2879 struct fs_prio *prio;
2880 int i;
2881
2882 prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
2883 if (IS_ERR(prio))
2884 return PTR_ERR(prio);
2885
2886 ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2887 if (IS_ERR(ns))
2888 return PTR_ERR(ns);
2889
2890 for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
2891 prio = fs_create_prio(ns, i, 1);
2892 if (IS_ERR(prio))
2893 return PTR_ERR(prio);
2894 }
2895 return 0;
2896 }
2897
2898 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
2899 {
2900 struct fs_prio *maj_prio;
2901 int err;
2902
2903 steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
2904 if (!steering->fdb_root_ns)
2905 return -ENOMEM;
2906
2907 err = create_fdb_bypass(steering);
2908 if (err)
2909 goto out_err;
2910
2911 err = create_fdb_fast_path(steering);
2912 if (err)
2913 goto out_err;
2914
2915 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
2916 if (IS_ERR(maj_prio)) {
2917 err = PTR_ERR(maj_prio);
2918 goto out_err;
2919 }
2920
2921 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
2922 if (IS_ERR(maj_prio)) {
2923 err = PTR_ERR(maj_prio);
2924 goto out_err;
2925 }
2926
2927 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
2928 if (IS_ERR(maj_prio)) {
2929 err = PTR_ERR(maj_prio);
2930 goto out_err;
2931 }
2932
2933 /* We put this priority last, knowing that nothing will get here
2934 * unless explicitly forwarded to. This is possible because the
2935 * slow path tables have catch-all rules and nothing gets past
2936 * those tables.
2937 */
2938 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
2939 if (IS_ERR(maj_prio)) {
2940 err = PTR_ERR(maj_prio);
2941 goto out_err;
2942 }
2943
2944 set_prio_attrs(steering->fdb_root_ns);
2945 return 0;
2946
2947 out_err:
2948 cleanup_root_ns(steering->fdb_root_ns);
2949 kfree(steering->fdb_sub_ns);
2950 steering->fdb_sub_ns = NULL;
2951 steering->fdb_root_ns = NULL;
2952 return err;
2953 }
2954
2955 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2956 {
2957 struct fs_prio *prio;
2958
2959 steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
2960 if (!steering->esw_egress_root_ns[vport])
2961 return -ENOMEM;
2962
2963 /* create 1 prio */
2964 prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
2965 return PTR_ERR_OR_ZERO(prio);
2966 }
2967
2968 static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2969 {
2970 struct fs_prio *prio;
2971
2972 steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
2973 if (!steering->esw_ingress_root_ns[vport])
2974 return -ENOMEM;
2975
2976 /* create 1 prio */
2977 prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
2978 return PTR_ERR_OR_ZERO(prio);
2979 }
2980
2981 int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
2982 {
2983 struct mlx5_flow_steering *steering = dev->priv.steering;
2984 int err;
2985 int i;
2986
2987 steering->esw_egress_root_ns =
2988 kcalloc(total_vports,
2989 sizeof(*steering->esw_egress_root_ns),
2990 GFP_KERNEL);
2991 if (!steering->esw_egress_root_ns)
2992 return -ENOMEM;
2993
2994 for (i = 0; i < total_vports; i++) {
2995 err = init_egress_acl_root_ns(steering, i);
2996 if (err)
2997 goto cleanup_root_ns;
2998 }
2999 steering->esw_egress_acl_vports = total_vports;
3000 return 0;
3001
3002 cleanup_root_ns:
3003 for (i--; i >= 0; i--)
3004 cleanup_root_ns(steering->esw_egress_root_ns[i]);
3005 kfree(steering->esw_egress_root_ns);
3006 steering->esw_egress_root_ns = NULL;
3007 return err;
3008 }
3009
3010 void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
3011 {
3012 struct mlx5_flow_steering *steering = dev->priv.steering;
3013 int i;
3014
3015 if (!steering->esw_egress_root_ns)
3016 return;
3017
3018 for (i = 0; i < steering->esw_egress_acl_vports; i++)
3019 cleanup_root_ns(steering->esw_egress_root_ns[i]);
3020
3021 kfree(steering->esw_egress_root_ns);
3022 steering->esw_egress_root_ns = NULL;
3023 }
3024
3025 int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
3026 {
3027 struct mlx5_flow_steering *steering = dev->priv.steering;
3028 int err;
3029 int i;
3030
3031 steering->esw_ingress_root_ns =
3032 kcalloc(total_vports,
3033 sizeof(*steering->esw_ingress_root_ns),
3034 GFP_KERNEL);
3035 if (!steering->esw_ingress_root_ns)
3036 return -ENOMEM;
3037
3038 for (i = 0; i < total_vports; i++) {
3039 err = init_ingress_acl_root_ns(steering, i);
3040 if (err)
3041 goto cleanup_root_ns;
3042 }
3043 steering->esw_ingress_acl_vports = total_vports;
3044 return 0;
3045
3046 cleanup_root_ns:
3047 for (i--; i >= 0; i--)
3048 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3049 kfree(steering->esw_ingress_root_ns);
3050 steering->esw_ingress_root_ns = NULL;
3051 return err;
3052 }
3053
3054 void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
3055 {
3056 struct mlx5_flow_steering *steering = dev->priv.steering;
3057 int i;
3058
3059 if (!steering->esw_ingress_root_ns)
3060 return;
3061
3062 for (i = 0; i < steering->esw_ingress_acl_vports; i++)
3063 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
3064
3065 kfree(steering->esw_ingress_root_ns);
3066 steering->esw_ingress_root_ns = NULL;
3067 }
3068
3069 u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
3070 {
3071 struct mlx5_flow_root_namespace *root;
3072 struct mlx5_flow_namespace *ns;
3073
3074 ns = mlx5_get_flow_namespace(dev, type);
3075 if (!ns)
3076 return 0;
3077
3078 root = find_root(&ns->node);
3079 if (!root)
3080 return 0;
3081
3082 return root->cmds->get_capabilities(root, root->table_type);
3083 }
3084
3085 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
3086 {
3087 int err;
3088
3089 steering->egress_root_ns = create_root_ns(steering,
3090 FS_FT_NIC_TX);
3091 if (!steering->egress_root_ns)
3092 return -ENOMEM;
3093
3094 err = init_root_tree(steering, &egress_root_fs,
3095 &steering->egress_root_ns->ns.node);
3096 if (err)
3097 goto cleanup;
3098 set_prio_attrs(steering->egress_root_ns);
3099 return 0;
3100 cleanup:
3101 cleanup_root_ns(steering->egress_root_ns);
3102 steering->egress_root_ns = NULL;
3103 return err;
3104 }
3105
3106 void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
3107 {
3108 struct mlx5_flow_steering *steering = dev->priv.steering;
3109
3110 cleanup_root_ns(steering->root_ns);
3111 cleanup_root_ns(steering->fdb_root_ns);
3112 steering->fdb_root_ns = NULL;
3113 kfree(steering->fdb_sub_ns);
3114 steering->fdb_sub_ns = NULL;
3115 cleanup_root_ns(steering->port_sel_root_ns);
3116 cleanup_root_ns(steering->sniffer_rx_root_ns);
3117 cleanup_root_ns(steering->sniffer_tx_root_ns);
3118 cleanup_root_ns(steering->rdma_rx_root_ns);
3119 cleanup_root_ns(steering->rdma_tx_root_ns);
3120 cleanup_root_ns(steering->egress_root_ns);
3121 }
3122
3123 int mlx5_fs_core_init(struct mlx5_core_dev *dev)
3124 {
3125 struct mlx5_flow_steering *steering = dev->priv.steering;
3126 int err = 0;
3127
3128 if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
3129 (MLX5_CAP_GEN(dev, nic_flow_table))) ||
3130 ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
3131 MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
3132 MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
3133 err = init_root_ns(steering);
3134 if (err)
3135 goto err;
3136 }
3137
3138 if (MLX5_ESWITCH_MANAGER(dev)) {
3139 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
3140 err = init_fdb_root_ns(steering);
3141 if (err)
3142 goto err;
3143 }
3144 }
3145
3146 if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
3147 err = init_sniffer_rx_root_ns(steering);
3148 if (err)
3149 goto err;
3150 }
3151
3152 if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
3153 err = init_sniffer_tx_root_ns(steering);
3154 if (err)
3155 goto err;
3156 }
3157
3158 if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
3159 err = init_port_sel_root_ns(steering);
3160 if (err)
3161 goto err;
3162 }
3163
3164 if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
3165 MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
3166 err = init_rdma_rx_root_ns(steering);
3167 if (err)
3168 goto err;
3169 }
3170
3171 if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
3172 err = init_rdma_tx_root_ns(steering);
3173 if (err)
3174 goto err;
3175 }
3176
3177 if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
3178 err = init_egress_root_ns(steering);
3179 if (err)
3180 goto err;
3181 }
3182
3183 return 0;
3184
3185 err:
3186 mlx5_fs_core_cleanup(dev);
3187 return err;
3188 }
3189
3190 void mlx5_fs_core_free(struct mlx5_core_dev *dev)
3191 {
3192 struct mlx5_flow_steering *steering = dev->priv.steering;
3193
3194 kmem_cache_destroy(steering->ftes_cache);
3195 kmem_cache_destroy(steering->fgs_cache);
3196 kfree(steering);
3197 mlx5_ft_pool_destroy(dev);
3198 mlx5_cleanup_fc_stats(dev);
3199 }
3200
3201 int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
3202 {
3203 struct mlx5_flow_steering *steering;
3204 int err = 0;
3205
3206 err = mlx5_init_fc_stats(dev);
3207 if (err)
3208 return err;
3209
3210 err = mlx5_ft_pool_init(dev);
3211 if (err)
3212 goto err;
3213
3214 steering = kzalloc(sizeof(*steering), GFP_KERNEL);
3215 if (!steering) {
3216 err = -ENOMEM;
3217 goto err;
3218 }
3219
3220 steering->dev = dev;
3221 dev->priv.steering = steering;
3222
3223 if (mlx5_fs_dr_is_supported(dev))
3224 steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
3225 else
3226 steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
3227
3228 steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
3229 sizeof(struct mlx5_flow_group), 0,
3230 0, NULL);
3231 steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
3232 0, NULL);
3233 if (!steering->ftes_cache || !steering->fgs_cache) {
3234 err = -ENOMEM;
3235 goto err;
3236 }
3237
3238 return 0;
3239
3240 err:
3241 mlx5_fs_core_free(dev);
3242 return err;
3243 }
3244
3245 int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3246 {
3247 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3248 struct mlx5_ft_underlay_qp *new_uqp;
3249 int err = 0;
3250
3251 new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
3252 if (!new_uqp)
3253 return -ENOMEM;
3254
3255 mutex_lock(&root->chain_lock);
3256
3257 if (!root->root_ft) {
3258 err = -EINVAL;
3259 goto update_ft_fail;
3260 }
3261
3262 err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3263 false);
3264 if (err) {
3265 mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
3266 underlay_qpn, err);
3267 goto update_ft_fail;
3268 }
3269
3270 new_uqp->qpn = underlay_qpn;
3271 list_add_tail(&new_uqp->list, &root->underlay_qpns);
3272
3273 mutex_unlock(&root->chain_lock);
3274
3275 return 0;
3276
3277 update_ft_fail:
3278 mutex_unlock(&root->chain_lock);
3279 kfree(new_uqp);
3280 return err;
3281 }
3282 EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
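/*
 * Usage sketch (illustrative only): an IPoIB-style user registers its
 * underlay QP against the RX root table on setup and removes it on
 * teardown. "mdev" and "qp->qpn" are assumed caller-side variables.
 *
 *	err = mlx5_fs_add_rx_underlay_qpn(mdev, qp->qpn);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_fs_remove_rx_underlay_qpn(mdev, qp->qpn);
 */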
3283
3284 int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
3285 {
3286 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3287 struct mlx5_ft_underlay_qp *uqp;
3288 bool found = false;
3289 int err = 0;
3290
3291 mutex_lock(&root->chain_lock);
3292 list_for_each_entry(uqp, &root->underlay_qpns, list) {
3293 if (uqp->qpn == underlay_qpn) {
3294 found = true;
3295 break;
3296 }
3297 }
3298
3299 if (!found) {
3300 mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
3301 underlay_qpn);
3302 err = -EINVAL;
3303 goto out;
3304 }
3305
3306 err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3307 true);
3308 if (err)
3309 mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
3310 underlay_qpn, err);
3311
3312 list_del(&uqp->list);
3313 mutex_unlock(&root->chain_lock);
3314 kfree(uqp);
3315
3316 return 0;
3317
3318 out:
3319 mutex_unlock(&root->chain_lock);
3320 return err;
3321 }
3322 EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
3323
3324 static struct mlx5_flow_root_namespace
3325 *get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
3326 {
3327 struct mlx5_flow_namespace *ns;
3328
3329 if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
3330 ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
3331 ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3332 else
3333 ns = mlx5_get_flow_namespace(dev, ns_type);
3334 if (!ns)
3335 return NULL;
3336
3337 return find_root(&ns->node);
3338 }
3339
3340 struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
3341 u8 ns_type, u8 num_actions,
3342 void *modify_actions)
3343 {
3344 struct mlx5_flow_root_namespace *root;
3345 struct mlx5_modify_hdr *modify_hdr;
3346 int err;
3347
3348 root = get_root_namespace(dev, ns_type);
3349 if (!root)
3350 return ERR_PTR(-EOPNOTSUPP);
3351
3352 modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
3353 if (!modify_hdr)
3354 return ERR_PTR(-ENOMEM);
3355
3356 modify_hdr->ns_type = ns_type;
3357 err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
3358 modify_actions, modify_hdr);
3359 if (err) {
3360 kfree(modify_hdr);
3361 return ERR_PTR(err);
3362 }
3363
3364 return modify_hdr;
3365 }
3366 EXPORT_SYMBOL(mlx5_modify_header_alloc);
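/*
 * Usage sketch (illustrative only): building a single SET action using the
 * mlx5_ifc set_action_in layout and allocating a modify-header context for
 * the NIC TX namespace. The field, offset, length and data values below are
 * assumptions made for the example; real callers take them from mlx5_ifc.h.
 *
 *	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
 *	struct mlx5_modify_hdr *mh;
 *
 *	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
 *	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
 *	MLX5_SET(set_action_in, action, offset, 0);
 *	MLX5_SET(set_action_in, action, length, 8);
 *	MLX5_SET(set_action_in, action, data, 64);
 *
 *	mh = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_EGRESS,
 *				      1, action);
 *	if (IS_ERR(mh))
 *		return PTR_ERR(mh);
 *	...
 *	mlx5_modify_header_dealloc(dev, mh);
 */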
3367
3368 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
3369 struct mlx5_modify_hdr *modify_hdr)
3370 {
3371 struct mlx5_flow_root_namespace *root;
3372
3373 root = get_root_namespace(dev, modify_hdr->ns_type);
3374 if (WARN_ON(!root))
3375 return;
3376 root->cmds->modify_header_dealloc(root, modify_hdr);
3377 kfree(modify_hdr);
3378 }
3379 EXPORT_SYMBOL(mlx5_modify_header_dealloc);
3380
3381 struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
3382 struct mlx5_pkt_reformat_params *params,
3383 enum mlx5_flow_namespace_type ns_type)
3384 {
3385 struct mlx5_pkt_reformat *pkt_reformat;
3386 struct mlx5_flow_root_namespace *root;
3387 int err;
3388
3389 root = get_root_namespace(dev, ns_type);
3390 if (!root)
3391 return ERR_PTR(-EOPNOTSUPP);
3392
3393 pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
3394 if (!pkt_reformat)
3395 return ERR_PTR(-ENOMEM);
3396
3397 pkt_reformat->ns_type = ns_type;
3398 pkt_reformat->reformat_type = params->type;
3399 err = root->cmds->packet_reformat_alloc(root, params, ns_type,
3400 pkt_reformat);
3401 if (err) {
3402 kfree(pkt_reformat);
3403 return ERR_PTR(err);
3404 }
3405
3406 return pkt_reformat;
3407 }
3408 EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
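/*
 * Usage sketch (illustrative only): allocating an encap context from a
 * caller-built header buffer. "encap_hdr" and "encap_sz" are assumed to hold
 * a preformatted tunnel header; the reformat type is one of the mlx5_ifc
 * reformat types.
 *
 *	struct mlx5_pkt_reformat_params params = {
 *		.type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
 *		.size = encap_sz,
 *		.data = encap_hdr,
 *	};
 *	struct mlx5_pkt_reformat *reformat;
 *
 *	reformat = mlx5_packet_reformat_alloc(dev, &params,
 *					      MLX5_FLOW_NAMESPACE_FDB);
 *	if (IS_ERR(reformat))
 *		return PTR_ERR(reformat);
 *	...
 *	mlx5_packet_reformat_dealloc(dev, reformat);
 */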
3409
3410 void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
3411 struct mlx5_pkt_reformat *pkt_reformat)
3412 {
3413 struct mlx5_flow_root_namespace *root;
3414
3415 root = get_root_namespace(dev, pkt_reformat->ns_type);
3416 if (WARN_ON(!root))
3417 return;
3418 root->cmds->packet_reformat_dealloc(root, pkt_reformat);
3419 kfree(pkt_reformat);
3420 }
3421 EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
3422
3423 int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
3424 {
3425 return definer->id;
3426 }
3427
3428 struct mlx5_flow_definer *
3429 mlx5_create_match_definer(struct mlx5_core_dev *dev,
3430 enum mlx5_flow_namespace_type ns_type, u16 format_id,
3431 u32 *match_mask)
3432 {
3433 struct mlx5_flow_root_namespace *root;
3434 struct mlx5_flow_definer *definer;
3435 int id;
3436
3437 root = get_root_namespace(dev, ns_type);
3438 if (!root)
3439 return ERR_PTR(-EOPNOTSUPP);
3440
3441 definer = kzalloc(sizeof(*definer), GFP_KERNEL);
3442 if (!definer)
3443 return ERR_PTR(-ENOMEM);
3444
3445 definer->ns_type = ns_type;
3446 id = root->cmds->create_match_definer(root, format_id, match_mask);
3447 if (id < 0) {
3448 mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
3449 kfree(definer);
3450 return ERR_PTR(id);
3451 }
3452 definer->id = id;
3453 return definer;
3454 }
3455
3456 void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
3457 struct mlx5_flow_definer *definer)
3458 {
3459 struct mlx5_flow_root_namespace *root;
3460
3461 root = get_root_namespace(dev, definer->ns_type);
3462 if (WARN_ON(!root))
3463 return;
3464
3465 root->cmds->destroy_match_definer(root, definer->id);
3466 kfree(definer);
3467 }
3468
3469 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
3470 struct mlx5_flow_root_namespace *peer_ns)
3471 {
3472 if (peer_ns && ns->mode != peer_ns->mode) {
3473 mlx5_core_err(ns->dev,
3474 "Can't peer namespace of different steering mode\n");
3475 return -EINVAL;
3476 }
3477
3478 return ns->cmds->set_peer(ns, peer_ns);
3479 }
3480
3481 /* This function should be called only at the init stage of the namespace.
3482 * It is not safe to call it while steering operations are being
3483 * executed in the namespace.
3484 */
3485 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
3486 enum mlx5_flow_steering_mode mode)
3487 {
3488 struct mlx5_flow_root_namespace *root;
3489 const struct mlx5_flow_cmds *cmds;
3490 int err;
3491
3492 root = find_root(&ns->node);
3493 if (&root->ns != ns)
3494 /* Can't set cmds to a non-root namespace */
3495 return -EINVAL;
3496
3497 if (root->table_type != FS_FT_FDB)
3498 return -EOPNOTSUPP;
3499
3500 if (root->mode == mode)
3501 return 0;
3502
3503 if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
3504 cmds = mlx5_fs_cmd_get_dr_cmds();
3505 else
3506 cmds = mlx5_fs_cmd_get_fw_cmds();
3507 if (!cmds)
3508 return -EOPNOTSUPP;
3509
3510 err = cmds->create_ns(root);
3511 if (err) {
3512 mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
3513 err);
3514 return err;
3515 }
3516
3517 root->cmds->destroy_ns(root);
3518 root->cmds = cmds;
3519 root->mode = mode;
3520
3521 return 0;
3522 }
3523