/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "mlx5_core.h"
#include "eswitch.h"

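/*
 * The mlx5_cmd_stub_*() callbacks below implement the flow steering
 * command interface as no-ops. They are returned by
 * mlx5_fs_cmd_get_default() for flow table types that are not
 * programmed through firmware commands, so namespace bookkeeping can
 * proceed without issuing device commands.
 */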
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table_attr *ft_attr,
					   struct mlx5_flow_table *next_ft)
{
	int max_fte = ft_attr->max_fte;

	ft->max_fte = max_fte ? roundup_pow_of_two(max_fte) : 1;

	return 0;
}

static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft)
{
	return 0;
}

static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}

static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					       struct mlx5_pkt_reformat_params *params,
					       enum mlx5_flow_namespace_type namespace,
					       struct mlx5_pkt_reformat *pkt_reformat)
{
	return 0;
}

static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
						  struct mlx5_pkt_reformat *pkt_reformat)
{
}

static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					     u8 namespace, u8 num_actions,
					     void *modify_actions,
					     struct mlx5_modify_hdr *modify_hdr)
{
	return 0;
}

static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
						struct mlx5_modify_hdr *modify_hdr)
{
}

static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_root_namespace *peer_ns)
{
	return 0;
}

static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

static u32 mlx5_cmd_stub_get_capabilities(struct mlx5_flow_root_namespace *ns,
					  enum fs_flow_table_type ft_type)
{
	return 0;
}

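/*
 * In a shared-FDB LAG configuration the slave device's FDB root is
 * redirected to a flow table owned by the master eswitch. When
 * ft_id_valid is false the slave is pointed back at its own FDB root
 * table instead.
 */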
static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
				       struct mlx5_core_dev *slave,
				       bool ft_id_valid,
				       u32 ft_id)
{
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type,
		 FS_FT_FDB);
	if (ft_id_valid) {
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id_valid, 1);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(master, vhca_id));
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 ft_id);
	} else {
		ns = mlx5_get_flow_namespace(slave,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	}

	return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
}

static int
mlx5_cmd_stub_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
				    int definer_id)
{
	return 0;
}

static int
mlx5_cmd_stub_create_match_definer(struct mlx5_flow_root_namespace *ns,
				   u16 format_id, u32 *match_mask)
{
	return 0;
}

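/*
 * Program the root flow table of a namespace via SET_FLOW_TABLE_ROOT.
 * IB ports without an underlay QPN and non-master devices of a
 * shared-FDB LAG are skipped. On the LAG master the new FDB root is
 * also propagated to the peer device; if that fails, the previous
 * root is restored locally.
 */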
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft, u32 underlay_qpn,
				   bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0)
		return 0;

	if (ft->type == FS_FT_FDB &&
	    mlx5_lag_is_shared_fdb(dev) &&
	    !mlx5_lag_is_master(dev))
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	if (disconnect)
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
	else
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
	MLX5_SET(set_flow_table_root_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	err = mlx5_cmd_exec_in(dev, set_flow_table_root, in);
	if (!err &&
	    ft->type == FS_FT_FDB &&
	    mlx5_lag_is_shared_fdb(dev) &&
	    mlx5_lag_is_master(dev)) {
		err = mlx5_cmd_set_slave_root_fdb(dev,
						  mlx5_lag_get_peer_mdev(dev),
						  !disconnect, (!disconnect) ?
						  ft->id : 0);
		if (err && !disconnect) {
			MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
			MLX5_SET(set_flow_table_root_in, in, table_id,
				 ns->root_ft->id);
			mlx5_cmd_exec_in(dev, set_flow_table_root, in);
		}
	}

	return err;
}

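/*
 * Create a flow table in firmware. The actual table size is taken
 * from the flow table pool and programmed as log_size; the size is
 * returned to the pool if the command fails.
 */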
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table_attr *ft_attr,
				      struct mlx5_flow_table *next_ft)
{
	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	unsigned int size;
	int err;

	size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
	if (!size)
		return -ENOSPC;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
	MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
		 term);

	switch (ft->op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		} else {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
	if (!err) {
		ft->id = MLX5_GET(create_flow_table_out, out,
				  table_id);
		ft->max_fte = size;
	} else {
		mlx5_ft_pool_put_sz(ns->dev, size);
	}

	return err;
}

static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
	if (!err)
		mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);

	return err;
}

static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(modify_flow_table_in, in, other_vport,
			 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
	}

	return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}

static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      u32 *in,
				      struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
	if (!err)
		fg->id = MLX5_GET(create_flow_group_out, out,
				  group_id);
	return err;
}

static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       struct mlx5_flow_group *fg)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
	MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}

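/*
 * Decide whether the FTE must use the extended destination format:
 * it is required when there is more than one forwarding destination
 * and at least one vport/uplink destination carries a packet
 * reformat id. Also validates the number of encap destinations
 * against the log_max_fdb_encap_uplink capability.
 */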
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
				  struct fs_fte *fte, bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink =
		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	struct mlx5_flow_rule *dst;
	int num_encap = 0;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	list_for_each_entry(dst, &fte->node.children, node.list) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
		    dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_NONE)
			continue;
		if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		     dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}
	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}

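/* Fill the execute_aso part of the flow context for a flow-meter ASO action. */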
static void
mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
{
	void *exe_aso_ctrl;
	void *execute_aso;

	execute_aso = MLX5_ADDR_OF(flow_context, in_flow_context,
				   execute_aso[0]);
	MLX5_SET(execute_aso, execute_aso, valid, 1);
	MLX5_SET(execute_aso, execute_aso, aso_object_id,
		 fte->action.exe_aso.object_id);

	exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
		 fte->action.exe_aso.return_reg_id);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
		 fte->action.exe_aso.type);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
		 fte->action.exe_aso.flow_meter.init_color);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
		 fte->action.exe_aso.flow_meter.meter_idx);
}

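/*
 * Build and execute SET_FLOW_TABLE_ENTRY for an FTE. The input
 * length scales with the number of destinations; the flow context
 * carries the action bits, flow tag/source, VLAN push headers,
 * modify-header/packet-reformat ids, the match value, the
 * destination list (regular or extended format), the flow counter
 * list and, when requested, an execute-ASO (flow meter) action.
 */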
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	unsigned int inlen;
	int dst_cnt_size;
	void *in_dests;
	u32 *in;
	int err;

	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level,
		 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));

	MLX5_SET(set_fte_in, in, vport_number, ft->vport);
	MLX5_SET(set_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);
	if (extended_dest) {
		u32 action;

		action = fte->action.action &
			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		MLX5_SET(flow_context, in_flow_context, action, action);
	} else {
		MLX5_SET(flow_context, in_flow_context, action,
			 fte->action.action);
		if (fte->action.pkt_reformat)
			MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
				 fte->action.pkt_reformat->id);
	}
	if (fte->action.modify_hdr)
		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);

	MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
		 fte->action.crypto.type);
	MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
		 fte->action.crypto.obj_id);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			enum mlx5_flow_destination_type type = dst->dest_attr.type;
			enum mlx5_ifc_flow_destination_type ifc_type;
			unsigned int id;

			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_NONE:
				continue;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = dst->dest_attr.ft_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
					/* destination_id is reserved */
					id = 0;
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
					break;
				}
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
				id = dst->dest_attr.vport.num;
				if (extended_dest &&
				    dst->dest_attr.vport.pkt_reformat) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.pkt_reformat->id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = dst->dest_attr.sampler_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
				break;
			default:
				id = dst->dest_attr.tir_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 ifc_type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
		if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
			mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
		} else {
			err = -EOPNOTSUPP;
			goto err_out;
		}
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}

static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *group,
			       struct fs_fte *fte)
{
	struct mlx5_core_dev *dev = ns->dev;
	unsigned int group_id = group->id;

	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}

static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *fg,
			       int modify_mask,
			       struct fs_fte *fte)
{
	int opmod;
	struct mlx5_core_dev *dev = ns->dev;
	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
						flow_table_properties_nic_receive.
						flow_modify_en);
	if (!atomic_mod_cap)
		return -EOPNOTSUPP;
	opmod = 1;

	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
}

static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct fs_fte *fte)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
	MLX5_SET(delete_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	return mlx5_cmd_exec_in(dev, delete_fte, in);
}

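/*
 * Flow counter command helpers. Counters can be allocated singly or
 * in bulk; the returned id is the first counter of the bulk. Bulk
 * queries return an array of traffic_counter structs starting at
 * base_id.
 */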
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
			   u32 *id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

	err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}

int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}

int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}

int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)] = {};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}

int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
	return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}

int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
			   u32 *out)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

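/*
 * Allocate a packet reformat (encap/decap) context. The reformat
 * data size is validated against the relevant max_encap_header_size
 * capability (eswitch for FDB namespaces, NIC flow table otherwise),
 * and the command input is sized to hold the reformat data.
 */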
static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_pkt_reformat_params *params,
					  enum mlx5_flow_namespace_type namespace,
					  struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	void *packet_reformat_context_in;
	int max_encap_size;
	void *reformat;
	int inlen;
	int err;
	u32 *in;

	if (namespace == MLX5_FLOW_NAMESPACE_FDB ||
	    namespace == MLX5_FLOW_NAMESPACE_FDB_BYPASS)
		max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
	else
		max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);

	if (params->size > max_encap_size) {
		mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
			       params->size, max_encap_size);
		return -EINVAL;
	}

	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
		     params->size, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
						  in, packet_reformat_context);
	reformat = MLX5_ADDR_OF(packet_reformat_context_in,
				packet_reformat_context_in,
				reformat_data);
	inlen = reformat - (void *)in + params->size;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_data_size, params->size);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_type, params->type);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_param_0, params->param_0);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_param_1, params->param_1);
	if (params->data && params->size)
		memcpy(reformat, params->data, params->size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
				    out, packet_reformat_id);
	kfree(in);
	return err;
}

static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 pkt_reformat->id);

	mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
}

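/*
 * Allocate a modify-header context. The flow namespace is mapped to
 * the corresponding flow table type and its max_modify_header_actions
 * capability before the packed action list is sent to firmware.
 */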
static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					u8 namespace, u8 num_actions,
					void *modify_actions,
					struct mlx5_modify_hdr *modify_hdr)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	int max_actions, actions_size, inlen, err;
	struct mlx5_core_dev *dev = ns->dev;
	void *actions_in;
	u8 table_type;
	u32 *in;

	switch (namespace) {
	case MLX5_FLOW_NAMESPACE_FDB:
	case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
		table_type = FS_FT_FDB;
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_BYPASS:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_RX;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
	case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
	case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_TX;
		break;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
		table_type = FS_FT_ESW_INGRESS_ACL;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
		table_type = FS_FT_RDMA_TX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (num_actions > max_actions) {
		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
			       num_actions, max_actions);
		return -EOPNOTSUPP;
	}

	actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);

	actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_in, modify_actions, actions_size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
	kfree(in);
	return err;
}

static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_modify_hdr *modify_hdr)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_hdr->id);

	mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}

static int mlx5_cmd_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
					  int definer_id)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_OBJ_TYPE_MATCH_DEFINER);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);

	return mlx5_cmd_exec(ns->dev, in, sizeof(in), out, sizeof(out));
}

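/*
 * Create a match definer as a general object. Returns the definer
 * object id on success or a negative errno on failure.
 */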
static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
					 u16 format_id, u32 *match_mask)
{
	u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	void *ptr;
	int err;

	MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.obj_type,
		 MLX5_OBJ_TYPE_MATCH_DEFINER);

	ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
	MLX5_SET(match_definer, ptr, format_id, format_id);

	ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
	memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));

	err = mlx5_cmd_exec_inout(dev, create_match_definer, in, out);
	return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
}

static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
				     enum fs_flow_table_type ft_type)
{
	return 0;
}

static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
	.create_match_definer = mlx5_cmd_create_match_definer,
	.destroy_match_definer = mlx5_cmd_destroy_match_definer,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
	.get_capabilities = mlx5_cmd_get_capabilities,
};

static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
	.create_flow_table = mlx5_cmd_stub_create_flow_table,
	.destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_stub_modify_flow_table,
	.create_flow_group = mlx5_cmd_stub_create_flow_group,
	.destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
	.create_fte = mlx5_cmd_stub_create_fte,
	.update_fte = mlx5_cmd_stub_update_fte,
	.delete_fte = mlx5_cmd_stub_delete_fte,
	.update_root_ft = mlx5_cmd_stub_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
	.create_match_definer = mlx5_cmd_stub_create_match_definer,
	.destroy_match_definer = mlx5_cmd_stub_destroy_match_definer,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns = mlx5_cmd_stub_destroy_ns,
	.get_capabilities = mlx5_cmd_stub_get_capabilities,
};

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
{
	return &mlx5_flow_cmds;
}

static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
{
	return &mlx5_flow_cmd_stubs;
}

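/*
 * Return the firmware-backed command set for flow table types that
 * are programmed through device commands, and the no-op stubs for
 * every other type.
 */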
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
	case FS_FT_ESW_EGRESS_ACL:
	case FS_FT_ESW_INGRESS_ACL:
	case FS_FT_FDB:
	case FS_FT_SNIFFER_RX:
	case FS_FT_SNIFFER_TX:
	case FS_FT_NIC_TX:
	case FS_FT_RDMA_RX:
	case FS_FT_RDMA_TX:
	case FS_FT_PORT_SEL:
		return mlx5_fs_cmd_get_fw_cmds();
	default:
		return mlx5_fs_cmd_get_stub_cmds();
	}
}