1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3
4 #include "dr_types.h"
5
dr_mask_is_smac_set(struct mlx5dr_match_spec * spec)6 static bool dr_mask_is_smac_set(struct mlx5dr_match_spec *spec)
7 {
8 return (spec->smac_47_16 || spec->smac_15_0);
9 }
10
dr_mask_is_dmac_set(struct mlx5dr_match_spec * spec)11 static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
12 {
13 return (spec->dmac_47_16 || spec->dmac_15_0);
14 }
15
dr_mask_is_l3_base_set(struct mlx5dr_match_spec * spec)16 static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
17 {
18 return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
19 spec->ip_ecn || spec->ip_dscp);
20 }
21
dr_mask_is_tcp_udp_base_set(struct mlx5dr_match_spec * spec)22 static bool dr_mask_is_tcp_udp_base_set(struct mlx5dr_match_spec *spec)
23 {
24 return (spec->tcp_sport || spec->tcp_dport ||
25 spec->udp_sport || spec->udp_dport);
26 }
27
dr_mask_is_ipv4_set(struct mlx5dr_match_spec * spec)28 static bool dr_mask_is_ipv4_set(struct mlx5dr_match_spec *spec)
29 {
30 return (spec->dst_ip_31_0 || spec->src_ip_31_0);
31 }
32
dr_mask_is_ipv4_5_tuple_set(struct mlx5dr_match_spec * spec)33 static bool dr_mask_is_ipv4_5_tuple_set(struct mlx5dr_match_spec *spec)
34 {
35 return (dr_mask_is_l3_base_set(spec) ||
36 dr_mask_is_tcp_udp_base_set(spec) ||
37 dr_mask_is_ipv4_set(spec));
38 }
39
dr_mask_is_eth_l2_tnl_set(struct mlx5dr_match_misc * misc)40 static bool dr_mask_is_eth_l2_tnl_set(struct mlx5dr_match_misc *misc)
41 {
42 return misc->vxlan_vni;
43 }
44
dr_mask_is_ttl_set(struct mlx5dr_match_spec * spec)45 static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec)
46 {
47 return spec->ttl_hoplimit;
48 }
49
dr_mask_is_ipv4_ihl_set(struct mlx5dr_match_spec * spec)50 static bool dr_mask_is_ipv4_ihl_set(struct mlx5dr_match_spec *spec)
51 {
52 return spec->ipv4_ihl;
53 }
54
/* True when the mask matches on any inner/outer L2 destination related
 * field: DMAC, ethertype, IP version, or first/second VLAN headers.
 * Note: every argument use is parenthesized (including the first _spec
 * use, which previously was not) so expression arguments expand safely.
 */
#define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) ((_spec).first_vid || \
	(_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \
	(_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \
	(_spec).ethertype || (_spec).ip_version || \
	(_misc)._inner_outer##_second_vid || \
	(_misc)._inner_outer##_second_cfi || \
	(_misc)._inner_outer##_second_prio || \
	(_misc)._inner_outer##_second_cvlan_tag || \
	(_misc)._inner_outer##_second_svlan_tag)
64
/* True when the mask matches on any L3/L4 field relevant for the combined
 * IPv6 L3-L4 STE: base L3 fields, TCP/UDP ports, TTL, or the flow label.
 */
#define DR_MASK_IS_ETH_L4_SET(_spec, _misc, _inner_outer) ( \
	dr_mask_is_l3_base_set(&(_spec)) || \
	dr_mask_is_tcp_udp_base_set(&(_spec)) || \
	dr_mask_is_ttl_set(&(_spec)) || \
	(_misc)._inner_outer##_ipv6_flow_label)
70
/* True when the mask matches on TCP sequence or acknowledgment numbers. */
#define DR_MASK_IS_ETH_L4_MISC_SET(_misc3, _inner_outer) ( \
	(_misc3)._inner_outer##_tcp_seq_num || \
	(_misc3)._inner_outer##_tcp_ack_num)
74
/* True when the mask matches on any field of the first MPLS label stack
 * entry (label, EXP, bottom-of-stack, TTL).
 */
#define DR_MASK_IS_FIRST_MPLS_SET(_misc2, _inner_outer) ( \
	(_misc2)._inner_outer##_first_mpls_label || \
	(_misc2)._inner_outer##_first_mpls_exp || \
	(_misc2)._inner_outer##_first_mpls_s_bos || \
	(_misc2)._inner_outer##_first_mpls_ttl)
80
dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc * misc)81 static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc)
82 {
83 return (misc->gre_key_h || misc->gre_key_l ||
84 misc->gre_protocol || misc->gre_c_present ||
85 misc->gre_k_present || misc->gre_s_present);
86 }
87
/* True when the mask matches on the first MPLS label carried over GRE. */
#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
	(_misc)->outer_first_mpls_over_gre_label || \
	(_misc)->outer_first_mpls_over_gre_exp || \
	(_misc)->outer_first_mpls_over_gre_s_bos || \
	(_misc)->outer_first_mpls_over_gre_ttl)
93
/* True when the mask matches on the first MPLS label carried over UDP. */
#define DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
	(_misc)->outer_first_mpls_over_udp_label || \
	(_misc)->outer_first_mpls_over_udp_exp || \
	(_misc)->outer_first_mpls_over_udp_s_bos || \
	(_misc)->outer_first_mpls_over_udp_ttl)
99
100 static bool
dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 * misc3)101 dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
102 {
103 return (misc3->outer_vxlan_gpe_vni ||
104 misc3->outer_vxlan_gpe_next_protocol ||
105 misc3->outer_vxlan_gpe_flags);
106 }
107
108 static bool
dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps * caps)109 dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
110 {
111 return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
112 (caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED);
113 }
114
115 static bool
dr_mask_is_tnl_vxlan_gpe(struct mlx5dr_match_param * mask,struct mlx5dr_domain * dmn)116 dr_mask_is_tnl_vxlan_gpe(struct mlx5dr_match_param *mask,
117 struct mlx5dr_domain *dmn)
118 {
119 return dr_mask_is_vxlan_gpe_set(&mask->misc3) &&
120 dr_matcher_supp_vxlan_gpe(&dmn->info.caps);
121 }
122
dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc * misc)123 static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc)
124 {
125 return misc->geneve_vni ||
126 misc->geneve_oam ||
127 misc->geneve_protocol_type ||
128 misc->geneve_opt_len;
129 }
130
dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 * misc3)131 static bool dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 *misc3)
132 {
133 return misc3->geneve_tlv_option_0_data;
134 }
135
136 static bool
dr_matcher_supp_flex_parser_ok(struct mlx5dr_cmd_caps * caps)137 dr_matcher_supp_flex_parser_ok(struct mlx5dr_cmd_caps *caps)
138 {
139 return caps->flex_parser_ok_bits_supp;
140 }
141
/* GENEVE TLV option-exists matching: requires both device support for the
 * flex parser OK bits and the mask asking for option-0 existence.
 */
static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *misc,
						    struct mlx5dr_domain *dmn)
{
	return dr_matcher_supp_flex_parser_ok(&dmn->info.caps) &&
	       misc->geneve_tlv_option_0_exist;
}
148
149 static bool
dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps * caps)150 dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
151 {
152 return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
153 (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED);
154 }
155
156 static bool
dr_mask_is_tnl_geneve(struct mlx5dr_match_param * mask,struct mlx5dr_domain * dmn)157 dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask,
158 struct mlx5dr_domain *dmn)
159 {
160 return dr_mask_is_tnl_geneve_set(&mask->misc) &&
161 dr_matcher_supp_tnl_geneve(&dmn->info.caps);
162 }
163
dr_mask_is_tnl_gtpu_set(struct mlx5dr_match_misc3 * misc3)164 static bool dr_mask_is_tnl_gtpu_set(struct mlx5dr_match_misc3 *misc3)
165 {
166 return misc3->gtpu_msg_flags || misc3->gtpu_msg_type || misc3->gtpu_teid;
167 }
168
/* Device capability: GTP-U flex parser (no native GTP-U steering support). */
static bool dr_matcher_supp_tnl_gtpu(struct mlx5dr_cmd_caps *caps)
{
	return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED;
}
173
/* GTP-U is used only when the mask asks for it AND the device can do it. */
static bool dr_mask_is_tnl_gtpu(struct mlx5dr_match_param *mask,
				struct mlx5dr_domain *dmn)
{
	return dr_mask_is_tnl_gtpu_set(&mask->misc3) &&
	       dr_matcher_supp_tnl_gtpu(&dmn->info.caps);
}
180
dr_matcher_supp_tnl_gtpu_dw_0(struct mlx5dr_cmd_caps * caps)181 static int dr_matcher_supp_tnl_gtpu_dw_0(struct mlx5dr_cmd_caps *caps)
182 {
183 return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED;
184 }
185
/* Mask asks for GTP-U DW 0 and the device can flex-parse it. */
static bool dr_mask_is_tnl_gtpu_dw_0(struct mlx5dr_match_param *mask,
				     struct mlx5dr_domain *dmn)
{
	return mask->misc3.gtpu_dw_0 &&
	       dr_matcher_supp_tnl_gtpu_dw_0(&dmn->info.caps);
}
192
dr_matcher_supp_tnl_gtpu_teid(struct mlx5dr_cmd_caps * caps)193 static int dr_matcher_supp_tnl_gtpu_teid(struct mlx5dr_cmd_caps *caps)
194 {
195 return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED;
196 }
197
/* Mask asks for the GTP-U TEID and the device can flex-parse it. */
static bool dr_mask_is_tnl_gtpu_teid(struct mlx5dr_match_param *mask,
				     struct mlx5dr_domain *dmn)
{
	return mask->misc3.gtpu_teid &&
	       dr_matcher_supp_tnl_gtpu_teid(&dmn->info.caps);
}
204
dr_matcher_supp_tnl_gtpu_dw_2(struct mlx5dr_cmd_caps * caps)205 static int dr_matcher_supp_tnl_gtpu_dw_2(struct mlx5dr_cmd_caps *caps)
206 {
207 return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED;
208 }
209
/* Mask asks for GTP-U DW 2 and the device can flex-parse it. */
static bool dr_mask_is_tnl_gtpu_dw_2(struct mlx5dr_match_param *mask,
				     struct mlx5dr_domain *dmn)
{
	return mask->misc3.gtpu_dw_2 &&
	       dr_matcher_supp_tnl_gtpu_dw_2(&dmn->info.caps);
}
216
dr_matcher_supp_tnl_gtpu_first_ext(struct mlx5dr_cmd_caps * caps)217 static int dr_matcher_supp_tnl_gtpu_first_ext(struct mlx5dr_cmd_caps *caps)
218 {
219 return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED;
220 }
221
/* Mask asks for the first GTP-U extension DW and the device can parse it. */
static bool dr_mask_is_tnl_gtpu_first_ext(struct mlx5dr_match_param *mask,
					  struct mlx5dr_domain *dmn)
{
	return mask->misc3.gtpu_first_ext_dw_0 &&
	       dr_matcher_supp_tnl_gtpu_first_ext(&dmn->info.caps);
}
228
/* True when any requested GTP-U field is mapped to flex parser set 0
 * (parser ids 0-3) on this device.
 */
static bool dr_mask_is_tnl_gtpu_flex_parser_0(struct mlx5dr_match_param *mask,
					      struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;

	return (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_0) &&
		dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
	       (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_teid) &&
		dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
	       (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_2) &&
		dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
	       (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
		dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
}
243
/* True when any requested GTP-U field is mapped to flex parser set 1
 * (parser ids 4-7) on this device.
 */
static bool dr_mask_is_tnl_gtpu_flex_parser_1(struct mlx5dr_match_param *mask,
					      struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;

	return (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_0) &&
		dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
	       (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_teid) &&
		dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
	       (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_2) &&
		dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
	       (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
		dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
}
258
dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param * mask,struct mlx5dr_domain * dmn)259 static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask,
260 struct mlx5dr_domain *dmn)
261 {
262 return dr_mask_is_tnl_gtpu_flex_parser_0(mask, dmn) ||
263 dr_mask_is_tnl_gtpu_flex_parser_1(mask, dmn) ||
264 dr_mask_is_tnl_gtpu(mask, dmn);
265 }
266
dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps * caps)267 static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
268 {
269 return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
270 (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED);
271 }
272
dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps * caps)273 static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
274 {
275 return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
276 (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED);
277 }
278
dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 * misc3)279 static bool dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
280 {
281 return (misc3->icmpv6_type || misc3->icmpv6_code ||
282 misc3->icmpv6_header_data);
283 }
284
/* ICMP matching is usable only when the mask requests ICMPv4/v6 fields AND
 * the device supports the matching protocol version.
 */
static bool dr_mask_is_icmp(struct mlx5dr_match_param *mask,
			    struct mlx5dr_domain *dmn)
{
	if (DR_MASK_IS_ICMPV4_SET(&mask->misc3))
		return dr_matcher_supp_icmp_v4(&dmn->info.caps);
	else if (dr_mask_is_icmpv6_set(&mask->misc3))
		return dr_matcher_supp_icmp_v6(&dmn->info.caps);

	return false;
}
295
dr_mask_is_wqe_metadata_set(struct mlx5dr_match_misc2 * misc2)296 static bool dr_mask_is_wqe_metadata_set(struct mlx5dr_match_misc2 *misc2)
297 {
298 return misc2->metadata_reg_a;
299 }
300
dr_mask_is_reg_c_0_3_set(struct mlx5dr_match_misc2 * misc2)301 static bool dr_mask_is_reg_c_0_3_set(struct mlx5dr_match_misc2 *misc2)
302 {
303 return (misc2->metadata_reg_c_0 || misc2->metadata_reg_c_1 ||
304 misc2->metadata_reg_c_2 || misc2->metadata_reg_c_3);
305 }
306
dr_mask_is_reg_c_4_7_set(struct mlx5dr_match_misc2 * misc2)307 static bool dr_mask_is_reg_c_4_7_set(struct mlx5dr_match_misc2 *misc2)
308 {
309 return (misc2->metadata_reg_c_4 || misc2->metadata_reg_c_5 ||
310 misc2->metadata_reg_c_6 || misc2->metadata_reg_c_7);
311 }
312
dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc * misc)313 static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
314 {
315 return (misc->source_sqn || misc->source_port);
316 }
317
dr_mask_is_flex_parser_id_0_3_set(u32 flex_parser_id,u32 flex_parser_value)318 static bool dr_mask_is_flex_parser_id_0_3_set(u32 flex_parser_id,
319 u32 flex_parser_value)
320 {
321 if (flex_parser_id)
322 return flex_parser_id <= DR_STE_MAX_FLEX_0_ID;
323
324 /* Using flex_parser 0 means that id is zero, thus value must be set. */
325 return flex_parser_value;
326 }
327
/* Any of the four programmed sample fields uses flex parser set 0 (ids 0-3). */
static bool dr_mask_is_flex_parser_0_3_set(struct mlx5dr_match_misc4 *misc4)
{
	return (dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_0,
						  misc4->prog_sample_field_value_0) ||
		dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_1,
						  misc4->prog_sample_field_value_1) ||
		dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_2,
						  misc4->prog_sample_field_value_2) ||
		dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_3,
						  misc4->prog_sample_field_value_3));
}
339
dr_mask_is_flex_parser_id_4_7_set(u32 flex_parser_id)340 static bool dr_mask_is_flex_parser_id_4_7_set(u32 flex_parser_id)
341 {
342 return flex_parser_id > DR_STE_MAX_FLEX_0_ID &&
343 flex_parser_id <= DR_STE_MAX_FLEX_1_ID;
344 }
345
/* Any of the four programmed sample fields uses flex parser set 1 (ids 4-7). */
static bool dr_mask_is_flex_parser_4_7_set(struct mlx5dr_match_misc4 *misc4)
{
	return (dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_0) ||
		dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_1) ||
		dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_2) ||
		dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_3));
}
353
dr_matcher_supp_tnl_mpls_over_gre(struct mlx5dr_cmd_caps * caps)354 static int dr_matcher_supp_tnl_mpls_over_gre(struct mlx5dr_cmd_caps *caps)
355 {
356 return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED;
357 }
358
/* Mask asks for MPLS-over-GRE and the device can flex-parse it. */
static bool dr_mask_is_tnl_mpls_over_gre(struct mlx5dr_match_param *mask,
					 struct mlx5dr_domain *dmn)
{
	return DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(&mask->misc2) &&
	       dr_matcher_supp_tnl_mpls_over_gre(&dmn->info.caps);
}
365
dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps * caps)366 static int dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps *caps)
367 {
368 return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
369 }
370
/* Mask asks for MPLS-over-UDP and the device can flex-parse it. */
static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
					 struct mlx5dr_domain *dmn)
{
	return DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(&mask->misc2) &&
	       dr_matcher_supp_tnl_mpls_over_udp(&dmn->info.caps);
}
377
dr_mask_is_tnl_header_0_1_set(struct mlx5dr_match_misc5 * misc5)378 static bool dr_mask_is_tnl_header_0_1_set(struct mlx5dr_match_misc5 *misc5)
379 {
380 return misc5->tunnel_header_0 || misc5->tunnel_header_1;
381 }
382
mlx5dr_matcher_select_builders(struct mlx5dr_matcher * matcher,struct mlx5dr_matcher_rx_tx * nic_matcher,enum mlx5dr_ipv outer_ipv,enum mlx5dr_ipv inner_ipv)383 int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
384 struct mlx5dr_matcher_rx_tx *nic_matcher,
385 enum mlx5dr_ipv outer_ipv,
386 enum mlx5dr_ipv inner_ipv)
387 {
388 nic_matcher->ste_builder =
389 nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
390 nic_matcher->num_of_builders =
391 nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv];
392
393 if (!nic_matcher->num_of_builders) {
394 mlx5dr_dbg(matcher->tbl->dmn,
395 "Rule not supported on this matcher due to IP related fields\n");
396 return -EINVAL;
397 }
398
399 return 0;
400 }
401
/* Translate the matcher mask into an ordered list of STE builders for one
 * outer/inner IP version combination.  Each recognized portion of the mask
 * appends a builder to sb[]; builders clear the mask fields they consume
 * from the local copy, so any non-zero byte left at the end means the mask
 * contains something no builder supports.
 * Returns 0 on success, -EINVAL when no builder applies, -EOPNOTSUPP when
 * unconsumed mask bits remain.
 */
static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
				       struct mlx5dr_matcher_rx_tx *nic_matcher,
				       enum mlx5dr_ipv outer_ipv,
				       enum mlx5dr_ipv inner_ipv)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_match_param mask = {};
	bool allow_empty_match = false;
	struct mlx5dr_ste_build *sb;
	bool inner, rx;
	int idx = 0;
	int ret, i;

	sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
	rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;

	/* Create a temporary mask to track and clear used mask fields */
	if (matcher->match_criteria & DR_MATCHER_CRITERIA_OUTER)
		mask.outer = matcher->mask.outer;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC)
		mask.misc = matcher->mask.misc;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_INNER)
		mask.inner = matcher->mask.inner;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC2)
		mask.misc2 = matcher->mask.misc2;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC3)
		mask.misc3 = matcher->mask.misc3;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4)
		mask.misc4 = matcher->mask.misc4;

	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC5)
		mask.misc5 = matcher->mask.misc5;

	/* Validate the original mask before any source-port optimization. */
	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, NULL);
	if (ret)
		return ret;

	/* Optimize RX pipe by reducing source port match, since
	 * the FDB RX part is connected only to the wire.
	 */
	if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
	    rx && mask.misc.source_port) {
		mask.misc.source_port = 0;
		mask.misc.source_eswitch_owner_vhca_id = 0;
		allow_empty_match = true;
	}

	/* Outer */
	if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER |
				       DR_MATCHER_CRITERIA_MISC |
				       DR_MATCHER_CRITERIA_MISC2 |
				       DR_MATCHER_CRITERIA_MISC3 |
				       DR_MATCHER_CRITERIA_MISC5)) {
		inner = false;

		if (dr_mask_is_wqe_metadata_set(&mask.misc2))
			mlx5dr_ste_build_general_purpose(ste_ctx, &sb[idx++],
							 &mask, inner, rx);

		if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
			mlx5dr_ste_build_register_0(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
			mlx5dr_ste_build_register_1(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
		    (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
		     dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
			mlx5dr_ste_build_src_gvmi_qpn(ste_ctx, &sb[idx++],
						      &mask, dmn, inner, rx);
		}

		/* SMAC+DMAC get a combined STE, then SMAC alone if needed. */
		if (dr_mask_is_smac_set(&mask.outer) &&
		    dr_mask_is_dmac_set(&mask.outer)) {
			mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
							&mask, inner, rx);
		}

		if (dr_mask_is_smac_set(&mask.outer))
			mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
			mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (outer_ipv == DR_RULE_IPV6) {
			if (DR_MASK_IS_DST_IP_SET(&mask.outer))
				mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
								 &mask, inner, rx);

			if (DR_MASK_IS_SRC_IP_SET(&mask.outer))
				mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
								 &mask, inner, rx);

			if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
				mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
								&mask, inner, rx);
		} else {
			if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
				mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
								     &mask, inner, rx);

			if (dr_mask_is_ttl_set(&mask.outer) ||
			    dr_mask_is_ipv4_ihl_set(&mask.outer))
				mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
								  &mask, inner, rx);
		}

		/* At most one tunnel type can be matched; precedence:
		 * VXLAN-GPE, then GENEVE, then GTP-U, then raw tunnel header.
		 */
		if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
			mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
						       &mask, inner, rx);
		else if (dr_mask_is_tnl_geneve(&mask, dmn)) {
			mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
						    &mask, inner, rx);
			if (dr_mask_is_tnl_geneve_tlv_opt(&mask.misc3))
				mlx5dr_ste_build_tnl_geneve_tlv_opt(ste_ctx, &sb[idx++],
								    &mask, &dmn->info.caps,
								    inner, rx);
			if (dr_mask_is_tnl_geneve_tlv_opt_exist_set(&mask.misc, dmn))
				mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(ste_ctx, &sb[idx++],
									  &mask, &dmn->info.caps,
									  inner, rx);
		} else if (dr_mask_is_tnl_gtpu_any(&mask, dmn)) {
			if (dr_mask_is_tnl_gtpu_flex_parser_0(&mask, dmn))
				mlx5dr_ste_build_tnl_gtpu_flex_parser_0(ste_ctx, &sb[idx++],
									&mask, &dmn->info.caps,
									inner, rx);

			if (dr_mask_is_tnl_gtpu_flex_parser_1(&mask, dmn))
				mlx5dr_ste_build_tnl_gtpu_flex_parser_1(ste_ctx, &sb[idx++],
									&mask, &dmn->info.caps,
									inner, rx);

			if (dr_mask_is_tnl_gtpu(&mask, dmn))
				mlx5dr_ste_build_tnl_gtpu(ste_ctx, &sb[idx++],
							  &mask, inner, rx);
		} else if (dr_mask_is_tnl_header_0_1_set(&mask.misc5)) {
			mlx5dr_ste_build_tnl_header_0_1(ste_ctx, &sb[idx++],
							&mask, inner, rx);
		}

		if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
			mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
						     &mask, inner, rx);

		if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
			mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
					      &mask, inner, rx);

		if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
			mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
							   &mask, &dmn->info.caps,
							   inner, rx);
		else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
			mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
							   &mask, &dmn->info.caps,
							   inner, rx);

		if (dr_mask_is_icmp(&mask, dmn))
			mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
					      &mask, &dmn->info.caps,
					      inner, rx);

		if (dr_mask_is_tnl_gre_set(&mask.misc))
			mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
						 &mask, inner, rx);
	}

	/* Inner */
	if (matcher->match_criteria & (DR_MATCHER_CRITERIA_INNER |
				       DR_MATCHER_CRITERIA_MISC |
				       DR_MATCHER_CRITERIA_MISC2 |
				       DR_MATCHER_CRITERIA_MISC3)) {
		inner = true;

		if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
			mlx5dr_ste_build_eth_l2_tnl(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (dr_mask_is_smac_set(&mask.inner) &&
		    dr_mask_is_dmac_set(&mask.inner)) {
			mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
							&mask, inner, rx);
		}

		if (dr_mask_is_smac_set(&mask.inner))
			mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
			mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
						    &mask, inner, rx);

		if (inner_ipv == DR_RULE_IPV6) {
			if (DR_MASK_IS_DST_IP_SET(&mask.inner))
				mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
								 &mask, inner, rx);

			if (DR_MASK_IS_SRC_IP_SET(&mask.inner))
				mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
								 &mask, inner, rx);

			if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
				mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
								&mask, inner, rx);
		} else {
			if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
				mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
								     &mask, inner, rx);

			if (dr_mask_is_ttl_set(&mask.inner) ||
			    dr_mask_is_ipv4_ihl_set(&mask.inner))
				mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
								  &mask, inner, rx);
		}

		if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
			mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
						     &mask, inner, rx);

		if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
			mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
					      &mask, inner, rx);

		if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
			mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
							   &mask, &dmn->info.caps,
							   inner, rx);
		else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
			mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
							   &mask, &dmn->info.caps,
							   inner, rx);
	}

	/* Flex parser samples are neither inner nor outer. */
	if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		if (dr_mask_is_flex_parser_0_3_set(&mask.misc4))
			mlx5dr_ste_build_flex_parser_0(ste_ctx, &sb[idx++],
						       &mask, false, rx);

		if (dr_mask_is_flex_parser_4_7_set(&mask.misc4))
			mlx5dr_ste_build_flex_parser_1(ste_ctx, &sb[idx++],
						       &mask, false, rx);
	}

	/* Empty matcher, takes all */
	if ((!idx && allow_empty_match) ||
	    matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
		mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);

	if (idx == 0) {
		mlx5dr_err(dmn, "Cannot generate any valid rules from mask\n");
		return -EINVAL;
	}

	/* Check that all mask fields were consumed */
	for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
		if (((u8 *)&mask)[i] != 0) {
			mlx5dr_dbg(dmn, "Mask contains unsupported parameters\n");
			return -EOPNOTSUPP;
		}
	}

	nic_matcher->ste_builder = sb;
	nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv] = idx;

	return 0;
}
680
/* Splice curr_nic_matcher into the per-table chain of matchers between
 * prev_nic_matcher and next_nic_matcher (either may be NULL).  Hash tables
 * are (re)posted back-to-front: end anchor first, then the start table,
 * then the predecessor, so a partially updated chain never points at an
 * uninitialized table.
 */
static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
				  struct mlx5dr_matcher_rx_tx *curr_nic_matcher,
				  struct mlx5dr_matcher_rx_tx *next_nic_matcher,
				  struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
{
	struct mlx5dr_table_rx_tx *nic_tbl = curr_nic_matcher->nic_tbl;
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *prev_htbl;
	int ret;

	/* Connect end anchor hash table to next_htbl or to the default address */
	if (next_nic_matcher) {
		info.type = CONNECT_HIT;
		info.hit_next_htbl = next_nic_matcher->s_htbl;
	} else {
		info.type = CONNECT_MISS;
		info.miss_icm_addr = nic_tbl->default_icm_addr;
	}
	ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
						curr_nic_matcher->e_anchor,
						&info, info.type == CONNECT_HIT);
	if (ret)
		return ret;

	/* Connect start hash table to end anchor */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(curr_nic_matcher->e_anchor->chunk);
	ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
						curr_nic_matcher->s_htbl,
						&info, false);
	if (ret)
		return ret;

	/* Connect previous hash table to matcher start hash table */
	if (prev_nic_matcher)
		prev_htbl = prev_nic_matcher->e_anchor;
	else
		prev_htbl = nic_tbl->s_anchor;

	info.type = CONNECT_HIT;
	info.hit_next_htbl = curr_nic_matcher->s_htbl;
	ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_htbl,
						&info, true);
	if (ret)
		return ret;

	/* Update the pointing ste and next hash table */
	curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->chunk->ste_arr;
	prev_htbl->chunk->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;

	if (next_nic_matcher) {
		next_nic_matcher->s_htbl->pointing_ste =
			curr_nic_matcher->e_anchor->chunk->ste_arr;
		curr_nic_matcher->e_anchor->chunk->ste_arr[0].next_htbl =
			next_nic_matcher->s_htbl;
	}

	return 0;
}
741
/* Insert nic_matcher into its table's priority-sorted matcher list and wire
 * up the hash-table chain accordingly.  No-op when already listed.
 * Returns 0 on success or an error from dr_nic_matcher_connect().
 */
int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn,
				  struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_matcher_rx_tx *next_nic_matcher, *prev_nic_matcher, *tmp_nic_matcher;
	struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
	bool first = true;
	int ret;

	/* If the nic matcher is already on its parent nic table list,
	 * then it is already connected to the chain of nic matchers.
	 */
	if (!list_empty(&nic_matcher->list_node))
		return 0;

	/* Find the first existing matcher with prio >= ours; it becomes our
	 * successor.  'first' remains true only if no entry precedes it.
	 */
	next_nic_matcher = NULL;
	list_for_each_entry(tmp_nic_matcher, &nic_tbl->nic_matcher_list, list_node) {
		if (tmp_nic_matcher->prio >= nic_matcher->prio) {
			next_nic_matcher = tmp_nic_matcher;
			break;
		}
		first = false;
	}

	/* The predecessor is either the entry before our successor, or the
	 * list tail when we are appending; NULL when inserting at the head.
	 */
	prev_nic_matcher = NULL;
	if (next_nic_matcher && !first)
		prev_nic_matcher = list_prev_entry(next_nic_matcher, list_node);
	else if (!first)
		prev_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list,
						   struct mlx5dr_matcher_rx_tx,
						   list_node);

	ret = dr_nic_matcher_connect(dmn, nic_matcher,
				     next_nic_matcher, prev_nic_matcher);
	if (ret)
		return ret;

	if (prev_nic_matcher)
		list_add(&nic_matcher->list_node, &prev_nic_matcher->list_node);
	else if (next_nic_matcher)
		list_add_tail(&nic_matcher->list_node, &next_nic_matcher->list_node);
	else
		list_add(&nic_matcher->list_node, &nic_matcher->nic_tbl->nic_matcher_list);

	return ret;
}
787
/* Drop the references taken on the matcher's start htbl and end anchor. */
static void dr_matcher_uninit_nic(struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	mlx5dr_htbl_put(nic_matcher->s_htbl);
	mlx5dr_htbl_put(nic_matcher->e_anchor);
}
793
/* FDB matchers have both RX and TX sides; tear down both. */
static void dr_matcher_uninit_fdb(struct mlx5dr_matcher *matcher)
{
	dr_matcher_uninit_nic(&matcher->rx);
	dr_matcher_uninit_nic(&matcher->tx);
}
799
dr_matcher_uninit(struct mlx5dr_matcher * matcher)800 static void dr_matcher_uninit(struct mlx5dr_matcher *matcher)
801 {
802 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
803
804 switch (dmn->type) {
805 case MLX5DR_DOMAIN_TYPE_NIC_RX:
806 dr_matcher_uninit_nic(&matcher->rx);
807 break;
808 case MLX5DR_DOMAIN_TYPE_NIC_TX:
809 dr_matcher_uninit_nic(&matcher->tx);
810 break;
811 case MLX5DR_DOMAIN_TYPE_FDB:
812 dr_matcher_uninit_fdb(matcher);
813 break;
814 default:
815 WARN_ON(true);
816 break;
817 }
818 }
819
/* Precompute STE builders for all four outer/inner IP version combinations.
 * Individual combinations are allowed to fail (their return values are
 * deliberately ignored); it is an error only if none of them produced a
 * builder chain.
 */
static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
					   struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;

	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV4);
	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV6);
	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV4);
	dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);

	/* ste_builder is set by the last successful combination above. */
	if (!nic_matcher->ste_builder) {
		mlx5dr_err(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
		return -EINVAL;
	}

	return 0;
}
837
/* Initialize one RX/TX side of a matcher: compute its STE builders and
 * allocate its start hash table and end anchor.
 * Returns 0 on success, -ENOMEM on allocation failure, or an error from
 * builder generation.
 */
static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
			       struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	int ret;

	nic_matcher->prio = matcher->prio;
	INIT_LIST_HEAD(&nic_matcher->list_node);

	ret = dr_matcher_set_all_ste_builders(matcher, nic_matcher);
	if (ret)
		return ret;

	/* End anchor: a one-entry don't-care table terminating the chain. */
	nic_matcher->e_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						      DR_CHUNK_SIZE_1,
						      MLX5DR_STE_LU_TYPE_DONT_CARE,
						      0);
	if (!nic_matcher->e_anchor)
		return -ENOMEM;

	/* Start table uses the first builder's lookup type and byte mask. */
	nic_matcher->s_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						    DR_CHUNK_SIZE_1,
						    nic_matcher->ste_builder[0].lu_type,
						    nic_matcher->ste_builder[0].byte_mask);
	if (!nic_matcher->s_htbl) {
		ret = -ENOMEM;
		goto free_e_htbl;
	}

	/* make sure the tables exist while empty */
	mlx5dr_htbl_get(nic_matcher->s_htbl);
	mlx5dr_htbl_get(nic_matcher->e_anchor);

	return 0;

free_e_htbl:
	mlx5dr_ste_htbl_free(nic_matcher->e_anchor);
	return ret;
}
877
dr_matcher_init_fdb(struct mlx5dr_matcher * matcher)878 static int dr_matcher_init_fdb(struct mlx5dr_matcher *matcher)
879 {
880 int ret;
881
882 ret = dr_matcher_init_nic(matcher, &matcher->rx);
883 if (ret)
884 return ret;
885
886 ret = dr_matcher_init_nic(matcher, &matcher->tx);
887 if (ret)
888 goto uninit_nic_rx;
889
890 return 0;
891
892 uninit_nic_rx:
893 dr_matcher_uninit_nic(&matcher->rx);
894 return ret;
895 }
896
dr_matcher_copy_param(struct mlx5dr_matcher * matcher,struct mlx5dr_match_parameters * mask)897 static int dr_matcher_copy_param(struct mlx5dr_matcher *matcher,
898 struct mlx5dr_match_parameters *mask)
899 {
900 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
901 struct mlx5dr_match_parameters consumed_mask;
902 int i, ret = 0;
903
904 if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
905 mlx5dr_err(dmn, "Invalid match criteria attribute\n");
906 return -EINVAL;
907 }
908
909 if (mask) {
910 if (mask->match_sz > DR_SZ_MATCH_PARAM) {
911 mlx5dr_err(dmn, "Invalid match size attribute\n");
912 return -EINVAL;
913 }
914
915 consumed_mask.match_buf = kzalloc(mask->match_sz, GFP_KERNEL);
916 if (!consumed_mask.match_buf)
917 return -ENOMEM;
918
919 consumed_mask.match_sz = mask->match_sz;
920 memcpy(consumed_mask.match_buf, mask->match_buf, mask->match_sz);
921 mlx5dr_ste_copy_param(matcher->match_criteria,
922 &matcher->mask, &consumed_mask, true);
923
924 /* Check that all mask data was consumed */
925 for (i = 0; i < consumed_mask.match_sz; i++) {
926 if (!((u8 *)consumed_mask.match_buf)[i])
927 continue;
928
929 mlx5dr_dbg(dmn,
930 "Match param mask contains unsupported parameters\n");
931 ret = -EOPNOTSUPP;
932 break;
933 }
934
935 kfree(consumed_mask.match_buf);
936 }
937
938 return ret;
939 }
940
dr_matcher_init(struct mlx5dr_matcher * matcher,struct mlx5dr_match_parameters * mask)941 static int dr_matcher_init(struct mlx5dr_matcher *matcher,
942 struct mlx5dr_match_parameters *mask)
943 {
944 struct mlx5dr_table *tbl = matcher->tbl;
945 struct mlx5dr_domain *dmn = tbl->dmn;
946 int ret;
947
948 ret = dr_matcher_copy_param(matcher, mask);
949 if (ret)
950 return ret;
951
952 switch (dmn->type) {
953 case MLX5DR_DOMAIN_TYPE_NIC_RX:
954 matcher->rx.nic_tbl = &tbl->rx;
955 ret = dr_matcher_init_nic(matcher, &matcher->rx);
956 break;
957 case MLX5DR_DOMAIN_TYPE_NIC_TX:
958 matcher->tx.nic_tbl = &tbl->tx;
959 ret = dr_matcher_init_nic(matcher, &matcher->tx);
960 break;
961 case MLX5DR_DOMAIN_TYPE_FDB:
962 matcher->rx.nic_tbl = &tbl->rx;
963 matcher->tx.nic_tbl = &tbl->tx;
964 ret = dr_matcher_init_fdb(matcher);
965 break;
966 default:
967 WARN_ON(true);
968 ret = -EINVAL;
969 }
970
971 return ret;
972 }
973
dr_matcher_add_to_dbg_list(struct mlx5dr_matcher * matcher)974 static void dr_matcher_add_to_dbg_list(struct mlx5dr_matcher *matcher)
975 {
976 mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
977 list_add(&matcher->list_node, &matcher->tbl->matcher_list);
978 mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
979 }
980
dr_matcher_remove_from_dbg_list(struct mlx5dr_matcher * matcher)981 static void dr_matcher_remove_from_dbg_list(struct mlx5dr_matcher *matcher)
982 {
983 mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
984 list_del(&matcher->list_node);
985 mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
986 }
987
988 struct mlx5dr_matcher *
mlx5dr_matcher_create(struct mlx5dr_table * tbl,u32 priority,u8 match_criteria_enable,struct mlx5dr_match_parameters * mask)989 mlx5dr_matcher_create(struct mlx5dr_table *tbl,
990 u32 priority,
991 u8 match_criteria_enable,
992 struct mlx5dr_match_parameters *mask)
993 {
994 struct mlx5dr_matcher *matcher;
995 int ret;
996
997 refcount_inc(&tbl->refcount);
998
999 matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
1000 if (!matcher)
1001 goto dec_ref;
1002
1003 matcher->tbl = tbl;
1004 matcher->prio = priority;
1005 matcher->match_criteria = match_criteria_enable;
1006 refcount_set(&matcher->refcount, 1);
1007 INIT_LIST_HEAD(&matcher->list_node);
1008 INIT_LIST_HEAD(&matcher->dbg_rule_list);
1009
1010 mlx5dr_domain_lock(tbl->dmn);
1011
1012 ret = dr_matcher_init(matcher, mask);
1013 if (ret)
1014 goto free_matcher;
1015
1016 dr_matcher_add_to_dbg_list(matcher);
1017
1018 mlx5dr_domain_unlock(tbl->dmn);
1019
1020 return matcher;
1021
1022 free_matcher:
1023 mlx5dr_domain_unlock(tbl->dmn);
1024 kfree(matcher);
1025 dec_ref:
1026 refcount_dec(&tbl->refcount);
1027 return NULL;
1028 }
1029
/* Unlink a nic matcher from the table's matcher chain by rewiring the
 * anchor that precedes it (the previous matcher's end anchor, or the
 * table's start anchor if it was first) to skip over it: either hit the
 * next matcher's start table, or miss to the table's default address.
 * The rewired anchor is re-initialized and written to HW.
 */
static int dr_matcher_disconnect_nic(struct mlx5dr_domain *dmn,
				     struct mlx5dr_table_rx_tx *nic_tbl,
				     struct mlx5dr_matcher_rx_tx *next_nic_matcher,
				     struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *prev_anchor;

	/* The matcher being removed was reached through this anchor */
	if (prev_nic_matcher)
		prev_anchor = prev_nic_matcher->e_anchor;
	else
		prev_anchor = nic_tbl->s_anchor;

	/* Connect previous anchor hash table to next matcher or to the default address */
	if (next_nic_matcher) {
		info.type = CONNECT_HIT;
		info.hit_next_htbl = next_nic_matcher->s_htbl;
		/* Keep the SW chain consistent with the new HW wiring */
		next_nic_matcher->s_htbl->pointing_ste = prev_anchor->chunk->ste_arr;
		prev_anchor->chunk->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
	} else {
		info.type = CONNECT_MISS;
		/* NOTE(review): presumably the table-level default ICM
		 * destination - confirm against table init code.
		 */
		info.miss_icm_addr = nic_tbl->default_icm_addr;
		prev_anchor->chunk->ste_arr[0].next_htbl = NULL;
	}

	return mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_anchor,
						 &info, true);
}
1059
mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain * dmn,struct mlx5dr_matcher_rx_tx * nic_matcher)1060 int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn,
1061 struct mlx5dr_matcher_rx_tx *nic_matcher)
1062 {
1063 struct mlx5dr_matcher_rx_tx *prev_nic_matcher, *next_nic_matcher;
1064 struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
1065 int ret;
1066
1067 /* If the nic matcher is not on its parent nic table list,
1068 * then it is detached - no need to disconnect it.
1069 */
1070 if (list_empty(&nic_matcher->list_node))
1071 return 0;
1072
1073 if (list_is_last(&nic_matcher->list_node, &nic_tbl->nic_matcher_list))
1074 next_nic_matcher = NULL;
1075 else
1076 next_nic_matcher = list_next_entry(nic_matcher, list_node);
1077
1078 if (nic_matcher->list_node.prev == &nic_tbl->nic_matcher_list)
1079 prev_nic_matcher = NULL;
1080 else
1081 prev_nic_matcher = list_prev_entry(nic_matcher, list_node);
1082
1083 ret = dr_matcher_disconnect_nic(dmn, nic_tbl, next_nic_matcher, prev_nic_matcher);
1084 if (ret)
1085 return ret;
1086
1087 list_del_init(&nic_matcher->list_node);
1088 return 0;
1089 }
1090
mlx5dr_matcher_destroy(struct mlx5dr_matcher * matcher)1091 int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher)
1092 {
1093 struct mlx5dr_table *tbl = matcher->tbl;
1094
1095 if (WARN_ON_ONCE(refcount_read(&matcher->refcount) > 1))
1096 return -EBUSY;
1097
1098 mlx5dr_domain_lock(tbl->dmn);
1099
1100 dr_matcher_remove_from_dbg_list(matcher);
1101 dr_matcher_uninit(matcher);
1102 refcount_dec(&matcher->tbl->refcount);
1103
1104 mlx5dr_domain_unlock(tbl->dmn);
1105 kfree(matcher);
1106
1107 return 0;
1108 }
1109