// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/mlx5/eswitch.h>
#include <linux/err.h>
#include "dr_types.h"

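/* A domain supports SW steering if the relevant table type (rx/tx/fdb,
 * pasted in via dmn_type##) is SW-owned, either through the original
 * sw_owner capability or through sw_owner_v2 on devices whose steering
 * format version is at most ConnectX-7.
 */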
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type)	\
	((dmn)->info.caps.dmn_type##_sw_owner ||	\
	 ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&	\
	  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))

static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
	/* Per vport cached FW FT for checksum recalculation; this
	 * recalculation is needed due to a HW bug in STEv0.
	 */
	xa_init(&dmn->csum_fts_xa);
}

static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
	unsigned long i;

	xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
		if (recalc_cs_ft)
			mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
	}

	xa_destroy(&dmn->csum_fts_xa);
}

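/* Look up (or lazily create) the per-vport checksum-recalculation FW
 * table and return its RX ICM address. Note that the lookup and store
 * below are not atomic; callers are presumably expected to serialize
 * (e.g. via the domain lock), otherwise two threads could each create
 * a table for the same vport.
 */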
int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					u16 vport_num,
					u64 *rx_icm_addr)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
	int ret;

	recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
	if (!recalc_cs_ft) {
		/* Table hasn't been created yet */
		recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
		if (!recalc_cs_ft)
			return -EINVAL;

		ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
				      recalc_cs_ft, GFP_KERNEL));
		if (ret) {
			/* Don't leak the newly created table on store failure */
			mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
			return ret;
		}
	}

	*rx_icm_addr = recalc_cs_ft->rx_icm_addr;

	return 0;
}

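/* Init order: STE context, PD, UAR page, STE and modify-action ICM
 * pools, then the send ring used to push steering entries to device
 * ICM. The error path below unwinds in reverse order.
 */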
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
	if (!dmn->ste_ctx) {
		mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
		return -EOPNOTSUPP;
	}

	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d\n", ret);
		return ret;
	}

	dmn->uar = mlx5_get_uars_page(dmn->mdev);
	if (IS_ERR(dmn->uar)) {
		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
		ret = PTR_ERR(dmn->uar);
		goto clean_pd;
	}

	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!dmn->ste_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get icm memory\n");
		ret = -ENOMEM;
		goto clean_uar;
	}

	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
	if (!dmn->action_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get action icm memory\n");
		ret = -ENOMEM;
		goto free_ste_icm_pool;
	}

	ret = mlx5dr_send_ring_alloc(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send-ring\n");
		goto free_action_icm_pool;
	}

	return 0;

free_action_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
clean_uar:
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);

	return ret;
}

static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}

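/* The uplink "vport" is not queried from FW: its ICM addresses come
 * from the already-fetched eswitch caps, and vport_gvmi is left 0.
 */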
static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
				       struct mlx5dr_cmd_vport_cap *uplink_vport)
{
	struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;

	uplink_vport->num = MLX5_VPORT_UPLINK;
	uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
	uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
	uplink_vport->vport_gvmi = 0;
	uplink_vport->vhca_gvmi = dmn->info.caps.gvmi;
}

static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
				 u16 vport_number,
				 bool other_vport,
				 struct mlx5dr_cmd_vport_cap *vport_caps)
{
	int ret;

	ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
						 other_vport,
						 vport_number,
						 &vport_caps->icm_address_rx,
						 &vport_caps->icm_address_tx);
	if (ret)
		return ret;

	ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
				    other_vport,
				    vport_number,
				    &vport_caps->vport_gvmi);
	if (ret)
		return ret;

	vport_caps->num = vport_number;
	vport_caps->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
{
	return dr_domain_query_vport(dmn, 0, false,
				     &dmn->info.caps.vports.esw_manager_caps);
}

static void dr_domain_query_uplink(struct mlx5dr_domain *dmn)
{
	dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps);
}

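/* Query a vport's caps and cache them in the xarray. Returns NULL on
 * allocation/query failure, or an ERR_PTR from xa_insert(); -EBUSY
 * means another thread already stored caps for this vport.
 */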
static struct mlx5dr_cmd_vport_cap *
dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
	struct mlx5dr_cmd_vport_cap *vport_caps;
	int ret;

	vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL);
	if (!vport_caps)
		return NULL;

	ret = dr_domain_query_vport(dmn, vport, true, vport_caps);
	if (ret) {
		kvfree(vport_caps);
		return NULL;
	}

	ret = xa_insert(&caps->vports.vports_caps_xa, vport,
			vport_caps, GFP_KERNEL);
	if (ret) {
		mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
		kvfree(vport_caps);
		return ERR_PTR(ret);
	}

	return vport_caps;
}

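/* On an embedded-CPU function (is_ecpf, e.g. a SmartNIC) the eswitch
 * manager is the ECPF vport; otherwise it is vport 0 (the PF).
 */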
static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;

	return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
	       (!caps->is_ecpf && vport == 0);
}

struct mlx5dr_cmd_vport_cap *
mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
{
	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
	struct mlx5dr_cmd_vport_cap *vport_caps;

	if (dr_domain_is_esw_mgr_vport(dmn, vport))
		return &caps->vports.esw_manager_caps;

	if (vport == MLX5_VPORT_UPLINK)
		return &caps->vports.uplink_caps;

vport_load:
	vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
	if (vport_caps)
		return vport_caps;

	vport_caps = dr_domain_add_vport_cap(dmn, vport);
	if (PTR_ERR(vport_caps) == -EBUSY)
		/* caps were already stored by another thread */
		goto vport_load;

	return vport_caps;
}

static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_caps;
	unsigned long i;

	xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) {
		vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i);
		kvfree(vport_caps);
	}
}

static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
				    struct mlx5dr_domain *dmn)
{
	int ret;

	if (!dmn->info.caps.eswitch_manager)
		return -EOPNOTSUPP;

	ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
	if (ret)
		return ret;

	dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
	dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
	dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
	dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;

	xa_init(&dmn->info.caps.vports.vports_caps_xa);

	/* Query eswitch manager and uplink vports only. The rest of the
	 * vports (vport 0, VFs and SFs) will be queried dynamically.
	 */

	ret = dr_domain_query_esw_mngr(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)\n", ret);
		goto free_vports_caps_xa;
	}

	dr_domain_query_uplink(dmn);

	return 0;

free_vports_caps_xa:
	xa_destroy(&dmn->info.caps.vports.vports_caps_xa);

	return ret;
}

static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
			       struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_cap;
	int ret;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
		mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
		return -EOPNOTSUPP;
	}

	ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
	if (ret)
		return ret;

	ret = dr_domain_query_fdb_caps(mdev, dmn);
	if (ret)
		return ret;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
			return -ENOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
		dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
			return -ENOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
		dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		if (!dmn->info.caps.eswitch_manager)
			return -ENOTSUPP;

		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
			return -ENOTSUPP;

		dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
		dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
		vport_cap = &dmn->info.caps.vports.esw_manager_caps;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
		dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
		break;
	default:
		mlx5dr_err(dmn, "Invalid domain\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
	dr_domain_clear_vports(dmn);
	xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
}

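/* Typical lifecycle (a sketch; caller context and error handling are
 * illustrative only):
 *
 *	dmn = mlx5dr_domain_create(mdev, MLX5DR_DOMAIN_TYPE_FDB);
 *	if (!dmn)
 *		goto err;
 *	... create tables/matchers/rules on dmn ...
 *	mlx5dr_domain_sync(dmn, MLX5DR_DOMAIN_SYNC_FLAGS_SW |
 *				MLX5DR_DOMAIN_SYNC_FLAGS_HW);
 *	mlx5dr_domain_destroy(dmn);
 */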
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
{
	struct mlx5dr_domain *dmn;
	int ret;

	if (type > MLX5DR_DOMAIN_TYPE_FDB)
		return NULL;

	dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
	if (!dmn)
		return NULL;

	dmn->mdev = mdev;
	dmn->type = type;
	refcount_set(&dmn->refcount, 1);
	mutex_init(&dmn->info.rx.mutex);
	mutex_init(&dmn->info.tx.mutex);

	if (dr_domain_caps_init(mdev, dmn)) {
		mlx5dr_err(dmn, "Failed init domain, no caps\n");
		goto free_domain;
	}

	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
	dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
					    dmn->info.caps.log_icm_size);

	if (!dmn->info.supp_sw_steering) {
		mlx5dr_err(dmn, "SW steering is not supported\n");
		goto uninit_caps;
	}

	/* Allocate resources */
	ret = dr_domain_init_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed init domain resources\n");
		goto uninit_caps;
	}

	dr_domain_init_csum_recalc_fts(dmn);
	mlx5dr_dbg_init_dump(dmn);
	return dmn;

uninit_caps:
	dr_domain_caps_uninit(dmn);
free_domain:
	kfree(dmn);
	return NULL;
}

/* Ensure the device steering tables are synchronized with updates made
 * by SW insertion.
 */
int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
{
	int ret = 0;

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
		mlx5dr_domain_lock(dmn);
		ret = mlx5dr_send_ring_force_drain(dmn);
		mlx5dr_domain_unlock(dmn);
		if (ret) {
			mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
				   flags, ret);
			return ret;
		}
	}

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
		ret = mlx5dr_cmd_sync_steering(dmn->mdev);

	return ret;
}

int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
	if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
		return -EBUSY;

	/* make sure resources are not used by the hardware */
	mlx5dr_cmd_sync_steering(dmn->mdev);
	mlx5dr_dbg_uninit_dump(dmn);
	dr_domain_uninit_csum_recalc_fts(dmn);
	dr_domain_uninit_resources(dmn);
	dr_domain_caps_uninit(dmn);
	mutex_destroy(&dmn->info.tx.mutex);
	mutex_destroy(&dmn->info.rx.mutex);
	kfree(dmn);
	return 0;
}

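/* Swap the peer domain reference under the domain lock. The peer is
 * presumably the other eswitch's domain when two devices are paired;
 * passing NULL simply drops the current peer reference.
 */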
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
			    struct mlx5dr_domain *peer_dmn)
{
	mlx5dr_domain_lock(dmn);

	if (dmn->peer_dmn)
		refcount_dec(&dmn->peer_dmn->refcount);

	dmn->peer_dmn = peer_dmn;

	if (dmn->peer_dmn)
		refcount_inc(&dmn->peer_dmn->refcount);

	mlx5dr_domain_unlock(dmn);
}