// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd */
#include <linux/mlx5/driver.h>
#include "vhca_event.h"
#include "priv.h"
#include "sf.h"
#include "mlx5_ifc_vhca_event.h"
#include "ecpf.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "diag/sf_tracepoint.h"

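/* One bookkeeping entry per SF hardware function id. It records the
 * user-assigned SF number and whether the id is currently allocated or
 * is awaiting a deferred delete.
 */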
struct mlx5_sf_hw {
	u32 usr_sfnum;
	u8 allocated: 1;
	u8 pending_delete: 1;
};

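/* Per-controller table of SF entries covering a contiguous range of
 * hardware function ids that starts at start_fn_id.
 */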
struct mlx5_sf_hwc_table {
	struct mlx5_sf_hw *sfs;
	int max_fn;
	u16 start_fn_id;
};

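/* Each controller class gets its own table: one for SFs owned by the
 * local controller and one for SFs of an external controller.
 */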
enum mlx5_sf_hwc_index {
	MLX5_SF_HWC_LOCAL,
	MLX5_SF_HWC_EXTERNAL,
	MLX5_SF_HWC_MAX,
};

struct mlx5_sf_hw_table {
	struct mlx5_core_dev *dev;
	struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
	struct notifier_block vhca_nb;
	struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX];
};

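/* Controller number 0 selects the local controller's table; any nonzero
 * controller number selects the external controller's table.
 */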
static struct mlx5_sf_hwc_table *
mlx5_sf_controller_to_hwc(struct mlx5_core_dev *dev, u32 controller)
{
	int idx = !!controller;

	return &dev->priv.sf_hw_table->hwc[idx];
}

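/* Map a software SF id to its hardware function id by offsetting into
 * the controller's contiguous function id range.
 */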
u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id)
{
	struct mlx5_sf_hwc_table *hwc;

	hwc = mlx5_sf_controller_to_hwc(dev, controller);
	return hwc->start_fn_id + sw_id;
}

static u16 mlx5_sf_hw_to_sw_id(struct mlx5_sf_hwc_table *hwc, u16 hw_id)
{
	return hw_id - hwc->start_fn_id;
}

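/* Find the controller table whose function id range contains fn_id;
 * returns NULL when the id belongs to neither table.
 */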
static struct mlx5_sf_hwc_table *
mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(table->hwc); i++) {
		if (table->hwc[i].max_fn &&
		    fn_id >= table->hwc[i].start_fn_id &&
		    fn_id < (table->hwc[i].start_fn_id + table->hwc[i].max_fn))
			return &table->hwc[i];
	}
	return NULL;
}

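/* Linearly scan the controller's table for a free slot while rejecting
 * a duplicate user SF number. Returns the allocated software id on
 * success, -EEXIST on a duplicate and -ENOSPC when the table is absent
 * or full.
 */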
static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,
				     u32 usr_sfnum)
{
	struct mlx5_sf_hwc_table *hwc;
	int free_idx = -1;
	int i;

	hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
	if (!hwc->sfs)
		return -ENOSPC;

	for (i = 0; i < hwc->max_fn; i++) {
		if (!hwc->sfs[i].allocated && free_idx == -1) {
			free_idx = i;
			continue;
		}

		if (hwc->sfs[i].allocated && hwc->sfs[i].usr_sfnum == usr_sfnum)
			return -EEXIST;
	}

	if (free_idx == -1)
		return -ENOSPC;

	hwc->sfs[free_idx].usr_sfnum = usr_sfnum;
	hwc->sfs[free_idx].allocated = true;
	return free_idx;
}

static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
{
	struct mlx5_sf_hwc_table *hwc;

	hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
	hwc->sfs[id].allocated = false;
	hwc->sfs[id].pending_delete = false;
}

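/* Allocate a new SF: reserve a software id, allocate the HW function in
 * firmware, assign the user SF number to the vhca and, for an external
 * controller, arm the firmware so the SF manager receives its vhca
 * state events. Every step is unwound on failure.
 */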
int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
	u16 hw_fn_id;
	int sw_id;
	int err;

	if (!table)
		return -EOPNOTSUPP;

	mutex_lock(&table->table_lock);
	sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum);
	if (sw_id < 0) {
		err = sw_id;
		goto exist_err;
	}

	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, sw_id);
	err = mlx5_cmd_alloc_sf(dev, hw_fn_id);
	if (err)
		goto err;

	err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, usr_sfnum);
	if (err)
		goto vhca_err;

	if (controller) {
		/* If this SF is for an external controller, the SF manager
		 * needs to arm the firmware to receive its events.
		 */
		err = mlx5_vhca_event_arm(dev, hw_fn_id);
		if (err)
			goto vhca_err;
	}

	trace_mlx5_sf_hwc_alloc(dev, controller, hw_fn_id, usr_sfnum);
	mutex_unlock(&table->table_lock);
	return sw_id;

vhca_err:
	mlx5_cmd_dealloc_sf(dev, hw_fn_id);
err:
	mlx5_sf_hw_table_id_free(table, controller, sw_id);
exist_err:
	mutex_unlock(&table->table_lock);
	return err;
}

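/* Synchronously deallocate the SF's HW function in firmware and release
 * its software id, without consulting the vhca state.
 */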
void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
	u16 hw_fn_id;

	mutex_lock(&table->table_lock);
	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
	mlx5_cmd_dealloc_sf(dev, hw_fn_id);
	mlx5_sf_hw_table_id_free(table, controller, id);
	mutex_unlock(&table->table_lock);
}

static void mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev *dev,
					 struct mlx5_sf_hwc_table *hwc, int idx)
{
	mlx5_cmd_dealloc_sf(dev, hwc->start_fn_id + idx);
	hwc->sfs[idx].allocated = false;
	hwc->sfs[idx].pending_delete = false;
	trace_mlx5_sf_hwc_free(dev, hwc->start_fn_id + idx);
}

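/* Free an SF whose vhca may still be in use. If the vhca is merely in
 * the ALLOCATED state it is deallocated immediately; otherwise the
 * entry is marked pending_delete and reclaimed later by the vhca event
 * handler once the SF driver detaches.
 */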
void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
	struct mlx5_sf_hwc_table *hwc;
	u16 hw_fn_id;
	u8 state;
	int err;

	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
	hwc = mlx5_sf_controller_to_hwc(dev, controller);
	mutex_lock(&table->table_lock);
	err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out));
	if (err)
		goto err;
	state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
	if (state == MLX5_VHCA_STATE_ALLOCATED) {
		mlx5_cmd_dealloc_sf(dev, hw_fn_id);
		hwc->sfs[id].allocated = false;
	} else {
		hwc->sfs[id].pending_delete = true;
		trace_mlx5_sf_hwc_deferred_free(dev, hw_fn_id);
	}
err:
	mutex_unlock(&table->table_lock);
}

static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev,
					     struct mlx5_sf_hwc_table *hwc)
{
	int i;

	for (i = 0; i < hwc->max_fn; i++) {
		if (hwc->sfs[i].allocated)
			mlx5_sf_hw_table_hwc_sf_free(dev, hwc, i);
	}
}

static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table)
{
	mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]);
	mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
}

static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id)
{
	struct mlx5_sf_hw *sfs;

	if (!max_fn)
		return 0;

	sfs = kcalloc(max_fn, sizeof(*sfs), GFP_KERNEL);
	if (!sfs)
		return -ENOMEM;

	hwc->sfs = sfs;
	hwc->max_fn = max_fn;
	hwc->start_fn_id = base_id;
	return 0;
}

static void mlx5_sf_hw_table_hwc_cleanup(struct mlx5_sf_hwc_table *hwc)
{
	kfree(hwc->sfs);
}

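/* Create the SF HW table during device initialization. The local range
 * starts at the device's SF start function id; the external range is
 * queried from the eswitch. Either range may be empty, in which case
 * its per-controller table is left unallocated.
 */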
int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_hw_table *table;
	u16 max_ext_fn = 0;
	u16 ext_base_id = 0;
	u16 max_fn = 0;
	u16 base_id;
	int err;

	if (!mlx5_vhca_event_supported(dev))
		return 0;

	if (mlx5_sf_supported(dev))
		max_fn = mlx5_sf_max_functions(dev);

	err = mlx5_esw_sf_max_hpf_functions(dev, &max_ext_fn, &ext_base_id);
	if (err)
		return err;

	if (!max_fn && !max_ext_fn)
		return 0;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	mutex_init(&table->table_lock);
	table->dev = dev;
	dev->priv.sf_hw_table = table;

	base_id = mlx5_sf_start_function_id(dev);
	err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_LOCAL], max_fn, base_id);
	if (err)
		goto table_err;

	err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_EXTERNAL],
					max_ext_fn, ext_base_id);
	if (err)
		goto ext_err;

	mlx5_core_dbg(dev, "SF HW table: max sfs = %d, ext sfs = %d\n", max_fn, max_ext_fn);
	return 0;

ext_err:
	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
table_err:
	mutex_destroy(&table->table_lock);
	kfree(table);
	return err;
}

void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;

	if (!table)
		return;

	mutex_destroy(&table->table_lock);
	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_EXTERNAL]);
	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
	kfree(table);
}

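/* vhca state change handler: a pending-delete SF that transitions back
 * to the ALLOCATED state means its driver has fully detached, so the
 * hardware function id can be recycled.
 */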
static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
	struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
	const struct mlx5_vhca_state_event *event = data;
	struct mlx5_sf_hwc_table *hwc;
	struct mlx5_sf_hw *sf_hw;
	u16 sw_id;

	if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
		return 0;

	hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
	if (!hwc)
		return 0;

	sw_id = mlx5_sf_hw_to_sw_id(hwc, event->function_id);
	sf_hw = &hwc->sfs[sw_id];

	mutex_lock(&table->table_lock);
	/* The SF driver has notified through firmware that the SF is
	 * finally detached. Hence recycle the SF hardware id for reuse.
	 */
	if (sf_hw->allocated && sf_hw->pending_delete)
		mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id);
	mutex_unlock(&table->table_lock);
	return 0;
}

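/* Register the vhca state event handler once the table exists; the
 * events drive recycling of pending-delete SF ids.
 */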
int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;

	if (!table)
		return 0;

	table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
	return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb);
}

void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;

	if (!table)
		return;

	mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
	/* Dealloc SFs whose firmware event has been missed. */
	mlx5_sf_hw_table_dealloc_all(table);
}

bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)
{
	return !!dev->priv.sf_hw_table;
}
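
/* A minimal sketch of the expected call order, assuming the standard
 * mlx5 probe/remove flow drives these entry points:
 *
 *	mlx5_sf_hw_table_init(dev);	// discover id ranges, allocate table
 *	mlx5_sf_hw_table_create(dev);	// register the vhca event handler
 *
 *	sw_id = mlx5_sf_hw_table_sf_alloc(dev, controller, usr_sfnum);
 *	...
 *	mlx5_sf_hw_table_sf_deferred_free(dev, controller, sw_id);
 *
 *	mlx5_sf_hw_table_destroy(dev);	// unregister handler, reclaim SFs
 *	mlx5_sf_hw_table_cleanup(dev);	// free table memory
 */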