// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/list.h>
#include <linux/spinlock.h>

#include "hnae3.h"

static LIST_HEAD(hnae3_ae_algo_list);
static LIST_HEAD(hnae3_client_list);
static LIST_HEAD(hnae3_ae_dev_list);

void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
{
	const struct pci_device_id *pci_id;
	struct hnae3_ae_dev *ae_dev;

	if (!ae_algo)
		return;

	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
		if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
			continue;

		pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
		if (!pci_id)
			continue;
		if (IS_ENABLED(CONFIG_PCI_IOV))
			pci_disable_sriov(ae_dev->pdev);
	}
}
EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);

/* We keep things simple and use a single lock for all the lists. This is
 * non-critical code, so other updates, if they happen in parallel, can wait.
 */
static DEFINE_MUTEX(hnae3_common_lock);

static bool hnae3_client_match(enum hnae3_client_type client_type)
{
	if (client_type == HNAE3_CLIENT_KNIC ||
	    client_type == HNAE3_CLIENT_ROCE)
		return true;

	return false;
}

void hnae3_set_client_init_flag(struct hnae3_client *client,
				struct hnae3_ae_dev *ae_dev,
				unsigned int inited)
{
	if (!client || !ae_dev)
		return;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
		break;
	case HNAE3_CLIENT_ROCE:
		hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(hnae3_set_client_init_flag);
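
/* Illustrative sketch, not part of this framework: a client driver would
 * normally report its per-device init state back through
 * hnae3_set_client_init_flag() from its own instance-init path.  The
 * function and helper names below ("example_*") are hypothetical; only the
 * hnae3_set_client_init_flag() call and the parameter types come from this
 * file.
 *
 *	static int example_init_client_instance(struct hnae3_client *client,
 *						struct hnae3_ae_dev *ae_dev)
 *	{
 *		int ret;
 *
 *		ret = example_setup(client, ae_dev);	// hypothetical helper
 *		if (ret)
 *			return ret;
 *
 *		// mark this client instance as initialized so that a later
 *		// hnae3_uninit_client_instance() knows to tear it down
 *		hnae3_set_client_init_flag(client, ae_dev, 1);
 *		return 0;
 *	}
 */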

static int hnae3_get_client_init_flag(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	int inited = 0;

	switch (client->type) {
	case HNAE3_CLIENT_KNIC:
		inited = hnae3_get_bit(ae_dev->flag,
				       HNAE3_KNIC_CLIENT_INITED_B);
		break;
	case HNAE3_CLIENT_ROCE:
		inited = hnae3_get_bit(ae_dev->flag,
				       HNAE3_ROCE_CLIENT_INITED_B);
		break;
	default:
		break;
	}

	return inited;
}

static int hnae3_init_client_instance(struct hnae3_client *client,
				      struct hnae3_ae_dev *ae_dev)
{
	int ret;

	/* check if this client matches the type of ae_dev */
	if (!(hnae3_client_match(client->type) &&
	      hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
		return 0;
	}

	ret = ae_dev->ops->init_client_instance(client, ae_dev);
	if (ret)
		dev_err(&ae_dev->pdev->dev,
			"fail to instantiate client, ret = %d\n", ret);

	return ret;
}

static void hnae3_uninit_client_instance(struct hnae3_client *client,
					 struct hnae3_ae_dev *ae_dev)
{
	/* check if this client matches the type of ae_dev */
	if (!(hnae3_client_match(client->type) &&
	      hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
		return;

	if (hnae3_get_client_init_flag(client, ae_dev)) {
		ae_dev->ops->uninit_client_instance(client, ae_dev);

		hnae3_set_client_init_flag(client, ae_dev, 0);
	}
}

int hnae3_register_client(struct hnae3_client *client)
{
	struct hnae3_client *client_tmp;
	struct hnae3_ae_dev *ae_dev;

	if (!client)
		return -ENODEV;

	mutex_lock(&hnae3_common_lock);
	/* one system should have only one client for each type */
	list_for_each_entry(client_tmp, &hnae3_client_list, node) {
		if (client_tmp->type == client->type)
			goto exit;
	}

	list_add_tail(&client->node, &hnae3_client_list);

	/* initialize the client on every matched port */
	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
		/* if the client cannot be initialized on the current port,
		 * for whatever reason, move on to the next available port
		 */
		int ret = hnae3_init_client_instance(client, ae_dev);

		if (ret)
			dev_err(&ae_dev->pdev->dev,
				"match and instantiation failed for port, ret = %d\n",
				ret);
	}

exit:
	mutex_unlock(&hnae3_common_lock);

	return 0;
}
EXPORT_SYMBOL(hnae3_register_client);

void hnae3_unregister_client(struct hnae3_client *client)
{
	struct hnae3_client *client_tmp;
	struct hnae3_ae_dev *ae_dev;
	bool existed = false;

	if (!client)
		return;

	mutex_lock(&hnae3_common_lock);
	/* one system should have only one client for each type */
	list_for_each_entry(client_tmp, &hnae3_client_list, node) {
		if (client_tmp->type == client->type) {
			existed = true;
			break;
		}
	}

	if (!existed) {
		mutex_unlock(&hnae3_common_lock);
		pr_err("client %s does not exist!\n", client->name);
		return;
	}

	/* un-initialize the client on every matched port */
	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
		hnae3_uninit_client_instance(client, ae_dev);
	}

	list_del(&client->node);
	mutex_unlock(&hnae3_common_lock);
}
EXPORT_SYMBOL(hnae3_unregister_client);
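
/* Illustrative sketch, not part of this framework: how a client module
 * might plug in.  The initializer values and module wiring below are
 * assumptions for illustration (a real client would also supply its ops
 * table, not shown here); only hnae3_register_client(),
 * hnae3_unregister_client() and the .name/.type fields come from this file.
 *
 *	static struct hnae3_client example_client = {
 *		.name = "example_knic",
 *		.type = HNAE3_CLIENT_KNIC,
 *	};
 *
 *	static int __init example_client_init(void)
 *	{
 *		// also initializes the client on every already-registered,
 *		// matching ae_dev
 *		return hnae3_register_client(&example_client);
 *	}
 *
 *	static void __exit example_client_exit(void)
 *	{
 *		// un-initializes the client on every matched ae_dev before
 *		// removing it from hnae3_client_list
 *		hnae3_unregister_client(&example_client);
 *	}
 */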

/* hnae3_register_ae_algo - register an AE algorithm to the hnae3 framework
 * @ae_algo: AE algorithm
 * NOTE: duplicated names are not checked
 */
void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
{
	const struct pci_device_id *id;
	struct hnae3_ae_dev *ae_dev;
	struct hnae3_client *client;
	int ret;

	if (!ae_algo)
		return;

	mutex_lock(&hnae3_common_lock);

	list_add_tail(&ae_algo->node, &hnae3_ae_algo_list);

	/* Check if this algo/ops matches the list of ae_devs */
	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
		id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
		if (!id)
			continue;

		if (!ae_algo->ops) {
			dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
			continue;
		}
		ae_dev->ops = ae_algo->ops;

		ret = ae_algo->ops->init_ae_dev(ae_dev);
		if (ret) {
			dev_err(&ae_dev->pdev->dev,
				"init ae_dev error, ret = %d\n", ret);
			continue;
		}

		/* ae_dev init should set flag */
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);

		/* check the client list for a match with this ae_dev type
		 * and initialize the matched client instances
		 */
		list_for_each_entry(client, &hnae3_client_list, node) {
			ret = hnae3_init_client_instance(client, ae_dev);
			if (ret)
				dev_err(&ae_dev->pdev->dev,
					"match and instantiation failed, ret = %d\n",
					ret);
		}
	}

	mutex_unlock(&hnae3_common_lock);
}
EXPORT_SYMBOL(hnae3_register_ae_algo);

/* hnae3_unregister_ae_algo - unregisters an AE algorithm
 * @ae_algo: the AE algorithm to unregister
 */
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
{
	const struct pci_device_id *id;
	struct hnae3_ae_dev *ae_dev;
	struct hnae3_client *client;

	if (!ae_algo)
		return;

	mutex_lock(&hnae3_common_lock);
	/* Check if there is a matched ae_dev */
	list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
		if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
			continue;

		id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
		if (!id)
			continue;

		/* check the client list for a match with this ae_dev type
		 * and un-initialize the matched client instances
		 */
		list_for_each_entry(client, &hnae3_client_list, node)
			hnae3_uninit_client_instance(client, ae_dev);

		ae_algo->ops->uninit_ae_dev(ae_dev);
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
		ae_dev->ops = NULL;
	}

	list_del(&ae_algo->node);
	mutex_unlock(&hnae3_common_lock);
}
EXPORT_SYMBOL(hnae3_unregister_ae_algo);
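
/* Illustrative sketch, not part of this framework: how an AE algorithm
 * driver might register its ops.  The PCI ID and ops-table names below are
 * hypothetical; the .ops and .pdev_id_table fields are the ones this file
 * dereferences, and example_ae_ops is assumed to provide init_ae_dev,
 * uninit_ae_dev, init_client_instance and uninit_client_instance.
 *
 *	static const struct pci_device_id example_pci_tbl[] = {
 *		{ PCI_VDEVICE(HUAWEI, 0xA220), 0 },	// hypothetical ID
 *		{ 0, }
 *	};
 *
 *	static struct hnae3_ae_algo example_ae_algo = {
 *		.ops = &example_ae_ops,
 *		.pdev_id_table = example_pci_tbl,
 *	};
 *
 *	static int __init example_algo_init(void)
 *	{
 *		hnae3_register_ae_algo(&example_ae_algo);
 *		return 0;
 *	}
 *
 *	static void __exit example_algo_exit(void)
 *	{
 *		// disable SR-IOV on matched devices first, then tear down
 *		hnae3_unregister_ae_algo_prepare(&example_ae_algo);
 *		hnae3_unregister_ae_algo(&example_ae_algo);
 *	}
 */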

/* hnae3_register_ae_dev - registers an AE device to the hnae3 framework
 * @ae_dev: the AE device
 * NOTE: duplicated names are not checked
 */
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	const struct pci_device_id *id;
	struct hnae3_ae_algo *ae_algo;
	struct hnae3_client *client;
	int ret;

	if (!ae_dev)
		return -ENODEV;

	mutex_lock(&hnae3_common_lock);

	list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);

	/* Check if there is a matched ae_algo */
	list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
		id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
		if (!id)
			continue;

		if (!ae_algo->ops) {
			dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
			ret = -EOPNOTSUPP;
			goto out_err;
		}
		ae_dev->ops = ae_algo->ops;

		ret = ae_dev->ops->init_ae_dev(ae_dev);
		if (ret) {
			dev_err(&ae_dev->pdev->dev,
				"init ae_dev error, ret = %d\n", ret);
			goto out_err;
		}

		/* ae_dev init should set flag */
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
		break;
	}

	/* check the client list for a match with this ae_dev type
	 * and initialize the matched client instances
	 */
	list_for_each_entry(client, &hnae3_client_list, node) {
		ret = hnae3_init_client_instance(client, ae_dev);
		if (ret)
			dev_err(&ae_dev->pdev->dev,
				"match and instantiation failed, ret = %d\n",
				ret);
	}

	mutex_unlock(&hnae3_common_lock);

	return 0;

out_err:
	list_del(&ae_dev->node);
	mutex_unlock(&hnae3_common_lock);

	return ret;
}
EXPORT_SYMBOL(hnae3_register_ae_dev);

/* hnae3_unregister_ae_dev - unregisters an AE device
 * @ae_dev: the AE device to unregister
 */
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
{
	const struct pci_device_id *id;
	struct hnae3_ae_algo *ae_algo;
	struct hnae3_client *client;

	if (!ae_dev)
		return;

	mutex_lock(&hnae3_common_lock);
	/* Check if there is a matched ae_algo */
	list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
		if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
			continue;

		id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
		if (!id)
			continue;

		list_for_each_entry(client, &hnae3_client_list, node)
			hnae3_uninit_client_instance(client, ae_dev);

		ae_algo->ops->uninit_ae_dev(ae_dev);
		hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
		ae_dev->ops = NULL;
	}

	list_del(&ae_dev->node);
	mutex_unlock(&hnae3_common_lock);
}
EXPORT_SYMBOL(hnae3_unregister_ae_dev);
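
/* Illustrative sketch, not part of this framework: an ae_dev is typically
 * registered from a backing PCI driver's probe routine and unregistered on
 * remove.  The devm allocation and drvdata pairing shown here are
 * assumptions about how such a driver might wire things up; only
 * hnae3_register_ae_dev()/hnae3_unregister_ae_dev() come from this file.
 *
 *	static int example_pci_probe(struct pci_dev *pdev,
 *				     const struct pci_device_id *ent)
 *	{
 *		struct hnae3_ae_dev *ae_dev;
 *
 *		ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
 *		if (!ae_dev)
 *			return -ENOMEM;
 *
 *		ae_dev->pdev = pdev;
 *		pci_set_drvdata(pdev, ae_dev);
 *
 *		// matches a registered ae_algo (if any) and spawns client
 *		// instances for this device
 *		return hnae3_register_ae_dev(ae_dev);
 *	}
 *
 *	static void example_pci_remove(struct pci_dev *pdev)
 *	{
 *		hnae3_unregister_ae_dev(pci_get_drvdata(pdev));
 *	}
 */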

MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HNAE3(Hisilicon Network Acceleration Engine) Framework");
MODULE_VERSION(HNAE3_MOD_VERSION);