// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2023 Advanced Micro Devices, Inc */

#include <linux/pci.h>
#include <linux/vmalloc.h>

#include "core.h"

static BLOCKING_NOTIFIER_HEAD(pds_notify_chain);

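/* Event notification chain shared with client drivers.
 *
 * Clients register a notifier_block to receive PDS_EVENT_* notifications,
 * such as the reset events sent from pdsc_fw_down()/pdsc_fw_up() below.
 * Illustrative sketch only; an actual client's callback will differ:
 *
 *	static int my_pds_event(struct notifier_block *nb,
 *				unsigned long event, void *data)
 *	{
 *		if (event == PDS_EVENT_RESET)
 *			;	// react to the reset notification
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_pds_event };
 *
 *	pdsc_register_notify(&my_nb);
 *	...
 *	pdsc_unregister_notify(&my_nb);
 */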
int pdsc_register_notify(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&pds_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(pdsc_register_notify);

void pdsc_unregister_notify(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&pds_notify_chain, nb);
}
EXPORT_SYMBOL_GPL(pdsc_unregister_notify);

void pdsc_notify(unsigned long event, void *data)
{
	blocking_notifier_call_chain(&pds_notify_chain, event, data);
}

void pdsc_intr_free(struct pdsc *pdsc, int index)
{
	struct pdsc_intr_info *intr_info;

	if (index >= pdsc->nintrs || index < 0) {
		WARN(true, "bad intr index %d\n", index);
		return;
	}

	intr_info = &pdsc->intr_info[index];
	if (!intr_info->vector)
		return;
	dev_dbg(pdsc->dev, "%s: idx %d vec %d name %s\n",
		__func__, index, intr_info->vector, intr_info->name);

	pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);
	pds_core_intr_clean(&pdsc->intr_ctrl[index]);

	free_irq(intr_info->vector, intr_info->data);

	memset(intr_info, 0, sizeof(*intr_info));
}

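/* Allocate the first free interrupt slot: find an unused entry in
 * intr_info[], look up its OS vector with pci_irq_vector(), leave the
 * interrupt masked in the device, and register the handler with
 * request_irq().  Returns the slot index on success or a negative errno;
 * on failure the slot is cleaned back up via pdsc_intr_free().
 */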
int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
		    irq_handler_t handler, void *data)
{
	struct pdsc_intr_info *intr_info;
	unsigned int index;
	int err;

	/* Find the first available interrupt */
	for (index = 0; index < pdsc->nintrs; index++)
		if (!pdsc->intr_info[index].vector)
			break;
	if (index >= pdsc->nintrs) {
		dev_warn(pdsc->dev, "%s: no intr, index=%d nintrs=%d\n",
			 __func__, index, pdsc->nintrs);
		return -ENOSPC;
	}

	pds_core_intr_clean_flags(&pdsc->intr_ctrl[index],
				  PDS_CORE_INTR_CRED_RESET_COALESCE);

	intr_info = &pdsc->intr_info[index];

	intr_info->index = index;
	intr_info->data = data;
	strscpy(intr_info->name, name, sizeof(intr_info->name));

	/* Get the OS vector number for the interrupt */
	err = pci_irq_vector(pdsc->pdev, index);
	if (err < 0) {
		dev_err(pdsc->dev, "failed to get intr vector index %d: %pe\n",
			index, ERR_PTR(err));
		goto err_out_free_intr;
	}
	intr_info->vector = err;

	/* Init the device's intr mask */
	pds_core_intr_clean(&pdsc->intr_ctrl[index]);
	pds_core_intr_mask_assert(&pdsc->intr_ctrl[index], 1);
	pds_core_intr_mask(&pdsc->intr_ctrl[index], PDS_CORE_INTR_MASK_SET);

	/* Register the isr with a name */
	err = request_irq(intr_info->vector, handler, 0, intr_info->name, data);
	if (err) {
		dev_err(pdsc->dev, "failed to get intr irq vector %d: %pe\n",
			intr_info->vector, ERR_PTR(err));
		goto err_out_free_intr;
	}

	return index;

err_out_free_intr:
	pdsc_intr_free(pdsc, index);
	return err;
}

static void pdsc_qcq_intr_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
	if (!(qcq->flags & PDS_CORE_QCQ_F_INTR) ||
	    qcq->intx == PDS_CORE_INTR_INDEX_NOT_ASSIGNED)
		return;

	pdsc_intr_free(pdsc, qcq->intx);
	qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
}

static int pdsc_qcq_intr_alloc(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
	char name[PDSC_INTR_NAME_MAX_SZ];
	int index;

	if (!(qcq->flags & PDS_CORE_QCQ_F_INTR)) {
		qcq->intx = PDS_CORE_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	snprintf(name, sizeof(name), "%s-%d-%s",
		 PDS_CORE_DRV_NAME, pdsc->pdev->bus->number, qcq->q.name);
	index = pdsc_intr_alloc(pdsc, name, pdsc_adminq_isr, pdsc);
	if (index < 0)
		return index;
	qcq->intx = index;

	return 0;
}

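/* Tear down a queue/completion-queue pair: remove its debugfs entry,
 * release its interrupt slot, free the coherent DMA rings, free the
 * q.info/cq.info shadow arrays, and zero the qcq so a later free or
 * re-alloc sees a clean structure.
 */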
void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
{
	struct device *dev = pdsc->dev;

	if (!(qcq && qcq->pdsc))
		return;

	pdsc_debugfs_del_qcq(qcq);

	pdsc_qcq_intr_free(pdsc, qcq);

	if (qcq->q_base)
		dma_free_coherent(dev, qcq->q_size,
				  qcq->q_base, qcq->q_base_pa);

	if (qcq->cq_base)
		dma_free_coherent(dev, qcq->cq_size,
				  qcq->cq_base, qcq->cq_base_pa);

	if (qcq->cq.info)
		vfree(qcq->cq.info);

	if (qcq->q.info)
		vfree(qcq->q.info);

	memset(qcq, 0, sizeof(*qcq));
}

static void pdsc_q_map(struct pdsc_queue *q, void *base, dma_addr_t base_pa)
{
	struct pdsc_q_info *cur;
	unsigned int i;

	q->base = base;
	q->base_pa = base_pa;

	for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
		cur->desc = base + (i * q->desc_size);
}

static void pdsc_cq_map(struct pdsc_cq *cq, void *base, dma_addr_t base_pa)
{
	struct pdsc_cq_info *cur;
	unsigned int i;

	cq->base = base;
	cq->base_pa = base_pa;

	for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
		cur->comp = base + (i * cq->desc_size);
}

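/* Allocate and map a queue/completion-queue pair.
 *
 * The flow is: allocate the q.info shadow array, set up the (optional)
 * interrupt, allocate the cq.info shadow array, then carve out the DMA
 * descriptor rings.  For a NOTIFYQ the queue and completion ring share one
 * contiguous allocation, laid out roughly as:
 *
 *	[ up to PDS_PAGE_SIZE of alignment padding              ]
 *	[ num_descs * desc_size,    aligned to PDS_PAGE_SIZE    ]  q ring
 *	[ num_descs * cq_desc_size, aligned to PDS_PAGE_SIZE    ]  cq ring
 *
 * otherwise the q and cq rings come from two separate allocations.
 */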
int pdsc_qcq_alloc(struct pdsc *pdsc, unsigned int type, unsigned int index,
		   const char *name, unsigned int flags, unsigned int num_descs,
		   unsigned int desc_size, unsigned int cq_desc_size,
		   unsigned int pid, struct pdsc_qcq *qcq)
{
	struct device *dev = pdsc->dev;
	void *q_base, *cq_base;
	dma_addr_t cq_base_pa;
	dma_addr_t q_base_pa;
	int err;

	qcq->q.info = vcalloc(num_descs, sizeof(*qcq->q.info));
	if (!qcq->q.info) {
		err = -ENOMEM;
		goto err_out;
	}

	qcq->pdsc = pdsc;
	qcq->flags = flags;
	INIT_WORK(&qcq->work, pdsc_work_thread);

	qcq->q.type = type;
	qcq->q.index = index;
	qcq->q.num_descs = num_descs;
	qcq->q.desc_size = desc_size;
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->q.pid = pid;
	snprintf(qcq->q.name, sizeof(qcq->q.name), "%s%u", name, index);

	err = pdsc_qcq_intr_alloc(pdsc, qcq);
	if (err)
		goto err_out_free_q_info;

	qcq->cq.info = vcalloc(num_descs, sizeof(*qcq->cq.info));
	if (!qcq->cq.info) {
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	qcq->cq.bound_intr = &pdsc->intr_info[qcq->intx];
	qcq->cq.num_descs = num_descs;
	qcq->cq.desc_size = cq_desc_size;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;

	if (flags & PDS_CORE_QCQ_F_NOTIFYQ) {
		/* q & cq need to be contiguous in case of notifyq */
		qcq->q_size = PDS_PAGE_SIZE +
			      ALIGN(num_descs * desc_size, PDS_PAGE_SIZE) +
			      ALIGN(num_descs * cq_desc_size, PDS_PAGE_SIZE);
		qcq->q_base = dma_alloc_coherent(dev,
						 qcq->q_size + qcq->cq_size,
						 &qcq->q_base_pa,
						 GFP_KERNEL);
		if (!qcq->q_base) {
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
		q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
		pdsc_q_map(&qcq->q, q_base, q_base_pa);

		cq_base = PTR_ALIGN(q_base +
				    ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
				    PDS_PAGE_SIZE);
		cq_base_pa = ALIGN(qcq->q_base_pa +
				   ALIGN(num_descs * desc_size, PDS_PAGE_SIZE),
				   PDS_PAGE_SIZE);

	} else {
		/* q DMA descriptors */
		qcq->q_size = PDS_PAGE_SIZE + (num_descs * desc_size);
		qcq->q_base = dma_alloc_coherent(dev, qcq->q_size,
						 &qcq->q_base_pa,
						 GFP_KERNEL);
		if (!qcq->q_base) {
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(qcq->q_base, PDS_PAGE_SIZE);
		q_base_pa = ALIGN(qcq->q_base_pa, PDS_PAGE_SIZE);
		pdsc_q_map(&qcq->q, q_base, q_base_pa);

		/* cq DMA descriptors */
		qcq->cq_size = PDS_PAGE_SIZE + (num_descs * cq_desc_size);
		qcq->cq_base = dma_alloc_coherent(dev, qcq->cq_size,
						  &qcq->cq_base_pa,
						  GFP_KERNEL);
		if (!qcq->cq_base) {
			err = -ENOMEM;
			goto err_out_free_q;
		}
		cq_base = PTR_ALIGN(qcq->cq_base, PDS_PAGE_SIZE);
		cq_base_pa = ALIGN(qcq->cq_base_pa, PDS_PAGE_SIZE);
	}

	pdsc_cq_map(&qcq->cq, cq_base, cq_base_pa);
	qcq->cq.bound_q = &qcq->q;

	pdsc_debugfs_add_qcq(pdsc, qcq);

	return 0;

err_out_free_q:
	dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
err_out_free_cq_info:
	vfree(qcq->cq.info);
err_out_free_irq:
	pdsc_qcq_intr_free(pdsc, qcq);
err_out_free_q_info:
	vfree(qcq->q.info);
	memset(qcq, 0, sizeof(*qcq));
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

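/* Send the PDS_CORE_CMD_INIT device command: copy the AdminQ/NotifyQ base
 * addresses, ring sizes, and interrupt index into the device's command
 * data registers, issue the command under devcmd_lock, then read back the
 * device's reply to learn the hw_index, the queues' hw types and indices,
 * and which doorbell page to map for kernel use.
 */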
static int pdsc_core_init(struct pdsc *pdsc)
{
	union pds_core_dev_comp comp = {};
	union pds_core_dev_cmd cmd = {
		.init.opcode = PDS_CORE_CMD_INIT,
	};
	struct pds_core_dev_init_data_out cido;
	struct pds_core_dev_init_data_in cidi;
	u32 dbid_count;
	u32 dbpage_num;
	size_t sz;
	int err;

	cidi.adminq_q_base = cpu_to_le64(pdsc->adminqcq.q_base_pa);
	cidi.adminq_cq_base = cpu_to_le64(pdsc->adminqcq.cq_base_pa);
	cidi.notifyq_cq_base = cpu_to_le64(pdsc->notifyqcq.cq.base_pa);
	cidi.flags = cpu_to_le32(PDS_CORE_QINIT_F_IRQ | PDS_CORE_QINIT_F_ENA);
	cidi.intr_index = cpu_to_le16(pdsc->adminqcq.intx);
	cidi.adminq_ring_size = ilog2(pdsc->adminqcq.q.num_descs);
	cidi.notifyq_ring_size = ilog2(pdsc->notifyqcq.q.num_descs);

	mutex_lock(&pdsc->devcmd_lock);

	sz = min_t(size_t, sizeof(cidi), sizeof(pdsc->cmd_regs->data));
	memcpy_toio(&pdsc->cmd_regs->data, &cidi, sz);

	err = pdsc_devcmd_locked(pdsc, &cmd, &comp, pdsc->devcmd_timeout);
	if (!err) {
		sz = min_t(size_t, sizeof(cido), sizeof(pdsc->cmd_regs->data));
		memcpy_fromio(&cido, &pdsc->cmd_regs->data, sz);
	}

	mutex_unlock(&pdsc->devcmd_lock);
	if (err) {
		dev_err(pdsc->dev, "Device init command failed: %pe\n",
			ERR_PTR(err));
		return err;
	}

	pdsc->hw_index = le32_to_cpu(cido.core_hw_index);

	dbid_count = le32_to_cpu(pdsc->dev_ident.ndbpgs_per_lif);
	dbpage_num = pdsc->hw_index * dbid_count;
	pdsc->kern_dbpage = pdsc_map_dbpage(pdsc, dbpage_num);
	if (!pdsc->kern_dbpage) {
		dev_err(pdsc->dev, "Cannot map dbpage, aborting\n");
		return -ENOMEM;
	}

	pdsc->adminqcq.q.hw_type = cido.adminq_hw_type;
	pdsc->adminqcq.q.hw_index = le32_to_cpu(cido.adminq_hw_index);
	pdsc->adminqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->adminqcq.q.hw_index);

	pdsc->notifyqcq.q.hw_type = cido.notifyq_hw_type;
	pdsc->notifyqcq.q.hw_index = le32_to_cpu(cido.notifyq_hw_index);
	pdsc->notifyqcq.q.dbval = PDS_CORE_DBELL_QID(pdsc->notifyqcq.q.hw_index);

	pdsc->last_eid = 0;

	return err;
}

static struct pdsc_viftype pdsc_viftype_defaults[] = {
	[PDS_DEV_TYPE_VDPA] = { .name = PDS_DEV_TYPE_VDPA_STR,
				.vif_id = PDS_DEV_TYPE_VDPA,
				.dl_id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET },
	[PDS_DEV_TYPE_MAX] = {}
};

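/* Build the viftype_status table from the defaults above, then mark each
 * entry as supported or not according to the device's dev_ident.vif_types
 * capability words.
 */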
static int pdsc_viftypes_init(struct pdsc *pdsc)
{
	enum pds_core_vif_types vt;

	pdsc->viftype_status = kzalloc(sizeof(pdsc_viftype_defaults),
				       GFP_KERNEL);
	if (!pdsc->viftype_status)
		return -ENOMEM;

	for (vt = 0; vt < PDS_DEV_TYPE_MAX; vt++) {
		bool vt_support;

		if (!pdsc_viftype_defaults[vt].name)
			continue;

		/* Grab the defaults */
		pdsc->viftype_status[vt] = pdsc_viftype_defaults[vt];

		/* See what the Core device has for support */
		vt_support = !!le16_to_cpu(pdsc->dev_ident.vif_types[vt]);
		dev_dbg(pdsc->dev, "VIF %s is %ssupported\n",
			pdsc->viftype_status[vt].name,
			vt_support ? "" : "not ");

		pdsc->viftype_status[vt].supported = vt_support;
	}

	return 0;
}

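/* Bring up the core device: initialize the device layer, size and allocate
 * the AdminQ and NotifyQ (the AdminQ ring length scales with online CPUs
 * and total VFs, rounded up to a power of two), run the device INIT
 * command, and discover the supported VIF types.  The init flag
 * distinguishes first-time setup from FW recovery; pdsc_fw_up() below
 * calls this with PDSC_SETUP_RECOVERY.
 */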
int pdsc_setup(struct pdsc *pdsc, bool init)
{
	int numdescs;
	int err;

	err = pdsc_dev_init(pdsc);
	if (err)
		return err;

	/* Scale the descriptor ring length based on number of CPUs and VFs */
	numdescs = max_t(int, PDSC_ADMINQ_MIN_LENGTH, num_online_cpus());
	numdescs += 2 * pci_sriov_get_totalvfs(pdsc->pdev);
	numdescs = roundup_pow_of_two(numdescs);
	err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_ADMINQ, 0, "adminq",
			     PDS_CORE_QCQ_F_CORE | PDS_CORE_QCQ_F_INTR,
			     numdescs,
			     sizeof(union pds_core_adminq_cmd),
			     sizeof(union pds_core_adminq_comp),
			     0, &pdsc->adminqcq);
	if (err)
		goto err_out_teardown;

	err = pdsc_qcq_alloc(pdsc, PDS_CORE_QTYPE_NOTIFYQ, 0, "notifyq",
			     PDS_CORE_QCQ_F_NOTIFYQ,
			     PDSC_NOTIFYQ_LENGTH,
			     sizeof(struct pds_core_notifyq_cmd),
			     sizeof(union pds_core_notifyq_comp),
			     0, &pdsc->notifyqcq);
	if (err)
		goto err_out_teardown;

	/* NotifyQ rides on the AdminQ interrupt */
	pdsc->notifyqcq.intx = pdsc->adminqcq.intx;

	/* Set up the Core with the AdminQ and NotifyQ info */
	err = pdsc_core_init(pdsc);
	if (err)
		goto err_out_teardown;

	/* Set up the VIFs */
	err = pdsc_viftypes_init(pdsc);
	if (err)
		goto err_out_teardown;

	if (init)
		pdsc_debugfs_add_viftype(pdsc);

	refcount_set(&pdsc->adminq_refcnt, 1);
	clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
	return 0;

err_out_teardown:
	pdsc_teardown(pdsc, init);
	return err;
}

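/* Undo pdsc_setup(): reset the device (PF only), cancel any pending AdminQ
 * work, free both qcqs, drop the viftype table, release all interrupt
 * slots and vectors, unmap the kernel doorbell page, and mark the FW as
 * dead so no new AdminQ users can start.
 */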
void pdsc_teardown(struct pdsc *pdsc, bool removing)
{
	int i;

	if (!pdsc->pdev->is_virtfn)
		pdsc_devcmd_reset(pdsc);
	if (pdsc->adminqcq.work.func)
		cancel_work_sync(&pdsc->adminqcq.work);
	pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
	pdsc_qcq_free(pdsc, &pdsc->adminqcq);

	kfree(pdsc->viftype_status);
	pdsc->viftype_status = NULL;

	if (pdsc->intr_info) {
		for (i = 0; i < pdsc->nintrs; i++)
			pdsc_intr_free(pdsc, i);

		kfree(pdsc->intr_info);
		pdsc->intr_info = NULL;
		pdsc->nintrs = 0;
	}

	if (pdsc->kern_dbpage) {
		iounmap(pdsc->kern_dbpage);
		pdsc->kern_dbpage = NULL;
	}

	pci_free_irq_vectors(pdsc->pdev);
	set_bit(PDSC_S_FW_DEAD, &pdsc->state);
}

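/* Start/stop are interrupt-level controls: start unmasks the AdminQ
 * interrupt so the device can post events, stop masks every interrupt
 * slot that is currently in use.
 */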
int pdsc_start(struct pdsc *pdsc)
{
	pds_core_intr_mask(&pdsc->intr_ctrl[pdsc->adminqcq.intx],
			   PDS_CORE_INTR_MASK_CLEAR);

	return 0;
}

void pdsc_stop(struct pdsc *pdsc)
{
	int i;

	if (!pdsc->intr_info)
		return;

	/* Mask interrupts that are in use */
	for (i = 0; i < pdsc->nintrs; i++)
		if (pdsc->intr_info[i].vector)
			pds_core_intr_mask(&pdsc->intr_ctrl[i],
					   PDS_CORE_INTR_MASK_SET);
}

static void pdsc_adminq_wait_and_dec_once_unused(struct pdsc *pdsc)
{
	/* The driver initializes the adminq_refcnt to 1 when the adminq is
	 * allocated and ready for use. Other users/requesters will increment
	 * the refcnt while in use. If the refcnt is down to 1 then the adminq
	 * is not in use and the refcnt can be cleared and the adminq freed.
	 * Before calling this function the driver sets PDSC_S_FW_DEAD, which
	 * causes subsequent attempts to use the adminq and increment the
	 * refcnt to fail, guaranteeing that this function will eventually
	 * exit.
	 */
	while (!refcount_dec_if_one(&pdsc->adminq_refcnt)) {
		dev_dbg_ratelimited(pdsc->dev, "%s: adminq in use\n",
				    __func__);
		cpu_relax();
	}
}

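/* Handle a firmware-down transition: mark PDSC_S_FW_DEAD (bailing out if
 * it was already set), wait for the AdminQ to become unused, report the
 * condition through devlink health and the notifier chain, then stop and
 * tear down the queues in recovery mode so pdsc_fw_up() can rebuild them.
 * A VF only marks the state bit and returns.
 */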
void pdsc_fw_down(struct pdsc *pdsc)
{
	union pds_core_notifyq_comp reset_event = {
		.reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
		.reset.state = 0,
	};

	if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
		dev_warn(pdsc->dev, "%s: already happening\n", __func__);
		return;
	}

	if (pdsc->pdev->is_virtfn)
		return;

	pdsc_adminq_wait_and_dec_once_unused(pdsc);

	/* Notify clients of fw_down */
	if (pdsc->fw_reporter)
		devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
	pdsc_notify(PDS_EVENT_RESET, &reset_event);

	pdsc_stop(pdsc);
	pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}

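/* Handle a firmware-up transition: re-run setup and start in recovery
 * mode, bump the recovery counter, mark the devlink health reporter
 * healthy again, and send a reset notification (state = 1) to clients.
 * On a VF only the PDSC_S_FW_DEAD bit is cleared.
 */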
void pdsc_fw_up(struct pdsc *pdsc)
{
	union pds_core_notifyq_comp reset_event = {
		.reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
		.reset.state = 1,
	};
	int err;

	if (!test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
		dev_err(pdsc->dev, "%s: fw not dead\n", __func__);
		return;
	}

	if (pdsc->pdev->is_virtfn) {
		clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
		return;
	}

	err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY);
	if (err)
		goto err_out;

	err = pdsc_start(pdsc);
	if (err)
		goto err_out;

	/* Notify clients of fw_up */
	pdsc->fw_recoveries++;
	if (pdsc->fw_reporter)
		devlink_health_reporter_state_update(pdsc->fw_reporter,
						     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
	pdsc_notify(PDS_EVENT_RESET, &reset_event);

	return;

err_out:
	pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}

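/* Periodic health check, run from the health work item: skip the check
 * while the driver is starting or stopping, otherwise ask pdsc_is_fw_good()
 * whether the FW looks healthy, trigger pdsc_fw_up() or pdsc_fw_down()
 * when that disagrees with the recorded PDSC_S_FW_DEAD state, and record
 * the current FW generation.
 */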
void pdsc_health_thread(struct work_struct *work)
{
	struct pdsc *pdsc = container_of(work, struct pdsc, health_work);
	unsigned long mask;
	bool healthy;

	mutex_lock(&pdsc->config_lock);

	/* Don't do a check when in a transition state */
	mask = BIT_ULL(PDSC_S_INITING_DRIVER) |
	       BIT_ULL(PDSC_S_STOPPING_DRIVER);
	if (pdsc->state & mask)
		goto out_unlock;

	healthy = pdsc_is_fw_good(pdsc);
	dev_dbg(pdsc->dev, "%s: health %d fw_status %#02x fw_heartbeat %d\n",
		__func__, healthy, pdsc->fw_status, pdsc->last_hb);

	if (test_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
		if (healthy)
			pdsc_fw_up(pdsc);
	} else {
		if (!healthy)
			pdsc_fw_down(pdsc);
	}

	pdsc->fw_generation = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;

out_unlock:
	mutex_unlock(&pdsc->config_lock);
}