1 // SPDX-License-Identifier: GPL-2.0
2
3 /*
4 * Copyright 2016-2022 HabanaLabs, Ltd.
5 * All Rights Reserved.
6 */
7
8 #define pr_fmt(fmt) "habanalabs: " fmt
9
10 #include <uapi/misc/habanalabs.h>
11 #include "habanalabs.h"
12
13 #include <linux/pci.h>
14 #include <linux/hwmon.h>
15
16 #include <trace/events/habanalabs.h>
17
18 #define HL_RESET_DELAY_USEC 10000 /* 10ms */
19
20 enum dma_alloc_type {
21 DMA_ALLOC_COHERENT,
22 DMA_ALLOC_CPU_ACCESSIBLE,
23 DMA_ALLOC_POOL,
24 };
25
26 #define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788
27
28 /*
29 * hl_set_dram_bar - sets the bar to allow later access to an address
30 *
31 * @hdev: pointer to habanalabs device structure.
32 * @addr: the address the caller wants to access.
33 * @region: the PCI region.
34 *
35 * @return: the old BAR base address on success, U64_MAX for failure.
36 * The caller should set it back to the old address after use.
37 *
38 * In case the bar space does not cover the whole address space,
39 * the bar base address should be set to allow access to a given address.
40 * This function can also be called if the bar doesn't need to be set,
41 * in that case it just won't change the base.
42 */
43 static u64 hl_set_dram_bar(struct hl_device *hdev, u64 addr, struct pci_mem_region *region)
44 {
45 struct asic_fixed_properties *prop = &hdev->asic_prop;
46 u64 bar_base_addr, old_base;
47
48 if (is_power_of_2(prop->dram_pci_bar_size))
49 bar_base_addr = addr & ~(prop->dram_pci_bar_size - 0x1ull);
50 else
51 bar_base_addr = DIV_ROUND_DOWN_ULL(addr, prop->dram_pci_bar_size) *
52 prop->dram_pci_bar_size;
53
54 old_base = hdev->asic_funcs->set_dram_bar_base(hdev, bar_base_addr);
55
56 /* in case of success we need to update the new BAR base */
57 if (old_base != U64_MAX)
58 region->region_base = bar_base_addr;
59
60 return old_base;
61 }
62
63 static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val,
64 enum debugfs_access_type acc_type, enum pci_region region_type)
65 {
66 struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
67 void __iomem *acc_addr;
68 u64 old_base = 0, rc;
69
70 if (region_type == PCI_REGION_DRAM) {
71 old_base = hl_set_dram_bar(hdev, addr, region);
72 if (old_base == U64_MAX)
73 return -EIO;
74 }
75
76 acc_addr = hdev->pcie_bar[region->bar_id] + addr - region->region_base +
77 region->offset_in_bar;
78 switch (acc_type) {
79 case DEBUGFS_READ8:
80 *val = readb(acc_addr);
81 break;
82 case DEBUGFS_WRITE8:
83 writeb(*val, acc_addr);
84 break;
85 case DEBUGFS_READ32:
86 *val = readl(acc_addr);
87 break;
88 case DEBUGFS_WRITE32:
89 writel(*val, acc_addr);
90 break;
91 case DEBUGFS_READ64:
92 *val = readq(acc_addr);
93 break;
94 case DEBUGFS_WRITE64:
95 writeq(*val, acc_addr);
96 break;
97 }
98
99 if (region_type == PCI_REGION_DRAM) {
100 rc = hl_set_dram_bar(hdev, old_base, region);
101 if (rc == U64_MAX)
102 return -EIO;
103 }
104
105 return 0;
106 }
107
108 static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
109 gfp_t flag, enum dma_alloc_type alloc_type,
110 const char *caller)
111 {
112 void *ptr = NULL;
113
114 switch (alloc_type) {
115 case DMA_ALLOC_COHERENT:
116 ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);
117 break;
118 case DMA_ALLOC_CPU_ACCESSIBLE:
119 ptr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
120 break;
121 case DMA_ALLOC_POOL:
122 ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);
123 break;
124 }
125
126 if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))
127 trace_habanalabs_dma_alloc(hdev->dev, (u64) (uintptr_t) ptr, *dma_handle, size,
128 caller);
129
130 return ptr;
131 }
132
133 static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,
134 dma_addr_t dma_handle, enum dma_alloc_type alloc_type,
135 const char *caller)
136 {
137 switch (alloc_type) {
138 case DMA_ALLOC_COHERENT:
139 hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
140 break;
141 case DMA_ALLOC_CPU_ACCESSIBLE:
142 hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, cpu_addr);
143 break;
144 case DMA_ALLOC_POOL:
145 hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
146 break;
147 }
148
149 trace_habanalabs_dma_free(hdev->dev, (u64) (uintptr_t) cpu_addr, dma_handle, size, caller);
150 }
151
152 void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
153 gfp_t flag, const char *caller)
154 {
155 return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT, caller);
156 }
157
158 void hl_asic_dma_free_coherent_caller(struct hl_device *hdev, size_t size, void *cpu_addr,
159 dma_addr_t dma_handle, const char *caller)
160 {
161 hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT, caller);
162 }
163
164 void *hl_cpu_accessible_dma_pool_alloc_caller(struct hl_device *hdev, size_t size,
165 dma_addr_t *dma_handle, const char *caller)
166 {
167 return hl_dma_alloc_common(hdev, size, dma_handle, 0, DMA_ALLOC_CPU_ACCESSIBLE, caller);
168 }
169
170 void hl_cpu_accessible_dma_pool_free_caller(struct hl_device *hdev, size_t size, void *vaddr,
171 const char *caller)
172 {
173 hl_asic_dma_free_common(hdev, size, vaddr, 0, DMA_ALLOC_CPU_ACCESSIBLE, caller);
174 }
175
176 void *hl_asic_dma_pool_zalloc_caller(struct hl_device *hdev, size_t size, gfp_t mem_flags,
177 dma_addr_t *dma_handle, const char *caller)
178 {
179 return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL, caller);
180 }
181
182 void hl_asic_dma_pool_free_caller(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr,
183 const char *caller)
184 {
185 hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL, caller);
186 }
187
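/*
 * hl_dma_map_sgtable - DMA map an SG table and apply the device DMA offset
 *
 * @hdev: pointer to habanalabs device structure
 * @sgt: the SG table to map for DMA
 * @dir: direction of the DMA transfer
 *
 * Maps the SG table and, if the ASIC defines a DMA offset for host access,
 * shifts every DMA address by the device's base physical address of host memory.
 */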
188 int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
189 {
190 struct asic_fixed_properties *prop = &hdev->asic_prop;
191 struct scatterlist *sg;
192 int rc, i;
193
194 rc = dma_map_sgtable(&hdev->pdev->dev, sgt, dir, 0);
195 if (rc)
196 return rc;
197
198 /* Shift to the device's base physical address of host memory if necessary */
199 if (prop->device_dma_offset_for_host_access)
200 for_each_sgtable_dma_sg(sgt, sg, i)
201 sg->dma_address += prop->device_dma_offset_for_host_access;
202
203 return 0;
204 }
205
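/*
 * hl_dma_unmap_sgtable - undo the mapping done by hl_dma_map_sgtable
 *
 * @hdev: pointer to habanalabs device structure
 * @sgt: the SG table to unmap
 * @dir: direction of the DMA transfer
 *
 * Cancels the device DMA offset that was added at map time and then unmaps
 * the SG table.
 */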
206 void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
207 {
208 struct asic_fixed_properties *prop = &hdev->asic_prop;
209 struct scatterlist *sg;
210 int i;
211
212 /* Cancel the device's base physical address of host memory if necessary */
213 if (prop->device_dma_offset_for_host_access)
214 for_each_sgtable_dma_sg(sgt, sg, i)
215 sg->dma_address -= prop->device_dma_offset_for_host_access;
216
217 dma_unmap_sgtable(&hdev->pdev->dev, sgt, dir, 0);
218 }
219
220 /*
221 * hl_access_cfg_region - access the config region
222 *
223 * @hdev: pointer to habanalabs device structure
224 * @addr: the address to access
225 * @val: the value to write from or read to
226 * @acc_type: the type of access (read/write 64/32)
227 */
228 int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
229 enum debugfs_access_type acc_type)
230 {
231 struct pci_mem_region *cfg_region = &hdev->pci_mem_region[PCI_REGION_CFG];
232 u32 val_h, val_l;
233
234 if (!IS_ALIGNED(addr, sizeof(u32))) {
235 dev_err(hdev->dev, "address %#llx not a multiple of %zu\n", addr, sizeof(u32));
236 return -EINVAL;
237 }
238
239 switch (acc_type) {
240 case DEBUGFS_READ32:
241 *val = RREG32(addr - cfg_region->region_base);
242 break;
243 case DEBUGFS_WRITE32:
244 WREG32(addr - cfg_region->region_base, *val);
245 break;
246 case DEBUGFS_READ64:
247 val_l = RREG32(addr - cfg_region->region_base);
248 val_h = RREG32(addr + sizeof(u32) - cfg_region->region_base);
249
250 *val = (((u64) val_h) << 32) | val_l;
251 break;
252 case DEBUGFS_WRITE64:
253 WREG32(addr - cfg_region->region_base, lower_32_bits(*val));
254 WREG32(addr + sizeof(u32) - cfg_region->region_base, upper_32_bits(*val));
255 break;
256 default:
257 dev_err(hdev->dev, "access type %d is not supported\n", acc_type);
258 return -EOPNOTSUPP;
259 }
260
261 return 0;
262 }
263
264 /*
265 * hl_access_dev_mem - access device memory
266 *
267 * @hdev: pointer to habanalabs device structure
268 * @region_type: the type of the region the address belongs to
269 * @addr: the address to access
270 * @val: the value to write from or read to
271 * @acc_type: the type of access (r/w, 32/64)
272 */
273 int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
274 u64 addr, u64 *val, enum debugfs_access_type acc_type)
275 {
276 switch (region_type) {
277 case PCI_REGION_CFG:
278 return hl_access_cfg_region(hdev, addr, val, acc_type);
279 case PCI_REGION_SRAM:
280 case PCI_REGION_DRAM:
281 return hl_access_sram_dram_region(hdev, addr, val, acc_type,
282 region_type);
283 default:
284 return -EFAULT;
285 }
286
287 return 0;
288 }
289
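/*
 * hl_engine_data_sprintf - print a formatted string into an engines data buffer
 *
 * @e: pointer to the engines data structure
 * @fmt: printf-style format string
 *
 * The accumulated size is updated even when the destination buffer is too
 * small, so the caller can learn the total size needed for all strings.
 */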
290 void hl_engine_data_sprintf(struct engines_data *e, const char *fmt, ...)
291 {
292 va_list args;
293 int str_size;
294
295 va_start(args, fmt);
296 /* Calculate formatted string length. Assuming each string is null terminated, hence
297 * increment result by 1
298 */
299 str_size = vsnprintf(NULL, 0, fmt, args) + 1;
300 va_end(args);
301
302 if ((e->actual_size + str_size) < e->allocated_buf_size) {
303 va_start(args, fmt);
304 vsnprintf(e->buf + e->actual_size, str_size, fmt, args);
305 va_end(args);
306 }
307
308 /* Need to update the size even when not updating destination buffer to get the exact size
309 * of all input strings
310 */
311 e->actual_size += str_size;
312 }
313
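/*
 * hl_device_status - get the current status of the device
 *
 * @hdev: pointer to habanalabs device structure
 *
 * The status is derived from the reset information, the disabled flag and
 * the init_done flag, in that order of precedence.
 */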
314 enum hl_device_status hl_device_status(struct hl_device *hdev)
315 {
316 enum hl_device_status status;
317
318 if (hdev->reset_info.in_reset) {
319 if (hdev->reset_info.in_compute_reset)
320 status = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE;
321 else
322 status = HL_DEVICE_STATUS_IN_RESET;
323 } else if (hdev->reset_info.needs_reset) {
324 status = HL_DEVICE_STATUS_NEEDS_RESET;
325 } else if (hdev->disabled) {
326 status = HL_DEVICE_STATUS_MALFUNCTION;
327 } else if (!hdev->init_done) {
328 status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
329 } else {
330 status = HL_DEVICE_STATUS_OPERATIONAL;
331 }
332
333 return status;
334 }
335
336 bool hl_device_operational(struct hl_device *hdev,
337 enum hl_device_status *status)
338 {
339 enum hl_device_status current_status;
340
341 current_status = hl_device_status(hdev);
342 if (status)
343 *status = current_status;
344
345 switch (current_status) {
346 case HL_DEVICE_STATUS_IN_RESET:
347 case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
348 case HL_DEVICE_STATUS_MALFUNCTION:
349 case HL_DEVICE_STATUS_NEEDS_RESET:
350 return false;
351 case HL_DEVICE_STATUS_OPERATIONAL:
352 case HL_DEVICE_STATUS_IN_DEVICE_CREATION:
353 default:
354 return true;
355 }
356 }
357
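/*
 * hpriv_release - release a user's file private data
 *
 * @ref: pointer to the kref member of the hl_fpriv structure
 *
 * Called when the last reference to the file private data is dropped.
 * Checks that the device is idle, removes the user from the open users list
 * and then either resets the device or scrubs its memory.
 */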
358 static void hpriv_release(struct kref *ref)
359 {
360 u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
361 bool device_is_idle = true;
362 struct hl_fpriv *hpriv;
363 struct hl_device *hdev;
364
365 hpriv = container_of(ref, struct hl_fpriv, refcount);
366
367 hdev = hpriv->hdev;
368
369 hdev->asic_funcs->send_device_activity(hdev, false);
370
371 put_pid(hpriv->taskpid);
372
373 hl_debugfs_remove_file(hpriv);
374
375 mutex_destroy(&hpriv->ctx_lock);
376 mutex_destroy(&hpriv->restore_phase_mutex);
377
378 if ((!hdev->pldm) && (hdev->pdev) &&
379 (!hdev->asic_funcs->is_device_idle(hdev,
380 idle_mask,
381 HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL))) {
382 dev_err(hdev->dev,
383 "device not idle after user context is closed (0x%llx_%llx)\n",
384 idle_mask[1], idle_mask[0]);
385
386 device_is_idle = false;
387 }
388
389 /* We need to remove the user from the list to make sure the reset process won't
390 * try to kill the user process. Because, if we got here, it means there are no
391 * more driver/device resources that the user process is occupying so there is
392 * no need to kill it
393 *
394 * However, we can't set the compute_ctx to NULL at this stage. This is to prevent
395 * a race between the release and opening the device again. We don't want to let
396 * a user open the device while a reset is about to happen.
397 */
398 mutex_lock(&hdev->fpriv_list_lock);
399 list_del(&hpriv->dev_node);
400 mutex_unlock(&hdev->fpriv_list_lock);
401
402 if (!device_is_idle || hdev->reset_upon_device_release) {
403 hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
404 } else {
405 int rc = hdev->asic_funcs->scrub_device_mem(hdev);
406
407 if (rc)
408 dev_err(hdev->dev, "failed to scrub memory from hpriv release (%d)\n", rc);
409 }
410
411 /* Now we can mark the compute_ctx as not active. Even if a reset is running in a different
412 * thread, we don't care because the in_reset is marked so if a user will try to open
413 * the device it will fail on that, even if compute_ctx is false.
414 */
415 mutex_lock(&hdev->fpriv_list_lock);
416 hdev->is_compute_ctx_active = false;
417 mutex_unlock(&hdev->fpriv_list_lock);
418
419 hdev->compute_ctx_in_release = 0;
420
421 /* release the eventfd */
422 if (hpriv->notifier_event.eventfd)
423 eventfd_ctx_put(hpriv->notifier_event.eventfd);
424
425 mutex_destroy(&hpriv->notifier_event.lock);
426
427 kfree(hpriv);
428 }
429
430 void hl_hpriv_get(struct hl_fpriv *hpriv)
431 {
432 kref_get(&hpriv->refcount);
433 }
434
435 int hl_hpriv_put(struct hl_fpriv *hpriv)
436 {
437 return kref_put(&hpriv->refcount, hpriv_release);
438 }
439
440 /*
441 * hl_device_release - release function for habanalabs device
442 *
443 * @inode: pointer to inode structure
444 * @filp: pointer to file structure
445 *
446 * Called when a process closes a habanalabs device
447 */
448 static int hl_device_release(struct inode *inode, struct file *filp)
449 {
450 struct hl_fpriv *hpriv = filp->private_data;
451 struct hl_device *hdev = hpriv->hdev;
452
453 filp->private_data = NULL;
454
455 if (!hdev) {
456 pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
457 put_pid(hpriv->taskpid);
458 return 0;
459 }
460
461 /* Each pending user interrupt holds the user's context, hence we
462 * must release them all before calling hl_ctx_mgr_fini().
463 */
464 hl_release_pending_user_interrupts(hpriv->hdev);
465
466 hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
467 hl_mem_mgr_fini(&hpriv->mem_mgr);
468
469 hdev->compute_ctx_in_release = 1;
470
471 if (!hl_hpriv_put(hpriv))
472 dev_notice(hdev->dev,
473 "User process closed FD but device still in use\n");
474
475 hdev->last_open_session_duration_jif =
476 jiffies - hdev->last_successful_open_jif;
477
478 return 0;
479 }
480
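/*
 * hl_device_release_ctrl - release function for habanalabs control device
 *
 * @inode: pointer to inode structure
 * @filp: pointer to file structure
 *
 * Called when a process closes a habanalabs control device
 */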
481 static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
482 {
483 struct hl_fpriv *hpriv = filp->private_data;
484 struct hl_device *hdev = hpriv->hdev;
485
486 filp->private_data = NULL;
487
488 if (!hdev) {
489 pr_err("Closing FD after device was removed\n");
490 goto out;
491 }
492
493 mutex_lock(&hdev->fpriv_ctrl_list_lock);
494 list_del(&hpriv->dev_node);
495 mutex_unlock(&hdev->fpriv_ctrl_list_lock);
496 out:
497 /* release the eventfd */
498 if (hpriv->notifier_event.eventfd)
499 eventfd_ctx_put(hpriv->notifier_event.eventfd);
500
501 mutex_destroy(&hpriv->notifier_event.lock);
502 put_pid(hpriv->taskpid);
503
504 kfree(hpriv);
505
506 return 0;
507 }
508
509 /*
510 * hl_mmap - mmap function for habanalabs device
511 *
512 * @*filp: pointer to file structure
513 * @*vma: pointer to vm_area_struct of the process
514 *
515 * Called when process does an mmap on habanalabs device. Call the relevant mmap
516 * function at the end of the common code.
517 */
518 static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
519 {
520 struct hl_fpriv *hpriv = filp->private_data;
521 struct hl_device *hdev = hpriv->hdev;
522 unsigned long vm_pgoff;
523
524 if (!hdev) {
525 pr_err_ratelimited("Trying to mmap after device was removed! Please close FD\n");
526 return -ENODEV;
527 }
528
529 vm_pgoff = vma->vm_pgoff;
530
531 switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
532 case HL_MMAP_TYPE_BLOCK:
533 vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
534 return hl_hw_block_mmap(hpriv, vma);
535
536 case HL_MMAP_TYPE_CB:
537 case HL_MMAP_TYPE_TS_BUFF:
538 return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);
539 }
540 return -EINVAL;
541 }
542
543 static const struct file_operations hl_ops = {
544 .owner = THIS_MODULE,
545 .open = hl_device_open,
546 .release = hl_device_release,
547 .mmap = hl_mmap,
548 .unlocked_ioctl = hl_ioctl,
549 .compat_ioctl = hl_ioctl
550 };
551
552 static const struct file_operations hl_ctrl_ops = {
553 .owner = THIS_MODULE,
554 .open = hl_device_open_ctrl,
555 .release = hl_device_release_ctrl,
556 .unlocked_ioctl = hl_ioctl_control,
557 .compat_ioctl = hl_ioctl_control
558 };
559
560 static void device_release_func(struct device *dev)
561 {
562 kfree(dev);
563 }
564
565 /*
566 * device_init_cdev - Initialize cdev and device for habanalabs device
567 *
568 * @hdev: pointer to habanalabs device structure
569 * @hclass: pointer to the class object of the device
570 * @minor: minor number of the specific device
571 * @fops: file operations to install for this device
572 * @name: name of the device as it will appear in the filesystem
573 * @cdev: pointer to the char device object that will be initialized
574 * @dev: pointer to the device object that will be initialized
575 *
576 * Initialize a cdev and a Linux device for habanalabs's device.
577 */
578 static int device_init_cdev(struct hl_device *hdev, struct class *hclass,
579 int minor, const struct file_operations *fops,
580 char *name, struct cdev *cdev,
581 struct device **dev)
582 {
583 cdev_init(cdev, fops);
584 cdev->owner = THIS_MODULE;
585
586 *dev = kzalloc(sizeof(**dev), GFP_KERNEL);
587 if (!*dev)
588 return -ENOMEM;
589
590 device_initialize(*dev);
591 (*dev)->devt = MKDEV(hdev->major, minor);
592 (*dev)->class = hclass;
593 (*dev)->release = device_release_func;
594 dev_set_drvdata(*dev, hdev);
595 dev_set_name(*dev, "%s", name);
596
597 return 0;
598 }
599
600 static int device_cdev_sysfs_add(struct hl_device *hdev)
601 {
602 int rc;
603
604 rc = cdev_device_add(&hdev->cdev, hdev->dev);
605 if (rc) {
606 dev_err(hdev->dev,
607 "failed to add a char device to the system\n");
608 return rc;
609 }
610
611 rc = cdev_device_add(&hdev->cdev_ctrl, hdev->dev_ctrl);
612 if (rc) {
613 dev_err(hdev->dev,
614 "failed to add a control char device to the system\n");
615 goto delete_cdev_device;
616 }
617
618 /* hl_sysfs_init() must be done after adding the device to the system */
619 rc = hl_sysfs_init(hdev);
620 if (rc) {
621 dev_err(hdev->dev, "failed to initialize sysfs\n");
622 goto delete_ctrl_cdev_device;
623 }
624
625 hdev->cdev_sysfs_created = true;
626
627 return 0;
628
629 delete_ctrl_cdev_device:
630 cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
631 delete_cdev_device:
632 cdev_device_del(&hdev->cdev, hdev->dev);
633 return rc;
634 }
635
636 static void device_cdev_sysfs_del(struct hl_device *hdev)
637 {
638 if (!hdev->cdev_sysfs_created)
639 goto put_devices;
640
641 hl_sysfs_fini(hdev);
642 cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
643 cdev_device_del(&hdev->cdev, hdev->dev);
644
645 put_devices:
646 put_device(hdev->dev);
647 put_device(hdev->dev_ctrl);
648 }
649
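/*
 * device_hard_reset_pending - hard reset work running on the dedicated thread
 *
 * @work: the delayed work of the reset work structure
 *
 * Performs the reset with the HL_DRV_RESET_FROM_RESET_THR flag and, if the
 * device is still busy, re-queues itself unless a device fini is pending.
 */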
650 static void device_hard_reset_pending(struct work_struct *work)
651 {
652 struct hl_device_reset_work *device_reset_work =
653 container_of(work, struct hl_device_reset_work, reset_work.work);
654 struct hl_device *hdev = device_reset_work->hdev;
655 u32 flags;
656 int rc;
657
658 flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;
659
660 rc = hl_device_reset(hdev, flags);
661 if ((rc == -EBUSY) && !hdev->device_fini_pending) {
662 dev_info(hdev->dev,
663 "Could not reset device. will try again in %u seconds",
664 HL_PENDING_RESET_PER_SEC);
665
666 queue_delayed_work(device_reset_work->wq,
667 &device_reset_work->reset_work,
668 msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
669 }
670 }
671
672 /*
673 * device_early_init - do some early initialization for the habanalabs device
674 *
675 * @hdev: pointer to habanalabs device structure
676 *
677 * Install the relevant function pointers and call the early_init function,
678 * if such a function exists
679 */
680 static int device_early_init(struct hl_device *hdev)
681 {
682 int i, rc;
683 char workq_name[32];
684
685 switch (hdev->asic_type) {
686 case ASIC_GOYA:
687 goya_set_asic_funcs(hdev);
688 strscpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name));
689 break;
690 case ASIC_GAUDI:
691 gaudi_set_asic_funcs(hdev);
692 strscpy(hdev->asic_name, "GAUDI", sizeof(hdev->asic_name));
693 break;
694 case ASIC_GAUDI_SEC:
695 gaudi_set_asic_funcs(hdev);
696 strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
697 break;
698 case ASIC_GAUDI2:
699 gaudi2_set_asic_funcs(hdev);
700 strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name));
701 break;
702 case ASIC_GAUDI2_SEC:
703 gaudi2_set_asic_funcs(hdev);
704 strscpy(hdev->asic_name, "GAUDI2 SEC", sizeof(hdev->asic_name));
705 break;
706 default:
707 dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
708 hdev->asic_type);
709 return -EINVAL;
710 }
711
712 rc = hdev->asic_funcs->early_init(hdev);
713 if (rc)
714 return rc;
715
716 rc = hl_asid_init(hdev);
717 if (rc)
718 goto early_fini;
719
720 if (hdev->asic_prop.completion_queues_count) {
721 hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
722 sizeof(struct workqueue_struct *),
723 GFP_KERNEL);
724 if (!hdev->cq_wq) {
725 rc = -ENOMEM;
726 goto asid_fini;
727 }
728 }
729
730 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
731 snprintf(workq_name, 32, "hl-free-jobs-%u", (u32) i);
732 hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
733 if (hdev->cq_wq[i] == NULL) {
734 dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
735 rc = -ENOMEM;
736 goto free_cq_wq;
737 }
738 }
739
740 hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
741 if (hdev->eq_wq == NULL) {
742 dev_err(hdev->dev, "Failed to allocate EQ workqueue\n");
743 rc = -ENOMEM;
744 goto free_cq_wq;
745 }
746
747 hdev->cs_cmplt_wq = alloc_workqueue("hl-cs-completions", WQ_UNBOUND, 0);
748 if (!hdev->cs_cmplt_wq) {
749 dev_err(hdev->dev,
750 "Failed to allocate CS completions workqueue\n");
751 rc = -ENOMEM;
752 goto free_eq_wq;
753 }
754
755 hdev->ts_free_obj_wq = alloc_workqueue("hl-ts-free-obj", WQ_UNBOUND, 0);
756 if (!hdev->ts_free_obj_wq) {
757 dev_err(hdev->dev,
758 "Failed to allocate Timestamp registration free workqueue\n");
759 rc = -ENOMEM;
760 goto free_cs_cmplt_wq;
761 }
762
763 hdev->pf_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0);
764 if (!hdev->pf_wq) {
765 dev_err(hdev->dev, "Failed to allocate MMU prefetch workqueue\n");
766 rc = -ENOMEM;
767 goto free_ts_free_wq;
768 }
769
770 hdev->hl_chip_info = kzalloc(sizeof(struct hwmon_chip_info),
771 GFP_KERNEL);
772 if (!hdev->hl_chip_info) {
773 rc = -ENOMEM;
774 goto free_pf_wq;
775 }
776
777 rc = hl_mmu_if_set_funcs(hdev);
778 if (rc)
779 goto free_chip_info;
780
781 hl_mem_mgr_init(hdev->dev, &hdev->kernel_mem_mgr);
782
783 hdev->device_reset_work.wq =
784 create_singlethread_workqueue("hl_device_reset");
785 if (!hdev->device_reset_work.wq) {
786 rc = -ENOMEM;
787 dev_err(hdev->dev, "Failed to create device reset WQ\n");
788 goto free_cb_mgr;
789 }
790
791 INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work,
792 device_hard_reset_pending);
793 hdev->device_reset_work.hdev = hdev;
794 hdev->device_fini_pending = 0;
795
796 mutex_init(&hdev->send_cpu_message_lock);
797 mutex_init(&hdev->debug_lock);
798 INIT_LIST_HEAD(&hdev->cs_mirror_list);
799 spin_lock_init(&hdev->cs_mirror_lock);
800 spin_lock_init(&hdev->reset_info.lock);
801 INIT_LIST_HEAD(&hdev->fpriv_list);
802 INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);
803 mutex_init(&hdev->fpriv_list_lock);
804 mutex_init(&hdev->fpriv_ctrl_list_lock);
805 mutex_init(&hdev->clk_throttling.lock);
806
807 return 0;
808
809 free_cb_mgr:
810 hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
811 free_chip_info:
812 kfree(hdev->hl_chip_info);
813 free_pf_wq:
814 destroy_workqueue(hdev->pf_wq);
815 free_ts_free_wq:
816 destroy_workqueue(hdev->ts_free_obj_wq);
817 free_cs_cmplt_wq:
818 destroy_workqueue(hdev->cs_cmplt_wq);
819 free_eq_wq:
820 destroy_workqueue(hdev->eq_wq);
821 free_cq_wq:
822 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
823 if (hdev->cq_wq[i])
824 destroy_workqueue(hdev->cq_wq[i]);
825 kfree(hdev->cq_wq);
826 asid_fini:
827 hl_asid_fini(hdev);
828 early_fini:
829 if (hdev->asic_funcs->early_fini)
830 hdev->asic_funcs->early_fini(hdev);
831
832 return rc;
833 }
834
835 /*
836 * device_early_fini - finalize all that was done in device_early_init
837 *
838 * @hdev: pointer to habanalabs device structure
839 *
840 */
841 static void device_early_fini(struct hl_device *hdev)
842 {
843 int i;
844
845 mutex_destroy(&hdev->debug_lock);
846 mutex_destroy(&hdev->send_cpu_message_lock);
847
848 mutex_destroy(&hdev->fpriv_list_lock);
849 mutex_destroy(&hdev->fpriv_ctrl_list_lock);
850
851 mutex_destroy(&hdev->clk_throttling.lock);
852
853 hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
854
855 kfree(hdev->hl_chip_info);
856
857 destroy_workqueue(hdev->pf_wq);
858 destroy_workqueue(hdev->ts_free_obj_wq);
859 destroy_workqueue(hdev->cs_cmplt_wq);
860 destroy_workqueue(hdev->eq_wq);
861 destroy_workqueue(hdev->device_reset_work.wq);
862
863 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
864 destroy_workqueue(hdev->cq_wq[i]);
865 kfree(hdev->cq_wq);
866
867 hl_asid_fini(hdev);
868
869 if (hdev->asic_funcs->early_fini)
870 hdev->asic_funcs->early_fini(hdev);
871 }
872
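/*
 * hl_device_heartbeat - periodically check that the device CPU is alive
 *
 * @work: the delayed heartbeat work of the device structure
 *
 * Re-schedules itself as long as the heartbeat is answered or the device is
 * not operational (e.g. during reset). Otherwise, it initiates a hard reset
 * with the heartbeat flag set.
 */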
873 static void hl_device_heartbeat(struct work_struct *work)
874 {
875 struct hl_device *hdev = container_of(work, struct hl_device,
876 work_heartbeat.work);
877
878 if (!hl_device_operational(hdev, NULL))
879 goto reschedule;
880
881 if (!hdev->asic_funcs->send_heartbeat(hdev))
882 goto reschedule;
883
884 if (hl_device_operational(hdev, NULL))
885 dev_err(hdev->dev, "Device heartbeat failed!\n");
886
887 hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT);
888
889 return;
890
891 reschedule:
892 /*
893 * prev_reset_trigger tracks consecutive fatal h/w errors until first
894 * heartbeat immediately post reset.
895 * If control reached here, then at least one heartbeat work has been
896 * scheduled since last reset/init cycle.
897 * So if the device is not already in reset cycle, reset the flag
898 * prev_reset_trigger as no reset occurred with HL_DRV_RESET_FW_FATAL_ERR
899 * status for at least one heartbeat. From this point driver restarts
900 * tracking future consecutive fatal errors.
901 */
902 if (!hdev->reset_info.in_reset)
903 hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
904
905 schedule_delayed_work(&hdev->work_heartbeat,
906 usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
907 }
908
909 /*
910 * device_late_init - do late initialization for the habanalabs device
911 *
912 * @hdev: pointer to habanalabs device structure
913 *
914 * Do stuff that either needs the device H/W queues to be active or needs
915 * to happen after all the rest of the initialization is finished
916 */
917 static int device_late_init(struct hl_device *hdev)
918 {
919 int rc;
920
921 if (hdev->asic_funcs->late_init) {
922 rc = hdev->asic_funcs->late_init(hdev);
923 if (rc) {
924 dev_err(hdev->dev,
925 "failed late initialization for the H/W\n");
926 return rc;
927 }
928 }
929
930 hdev->high_pll = hdev->asic_prop.high_pll;
931
932 if (hdev->heartbeat) {
933 INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
934 schedule_delayed_work(&hdev->work_heartbeat,
935 usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
936 }
937
938 hdev->late_init_done = true;
939
940 return 0;
941 }
942
943 /*
944 * device_late_fini - finalize all that was done in device_late_init
945 *
946 * @hdev: pointer to habanalabs device structure
947 *
948 */
949 static void device_late_fini(struct hl_device *hdev)
950 {
951 if (!hdev->late_init_done)
952 return;
953
954 if (hdev->heartbeat)
955 cancel_delayed_work_sync(&hdev->work_heartbeat);
956
957 if (hdev->asic_funcs->late_fini)
958 hdev->asic_funcs->late_fini(hdev);
959
960 hdev->late_init_done = false;
961 }
962
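/*
 * hl_device_utilization - estimate the current device utilization
 *
 * @hdev: pointer to habanalabs device structure
 * @utilization: pointer to store the utilization percentage (0-100)
 *
 * The utilization is derived from the current power reading relative to the
 * range between the default DC power and the maximum power.
 */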
963 int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
964 {
965 u64 max_power, curr_power, dc_power, dividend;
966 int rc;
967
968 max_power = hdev->max_power;
969 dc_power = hdev->asic_prop.dc_power_default;
970 rc = hl_fw_cpucp_power_get(hdev, &curr_power);
971
972 if (rc)
973 return rc;
974
975 curr_power = clamp(curr_power, dc_power, max_power);
976
977 dividend = (curr_power - dc_power) * 100;
978 *utilization = (u32) div_u64(dividend, (max_power - dc_power));
979
980 return 0;
981 }
982
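/*
 * hl_device_set_debug_mode - enable or disable debug mode for a context
 *
 * @hdev: pointer to habanalabs device structure
 * @ctx: the context for which debug mode is toggled
 * @enable: true to enter debug mode, false to exit it
 *
 * Returns -EFAULT if the requested transition doesn't match the current
 * debug state of the device.
 */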
983 int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable)
984 {
985 int rc = 0;
986
987 mutex_lock(&hdev->debug_lock);
988
989 if (!enable) {
990 if (!hdev->in_debug) {
991 dev_err(hdev->dev,
992 "Failed to disable debug mode because device was not in debug mode\n");
993 rc = -EFAULT;
994 goto out;
995 }
996
997 if (!hdev->reset_info.hard_reset_pending)
998 hdev->asic_funcs->halt_coresight(hdev, ctx);
999
1000 hdev->in_debug = 0;
1001
1002 goto out;
1003 }
1004
1005 if (hdev->in_debug) {
1006 dev_err(hdev->dev,
1007 "Failed to enable debug mode because device is already in debug mode\n");
1008 rc = -EFAULT;
1009 goto out;
1010 }
1011
1012 hdev->in_debug = 1;
1013
1014 out:
1015 mutex_unlock(&hdev->debug_lock);
1016
1017 return rc;
1018 }
1019
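/*
 * take_release_locks - flush threads that hold the device's critical locks
 *
 * @hdev: pointer to habanalabs device structure
 *
 * Taking and immediately releasing each lock guarantees that any thread that
 * was inside a critical section when the device was disabled has left it.
 */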
1020 static void take_release_locks(struct hl_device *hdev)
1021 {
1022 /* Flush anyone that is inside the critical section of enqueue
1023 * jobs to the H/W
1024 */
1025 hdev->asic_funcs->hw_queues_lock(hdev);
1026 hdev->asic_funcs->hw_queues_unlock(hdev);
1027
1028 /* Flush processes that are sending message to CPU */
1029 mutex_lock(&hdev->send_cpu_message_lock);
1030 mutex_unlock(&hdev->send_cpu_message_lock);
1031
1032 /* Flush anyone that is inside device open */
1033 mutex_lock(&hdev->fpriv_list_lock);
1034 mutex_unlock(&hdev->fpriv_list_lock);
1035 mutex_lock(&hdev->fpriv_ctrl_list_lock);
1036 mutex_unlock(&hdev->fpriv_ctrl_list_lock);
1037 }
1038
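/*
 * cleanup_resources - halt engines and release in-flight work before a reset
 *
 * @hdev: pointer to habanalabs device structure
 * @hard_reset: true if this is part of a hard reset
 * @fw_reset: true if the firmware performs the reset
 * @skip_wq_flush: true to skip the workqueue flush during CS rollback
 */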
1039 static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,
1040 bool skip_wq_flush)
1041 {
1042 if (hard_reset)
1043 device_late_fini(hdev);
1044
1045 /*
1046 * Halt the engines and disable interrupts so we won't get any more
1047 * completions from H/W and we won't have any accesses from the
1048 * H/W to the host machine
1049 */
1050 hdev->asic_funcs->halt_engines(hdev, hard_reset, fw_reset);
1051
1052 /* Go over all the queues, release all CS and their jobs */
1053 hl_cs_rollback_all(hdev, skip_wq_flush);
1054
1055 /* flush the MMU prefetch workqueue */
1056 flush_workqueue(hdev->pf_wq);
1057
1058 /* Release all pending user interrupts, each pending user interrupt
1059 * holds a reference to user context
1060 */
1061 hl_release_pending_user_interrupts(hdev);
1062 }
1063
1064 /*
1065 * hl_device_suspend - initiate device suspend
1066 *
1067 * @hdev: pointer to habanalabs device structure
1068 *
1069 * Puts the hw in the suspend state (all asics).
1070 * Returns 0 for success or an error on failure.
1071 * Called at driver suspend.
1072 */
1073 int hl_device_suspend(struct hl_device *hdev)
1074 {
1075 int rc;
1076
1077 pci_save_state(hdev->pdev);
1078
1079 /* Block future CS/VM/JOB completion operations */
1080 spin_lock(&hdev->reset_info.lock);
1081 if (hdev->reset_info.in_reset) {
1082 spin_unlock(&hdev->reset_info.lock);
1083 dev_err(hdev->dev, "Can't suspend while in reset\n");
1084 return -EIO;
1085 }
1086 hdev->reset_info.in_reset = 1;
1087 spin_unlock(&hdev->reset_info.lock);
1088
1089 /* This blocks all other stuff that is not blocked by in_reset */
1090 hdev->disabled = true;
1091
1092 take_release_locks(hdev);
1093
1094 rc = hdev->asic_funcs->suspend(hdev);
1095 if (rc)
1096 dev_err(hdev->dev,
1097 "Failed to disable PCI access of device CPU\n");
1098
1099 /* Shut down the device */
1100 pci_disable_device(hdev->pdev);
1101 pci_set_power_state(hdev->pdev, PCI_D3hot);
1102
1103 return 0;
1104 }
1105
1106 /*
1107 * hl_device_resume - initiate device resume
1108 *
1109 * @hdev: pointer to habanalabs device structure
1110 *
1111 * Bring the hw back to operating state (all asics).
1112 * Returns 0 for success or an error on failure.
1113 * Called at driver resume.
1114 */
1115 int hl_device_resume(struct hl_device *hdev)
1116 {
1117 int rc;
1118
1119 pci_set_power_state(hdev->pdev, PCI_D0);
1120 pci_restore_state(hdev->pdev);
1121 rc = pci_enable_device_mem(hdev->pdev);
1122 if (rc) {
1123 dev_err(hdev->dev,
1124 "Failed to enable PCI device in resume\n");
1125 return rc;
1126 }
1127
1128 pci_set_master(hdev->pdev);
1129
1130 rc = hdev->asic_funcs->resume(hdev);
1131 if (rc) {
1132 dev_err(hdev->dev, "Failed to resume device after suspend\n");
1133 goto disable_device;
1134 }
1135
1136
1137 /* 'in_reset' was set to true during suspend, now we must clear it in order
1138 * for hard reset to be performed
1139 */
1140 spin_lock(&hdev->reset_info.lock);
1141 hdev->reset_info.in_reset = 0;
1142 spin_unlock(&hdev->reset_info.lock);
1143
1144 rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
1145 if (rc) {
1146 dev_err(hdev->dev, "Failed to reset device during resume\n");
1147 goto disable_device;
1148 }
1149
1150 return 0;
1151
1152 disable_device:
1153 pci_clear_master(hdev->pdev);
1154 pci_disable_device(hdev->pdev);
1155
1156 return rc;
1157 }
1158
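/*
 * device_kill_open_processes - kill user processes that keep the device open
 *
 * @hdev: pointer to habanalabs device structure
 * @timeout: number of seconds to wait for processes to exit, 0 for the default
 * @control_dev: true to operate on the control device users list
 *
 * Returns 0 when all processes exited, -EBUSY to request another trial and
 * -ETIME when the maximum number of trials was reached.
 */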
1159 static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)
1160 {
1161 struct task_struct *task = NULL;
1162 struct list_head *fd_list;
1163 struct hl_fpriv *hpriv;
1164 struct mutex *fd_lock;
1165 u32 pending_cnt;
1166
1167 fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
1168 fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
1169
1170 /* Giving time for user to close FD, and for processes that are inside
1171 * hl_device_open to finish
1172 */
1173 if (!list_empty(fd_list))
1174 ssleep(1);
1175
1176 if (timeout) {
1177 pending_cnt = timeout;
1178 } else {
1179 if (hdev->process_kill_trial_cnt) {
1180 /* Processes have been already killed */
1181 pending_cnt = 1;
1182 goto wait_for_processes;
1183 } else {
1184 /* Wait a small period after process kill */
1185 pending_cnt = HL_PENDING_RESET_PER_SEC;
1186 }
1187 }
1188
1189 mutex_lock(fd_lock);
1190
1191 /* This section must be protected because we are dereferencing
1192 * pointers that are freed if the process exits
1193 */
1194 list_for_each_entry(hpriv, fd_list, dev_node) {
1195 task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
1196 if (task) {
1197 dev_info(hdev->dev, "Killing user process pid=%d\n",
1198 task_pid_nr(task));
1199 send_sig(SIGKILL, task, 1);
1200 usleep_range(1000, 10000);
1201
1202 put_task_struct(task);
1203 } else {
1204 /*
1205 * If we got here, it means that process was killed from outside the driver
1206 * right after it started looping on fd_list and before get_pid_task, thus
1207 * we don't need to kill it.
1208 */
1209 dev_dbg(hdev->dev,
1210 "Can't get task struct for user process, assuming process was killed from outside the driver\n");
1211 }
1212 }
1213
1214 mutex_unlock(fd_lock);
1215
1216 /*
1217 * We killed the open users, but that doesn't mean they are closed.
1218 * It could be that they are running a long cleanup phase in the driver
1219 * e.g. MMU unmappings, or running other long teardown flow even before
1220 * our cleanup.
1221 * Therefore we need to wait again to make sure they are closed before
1222 * continuing with the reset.
1223 */
1224
1225 wait_for_processes:
1226 while ((!list_empty(fd_list)) && (pending_cnt)) {
1227 dev_dbg(hdev->dev,
1228 "Waiting for all unmap operations to finish before hard reset\n");
1229
1230 pending_cnt--;
1231
1232 ssleep(1);
1233 }
1234
1235 /* All processes exited successfully */
1236 if (list_empty(fd_list))
1237 return 0;
1238
1239 /* Give up waiting for processes to exit */
1240 if (hdev->process_kill_trial_cnt == HL_PENDING_RESET_MAX_TRIALS)
1241 return -ETIME;
1242
1243 hdev->process_kill_trial_cnt++;
1244
1245 return -EBUSY;
1246 }
1247
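/*
 * device_disable_open_processes - detach open file descriptors from the device
 *
 * @hdev: pointer to habanalabs device structure
 * @control_dev: true to operate on the control device users list
 *
 * Clears the hdev pointer of every open user so that further file operations
 * on the removed device fail gracefully.
 */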
1248 static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)
1249 {
1250 struct list_head *fd_list;
1251 struct hl_fpriv *hpriv;
1252 struct mutex *fd_lock;
1253
1254 fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
1255 fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
1256
1257 mutex_lock(fd_lock);
1258 list_for_each_entry(hpriv, fd_list, dev_node)
1259 hpriv->hdev = NULL;
1260 mutex_unlock(fd_lock);
1261 }
1262
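/*
 * handle_reset_trigger - record the reset cause and prepare the F/W for reset
 *
 * @hdev: pointer to habanalabs device structure
 * @flags: the reset flags that triggered this reset
 *
 * Records the reset cause, tracks repeated reset triggers and, for a hard
 * reset that is neither firmware-performed nor due to heartbeat, asks the
 * F/W to disable its PCI access.
 */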
1263 static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
1264 {
1265 u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
1266
1267 /*
1268 * 'reset cause' is being updated here, because getting here
1269 * means that it's the 1st time and the last time we're here
1270 * ('in_reset' makes sure of it). This makes sure that
1271 * 'reset_cause' will continue holding its 1st recorded reason!
1272 */
1273 if (flags & HL_DRV_RESET_HEARTBEAT) {
1274 hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
1275 cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;
1276 } else if (flags & HL_DRV_RESET_TDR) {
1277 hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;
1278 cur_reset_trigger = HL_DRV_RESET_TDR;
1279 } else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {
1280 hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
1281 cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;
1282 } else {
1283 hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
1284 }
1285
1286 /*
1287 * If reset cause is same twice, then reset_trigger_repeated
1288 * is set and if this reset is due to a fatal FW error
1289 * device is set to an unstable state.
1290 */
1291 if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {
1292 hdev->reset_info.prev_reset_trigger = cur_reset_trigger;
1293 hdev->reset_info.reset_trigger_repeated = 0;
1294 } else {
1295 hdev->reset_info.reset_trigger_repeated = 1;
1296 }
1297
1298 /* If the reset is due to heartbeat, the device CPU is not responsive,
1299 * in which case there is no point sending it a PCI disable message.
1300 *
1301 * If F/W is performing the reset, no need to send it a message to disable
1302 * PCI access
1303 */
1304 if ((flags & HL_DRV_RESET_HARD) &&
1305 !(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
1306 /* Disable PCI access from device F/W so it won't send
1307 * us additional interrupts. We disable MSI/MSI-X at
1308 * the halt_engines function and we can't have the F/W
1309 * sending us interrupts after that. We need to disable
1310 * the access here because if the device is marked
1311 * disable, the message won't be send. Also, in case
1312 * of heartbeat, the device CPU is marked as disable
1313 * so this message won't be sent
1314 */
1315 if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0))
1316 dev_warn(hdev->dev,
1317 "Failed to disable PCI access by F/W\n");
1318 }
1319 }
1320
1321 /*
1322 * hl_device_reset - reset the device
1323 *
1324 * @hdev: pointer to habanalabs device structure
1325 * @flags: reset flags.
1326 *
1327 * Block future CS and wait for pending CS to be enqueued
1328 * Call ASIC H/W fini
1329 * Flush all completions
1330 * Re-initialize all internal data structures
1331 * Call ASIC H/W init, late_init
1332 * Test queues
1333 * Enable device
1334 *
1335 * Returns 0 for success or an error on failure.
1336 */
1337 int hl_device_reset(struct hl_device *hdev, u32 flags)
1338 {
1339 bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false,
1340 reset_upon_device_release = false, schedule_hard_reset = false,
1341 skip_wq_flush, delay_reset;
1342 u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
1343 struct hl_ctx *ctx;
1344 int i, rc;
1345
1346 if (!hdev->init_done) {
1347 dev_err(hdev->dev, "Can't reset before initialization is done\n");
1348 return 0;
1349 }
1350
1351 hard_reset = !!(flags & HL_DRV_RESET_HARD);
1352 from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
1353 fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
1354 skip_wq_flush = !!(flags & HL_DRV_RESET_DEV_RELEASE);
1355 delay_reset = !!(flags & HL_DRV_RESET_DELAY);
1356
1357 if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
1358 hard_instead_soft = true;
1359 hard_reset = true;
1360 }
1361
1362 if (hdev->reset_upon_device_release && (flags & HL_DRV_RESET_DEV_RELEASE)) {
1363 if (hard_reset) {
1364 dev_crit(hdev->dev,
1365 "Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
1366 return -EINVAL;
1367 }
1368
1369 reset_upon_device_release = true;
1370
1371 goto do_reset;
1372 }
1373
1374 if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
1375 hard_instead_soft = true;
1376 hard_reset = true;
1377 }
1378
1379 if (hard_instead_soft)
1380 dev_dbg(hdev->dev, "Doing hard-reset instead of compute reset\n");
1381
1382 do_reset:
1383 /* Re-entry of reset thread */
1384 if (from_hard_reset_thread && hdev->process_kill_trial_cnt)
1385 goto kill_processes;
1386
1387 /*
1388 * Prevent concurrency in this function - only one reset should be
1389 * done at any given time. Only need to perform this if we didn't
1390 * get from the dedicated hard reset thread
1391 */
1392 if (!from_hard_reset_thread) {
1393 /* Block future CS/VM/JOB completion operations */
1394 spin_lock(&hdev->reset_info.lock);
1395 if (hdev->reset_info.in_reset) {
1396 /* We only allow scheduling of a hard reset during compute reset */
1397 if (hard_reset && hdev->reset_info.in_compute_reset)
1398 hdev->reset_info.hard_reset_schedule_flags = flags;
1399 spin_unlock(&hdev->reset_info.lock);
1400 return 0;
1401 }
1402
1403 /* This still allows the completion of some KDMA ops
1404 * Update this before in_reset because in_compute_reset implies we are in reset
1405 */
1406 hdev->reset_info.in_compute_reset = !hard_reset;
1407
1408 hdev->reset_info.in_reset = 1;
1409
1410 spin_unlock(&hdev->reset_info.lock);
1411
1412 if (delay_reset)
1413 usleep_range(HL_RESET_DELAY_USEC, HL_RESET_DELAY_USEC << 1);
1414
1415 handle_reset_trigger(hdev, flags);
1416
1417 /* This also blocks future CS/VM/JOB completion operations */
1418 hdev->disabled = true;
1419
1420 take_release_locks(hdev);
1421
1422 if (hard_reset)
1423 dev_info(hdev->dev, "Going to reset device\n");
1424 else if (reset_upon_device_release)
1425 dev_dbg(hdev->dev, "Going to reset device after release by user\n");
1426 else
1427 dev_dbg(hdev->dev, "Going to reset engines of inference device\n");
1428 }
1429
1430 again:
1431 if ((hard_reset) && (!from_hard_reset_thread)) {
1432 hdev->reset_info.hard_reset_pending = true;
1433
1434 hdev->process_kill_trial_cnt = 0;
1435
1436 hdev->device_reset_work.flags = flags;
1437
1438 /*
1439 * Because the reset function can't run from heartbeat work,
1440 * we need to call the reset function from a dedicated work.
1441 */
1442 queue_delayed_work(hdev->device_reset_work.wq,
1443 &hdev->device_reset_work.reset_work, 0);
1444
1445 return 0;
1446 }
1447
1448 cleanup_resources(hdev, hard_reset, fw_reset, skip_wq_flush);
1449
1450 kill_processes:
1451 if (hard_reset) {
1452 /* Kill processes here after CS rollback. This is because the
1453 * process can't really exit until all its CSs are done, which
1454 * is what we do in cs rollback
1455 */
1456 rc = device_kill_open_processes(hdev, 0, false);
1457
1458 if (rc == -EBUSY) {
1459 if (hdev->device_fini_pending) {
1460 dev_crit(hdev->dev,
1461 "Failed to kill all open processes, stopping hard reset\n");
1462 goto out_err;
1463 }
1464
1465 /* signal reset thread to reschedule */
1466 return rc;
1467 }
1468
1469 if (rc) {
1470 dev_crit(hdev->dev,
1471 "Failed to kill all open processes, stopping hard reset\n");
1472 goto out_err;
1473 }
1474
1475 /* Flush the Event queue workers to make sure no other thread is
1476 * reading or writing to registers during the reset
1477 */
1478 flush_workqueue(hdev->eq_wq);
1479 }
1480
1481 /* Reset the H/W. It will be in idle state after this returns */
1482 hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
1483
1484 if (hard_reset) {
1485 hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
1486
1487 /* Release kernel context */
1488 if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
1489 hdev->kernel_ctx = NULL;
1490
1491 hl_vm_fini(hdev);
1492 hl_mmu_fini(hdev);
1493 hl_eq_reset(hdev, &hdev->event_queue);
1494 }
1495
1496 /* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */
1497 hl_hw_queue_reset(hdev, hard_reset);
1498 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
1499 hl_cq_reset(hdev, &hdev->completion_queue[i]);
1500
1501 /* Make sure the context switch phase will run again */
1502 ctx = hl_get_compute_ctx(hdev);
1503 if (ctx) {
1504 atomic_set(&ctx->thread_ctx_switch_token, 1);
1505 ctx->thread_ctx_switch_wait_token = 0;
1506 hl_ctx_put(ctx);
1507 }
1508
1509 /* Finished tear-down, starting to re-initialize */
1510
1511 if (hard_reset) {
1512 hdev->device_cpu_disabled = false;
1513 hdev->reset_info.hard_reset_pending = false;
1514
1515 if (hdev->reset_info.reset_trigger_repeated &&
1516 (hdev->reset_info.prev_reset_trigger ==
1517 HL_DRV_RESET_FW_FATAL_ERR)) {
1518 /* if there are 2 back-to-back resets from FW,
1519 * ensure the driver puts the device in an unusable state
1520 */
1521 dev_crit(hdev->dev,
1522 "Consecutive FW fatal errors received, stopping hard reset\n");
1523 rc = -EIO;
1524 goto out_err;
1525 }
1526
1527 if (hdev->kernel_ctx) {
1528 dev_crit(hdev->dev,
1529 "kernel ctx was alive during hard reset, something is terribly wrong\n");
1530 rc = -EBUSY;
1531 goto out_err;
1532 }
1533
1534 rc = hl_mmu_init(hdev);
1535 if (rc) {
1536 dev_err(hdev->dev,
1537 "Failed to initialize MMU S/W after hard reset\n");
1538 goto out_err;
1539 }
1540
1541 /* Allocate the kernel context */
1542 hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx),
1543 GFP_KERNEL);
1544 if (!hdev->kernel_ctx) {
1545 rc = -ENOMEM;
1546 hl_mmu_fini(hdev);
1547 goto out_err;
1548 }
1549
1550 hdev->is_compute_ctx_active = false;
1551
1552 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
1553 if (rc) {
1554 dev_err(hdev->dev,
1555 "failed to init kernel ctx in hard reset\n");
1556 kfree(hdev->kernel_ctx);
1557 hdev->kernel_ctx = NULL;
1558 hl_mmu_fini(hdev);
1559 goto out_err;
1560 }
1561 }
1562
1563 /* Device is now enabled because part of the initialization requires
1564 * communication with the device firmware to get information that
1565 * is required for the initialization itself
1566 */
1567 hdev->disabled = false;
1568
1569 /* F/W security enabled indication might be updated after hard-reset */
1570 if (hard_reset) {
1571 rc = hl_fw_read_preboot_status(hdev);
1572 if (rc)
1573 goto out_err;
1574 }
1575
1576 rc = hdev->asic_funcs->hw_init(hdev);
1577 if (rc) {
1578 dev_err(hdev->dev, "failed to initialize the H/W after reset\n");
1579 goto out_err;
1580 }
1581
1582 /* If device is not idle fail the reset process */
1583 if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
1584 HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
1585 dev_err(hdev->dev, "device is not idle (mask 0x%llx_%llx) after reset\n",
1586 idle_mask[1], idle_mask[0]);
1587 rc = -EIO;
1588 goto out_err;
1589 }
1590
1591 /* Check that the communication with the device is working */
1592 rc = hdev->asic_funcs->test_queues(hdev);
1593 if (rc) {
1594 dev_err(hdev->dev, "Failed to detect if device is alive after reset\n");
1595 goto out_err;
1596 }
1597
1598 if (hard_reset) {
1599 rc = device_late_init(hdev);
1600 if (rc) {
1601 dev_err(hdev->dev, "Failed late init after hard reset\n");
1602 goto out_err;
1603 }
1604
1605 rc = hl_vm_init(hdev);
1606 if (rc) {
1607 dev_err(hdev->dev, "Failed to init memory module after hard reset\n");
1608 goto out_err;
1609 }
1610
1611 if (!hdev->asic_prop.fw_security_enabled)
1612 hl_fw_set_max_power(hdev);
1613 } else {
1614 rc = hdev->asic_funcs->compute_reset_late_init(hdev);
1615 if (rc) {
1616 if (reset_upon_device_release)
1617 dev_err(hdev->dev,
1618 "Failed late init in reset after device release\n");
1619 else
1620 dev_err(hdev->dev, "Failed late init after compute reset\n");
1621 goto out_err;
1622 }
1623 }
1624
1625 rc = hdev->asic_funcs->scrub_device_mem(hdev);
1626 if (rc) {
1627 dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc);
1628 return rc;
1629 }
1630
1631 spin_lock(&hdev->reset_info.lock);
1632 hdev->reset_info.in_compute_reset = 0;
1633
1634 /* Schedule hard reset only if requested and if not already in hard reset.
1635 * We keep 'in_reset' enabled, so no other reset can go in during the hard
1636 * reset schedule
1637 */
1638 if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags)
1639 schedule_hard_reset = true;
1640 else
1641 hdev->reset_info.in_reset = 0;
1642
1643 spin_unlock(&hdev->reset_info.lock);
1644
1645 hdev->reset_info.needs_reset = false;
1646
1647 if (hard_reset)
1648 dev_info(hdev->dev, "Successfully finished resetting the device\n");
1649 else
1650 dev_dbg(hdev->dev, "Successfully finished resetting the device\n");
1651
1652 if (hard_reset) {
1653 hdev->reset_info.hard_reset_cnt++;
1654
1655 /* After reset is done, we are ready to receive events from
1656 * the F/W. We can't do it before because we will ignore events
1657 * and if those events are fatal, we won't know about it and
1658 * the device will be operational although it shouldn't be
1659 */
1660 hdev->asic_funcs->enable_events_from_fw(hdev);
1661 } else if (!reset_upon_device_release) {
1662 hdev->reset_info.compute_reset_cnt++;
1663 }
1664
1665 if (schedule_hard_reset) {
1666 dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");
1667 flags = hdev->reset_info.hard_reset_schedule_flags;
1668 hdev->reset_info.hard_reset_schedule_flags = 0;
1669 hdev->disabled = true;
1670 hard_reset = true;
1671 handle_reset_trigger(hdev, flags);
1672 goto again;
1673 }
1674
1675 return 0;
1676
1677 out_err:
1678 hdev->disabled = true;
1679
1680 spin_lock(&hdev->reset_info.lock);
1681 hdev->reset_info.in_compute_reset = 0;
1682
1683 if (hard_reset) {
1684 dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n");
1685 hdev->reset_info.hard_reset_cnt++;
1686 } else if (reset_upon_device_release) {
1687 spin_unlock(&hdev->reset_info.lock);
1688 dev_err(hdev->dev, "Failed to reset device after user release\n");
1689 flags |= HL_DRV_RESET_HARD;
1690 flags &= ~HL_DRV_RESET_DEV_RELEASE;
1691 hard_reset = true;
1692 goto again;
1693 } else {
1694 spin_unlock(&hdev->reset_info.lock);
1695 dev_err(hdev->dev, "Failed to do compute reset\n");
1696 hdev->reset_info.compute_reset_cnt++;
1697 flags |= HL_DRV_RESET_HARD;
1698 hard_reset = true;
1699 goto again;
1700 }
1701
1702 hdev->reset_info.in_reset = 0;
1703
1704 spin_unlock(&hdev->reset_info.lock);
1705
1706 return rc;
1707 }
1708
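/*
 * hl_notifier_event_send - update a user's event mask and signal its eventfd
 *
 * @notifier_event: the notifier data of a single user process
 * @event_mask: the occurred event/s
 */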
1709 static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask)
1710 {
1711 mutex_lock(&notifier_event->lock);
1712 notifier_event->events_mask |= event_mask;
1713
1714 if (notifier_event->eventfd)
1715 eventfd_signal(notifier_event->eventfd, 1);
1716
1717 mutex_unlock(&notifier_event->lock);
1718 }
1719
1720 /*
1721 * hl_notifier_event_send_all - notify all user processes via eventfd
1722 *
1723 * @hdev: pointer to habanalabs device structure
1724 * @event_mask: the occurred event/s
1725 * Returns 0 for success or an error on failure.
1726 */
1727 void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
1728 {
1729 struct hl_fpriv *hpriv;
1730
1731 mutex_lock(&hdev->fpriv_list_lock);
1732
1733 list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
1734 hl_notifier_event_send(&hpriv->notifier_event, event_mask);
1735
1736 mutex_unlock(&hdev->fpriv_list_lock);
1737
1738 /* control device */
1739 mutex_lock(&hdev->fpriv_ctrl_list_lock);
1740
1741 list_for_each_entry(hpriv, &hdev->fpriv_ctrl_list, dev_node)
1742 hl_notifier_event_send(&hpriv->notifier_event, event_mask);
1743
1744 mutex_unlock(&hdev->fpriv_ctrl_list_lock);
1745 }
1746
1747 /*
1748 * hl_device_init - main initialization function for habanalabs device
1749 *
1750 * @hdev: pointer to habanalabs device structure
1751 *
1752 * Allocate an id for the device, do early initialization and then call the
1753 * ASIC specific initialization functions. Finally, create the cdev and the
1754 * Linux device to expose it to the user
1755 */
1756 int hl_device_init(struct hl_device *hdev, struct class *hclass)
1757 {
1758 int i, rc, cq_cnt, user_interrupt_cnt, cq_ready_cnt;
1759 char *name;
1760 bool add_cdev_sysfs_on_err = false;
1761
1762 hdev->cdev_idx = hdev->id / 2;
1763
1764 name = kasprintf(GFP_KERNEL, "hl%d", hdev->cdev_idx);
1765 if (!name) {
1766 rc = -ENOMEM;
1767 goto out_disabled;
1768 }
1769
1770 /* Initialize cdev and device structures */
1771 rc = device_init_cdev(hdev, hclass, hdev->id, &hl_ops, name,
1772 &hdev->cdev, &hdev->dev);
1773
1774 kfree(name);
1775
1776 if (rc)
1777 goto out_disabled;
1778
1779 name = kasprintf(GFP_KERNEL, "hl_controlD%d", hdev->cdev_idx);
1780 if (!name) {
1781 rc = -ENOMEM;
1782 goto free_dev;
1783 }
1784
1785 /* Initialize cdev and device structures for control device */
1786 rc = device_init_cdev(hdev, hclass, hdev->id_control, &hl_ctrl_ops,
1787 name, &hdev->cdev_ctrl, &hdev->dev_ctrl);
1788
1789 kfree(name);
1790
1791 if (rc)
1792 goto free_dev;
1793
1794 /* Initialize ASIC function pointers and perform early init */
1795 rc = device_early_init(hdev);
1796 if (rc)
1797 goto free_dev_ctrl;
1798
1799 user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
1800 hdev->asic_prop.user_interrupt_count;
1801
1802 if (user_interrupt_cnt) {
1803 hdev->user_interrupt = kcalloc(user_interrupt_cnt, sizeof(*hdev->user_interrupt),
1804 GFP_KERNEL);
1805 if (!hdev->user_interrupt) {
1806 rc = -ENOMEM;
1807 goto early_fini;
1808 }
1809 }
1810
1811 /*
1812 * Start the ASIC initialization: first S/W, then H/W and finally
1813 * late init.
1814 */
1815 rc = hdev->asic_funcs->sw_init(hdev);
1816 if (rc)
1817 goto free_usr_intr_mem;
1818
1819
1820 /* initialize completion structure for multi CS wait */
1821 hl_multi_cs_completion_init(hdev);
1822
1823 /*
1824 * Initialize the H/W queues. Must be done before hw_init, because
1825 * hw_init writes the addresses of the kernel queues to the
1826 * registers of the device.
1827 */
1828 rc = hl_hw_queues_create(hdev);
1829 if (rc) {
1830 dev_err(hdev->dev, "failed to initialize kernel queues\n");
1831 goto sw_fini;
1832 }
1833
1834 cq_cnt = hdev->asic_prop.completion_queues_count;
1835
1836 /*
1837 * Initialize the completion queues. Must be done before hw_init,
1838 * because hw_init passes the addresses of the completion queues
1839 * as arguments to request_irq.
1840 */
1841 if (cq_cnt) {
1842 hdev->completion_queue = kcalloc(cq_cnt,
1843 sizeof(*hdev->completion_queue),
1844 GFP_KERNEL);
1845
1846 if (!hdev->completion_queue) {
1847 dev_err(hdev->dev,
1848 "failed to allocate completion queues\n");
1849 rc = -ENOMEM;
1850 goto hw_queues_destroy;
1851 }
1852 }
1853
1854 for (i = 0, cq_ready_cnt = 0 ; i < cq_cnt ; i++, cq_ready_cnt++) {
1855 rc = hl_cq_init(hdev, &hdev->completion_queue[i],
1856 hdev->asic_funcs->get_queue_id_for_cq(hdev, i));
1857 if (rc) {
1858 dev_err(hdev->dev,
1859 "failed to initialize completion queue\n");
1860 goto cq_fini;
1861 }
1862 hdev->completion_queue[i].cq_idx = i;
1863 }
1864
1865 hdev->shadow_cs_queue = kcalloc(hdev->asic_prop.max_pending_cs,
1866 sizeof(struct hl_cs *), GFP_KERNEL);
1867 if (!hdev->shadow_cs_queue) {
1868 rc = -ENOMEM;
1869 goto cq_fini;
1870 }
1871
1872 /*
1873 * Initialize the event queue. Must be done before hw_init,
1874 * because hw_init passes the address of the event queue
1875 * as an argument to request_irq.
1876 */
1877 rc = hl_eq_init(hdev, &hdev->event_queue);
1878 if (rc) {
1879 dev_err(hdev->dev, "failed to initialize event queue\n");
1880 goto free_shadow_cs_queue;
1881 }
1882
1883 /* MMU S/W must be initialized before kernel context is created */
1884 rc = hl_mmu_init(hdev);
1885 if (rc) {
1886 dev_err(hdev->dev, "Failed to initialize MMU S/W structures\n");
1887 goto eq_fini;
1888 }
1889
1890 /* Allocate the kernel context */
1891 hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL);
1892 if (!hdev->kernel_ctx) {
1893 rc = -ENOMEM;
1894 goto mmu_fini;
1895 }
1896
1897 hdev->is_compute_ctx_active = false;
1898
1899 hdev->asic_funcs->state_dump_init(hdev);
1900
1901 hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL;
1902 hl_debugfs_add_device(hdev);
1903
1904 /* debugfs nodes are created in hl_ctx_init so it must be called after
1905 * hl_debugfs_add_device.
1906 */
1907 rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
1908 if (rc) {
1909 dev_err(hdev->dev, "failed to initialize kernel context\n");
1910 kfree(hdev->kernel_ctx);
1911 goto remove_device_from_debugfs;
1912 }
1913
1914 rc = hl_cb_pool_init(hdev);
1915 if (rc) {
1916 dev_err(hdev->dev, "failed to initialize CB pool\n");
1917 goto release_ctx;
1918 }
1919
1920 rc = hl_dec_init(hdev);
1921 if (rc) {
1922 dev_err(hdev->dev, "Failed to initialize the decoder module\n");
1923 goto cb_pool_fini;
1924 }
1925
1926 /*
1927 * From this point, override rc (set it to 0) in case of an error, to
1928 * allow debugging (by adding the char devices and creating the sysfs
1929 * nodes as part of the error flow).
1930 */
1931 add_cdev_sysfs_on_err = true;
1932
1933 /* The device is now enabled, because part of the initialization
1934 * requires communication with the device firmware to get information
1935 * that is needed for the initialization itself.
1936 */
1937 hdev->disabled = false;
1938
1939 rc = hdev->asic_funcs->hw_init(hdev);
1940 if (rc) {
1941 dev_err(hdev->dev, "failed to initialize the H/W\n");
1942 rc = 0;
1943 goto out_disabled;
1944 }
1945
1946 /* Check that the communication with the device is working */
1947 rc = hdev->asic_funcs->test_queues(hdev);
1948 if (rc) {
1949 dev_err(hdev->dev, "Failed to detect if device is alive\n");
1950 rc = 0;
1951 goto out_disabled;
1952 }
1953
1954 rc = device_late_init(hdev);
1955 if (rc) {
1956 dev_err(hdev->dev, "Failed late initialization\n");
1957 rc = 0;
1958 goto out_disabled;
1959 }
1960
1961 dev_info(hdev->dev, "Found %s device with %lluGB DRAM\n",
1962 hdev->asic_name,
1963 hdev->asic_prop.dram_size / SZ_1G);
1964
1965 rc = hl_vm_init(hdev);
1966 if (rc) {
1967 dev_err(hdev->dev, "Failed to initialize memory module\n");
1968 rc = 0;
1969 goto out_disabled;
1970 }
1971
1972 /*
1973 * Expose the devices and sysfs nodes to the user.
1974 * From here on, there is no need to add the char devices and create the
1975 * sysfs nodes in case of an error.
1976 */
1977 add_cdev_sysfs_on_err = false;
1978 rc = device_cdev_sysfs_add(hdev);
1979 if (rc) {
1980 dev_err(hdev->dev,
1981 "Failed to add char devices and sysfs nodes\n");
1982 rc = 0;
1983 goto out_disabled;
1984 }
1985
1986 /* Need to call this again because, for certain ASICs, the max power
1987 * might change depending on the card type.
1988 */
1989 if (hdev->asic_prop.set_max_power_on_device_init &&
1990 !hdev->asic_prop.fw_security_enabled)
1991 hl_fw_set_max_power(hdev);
1992
1993 /*
1994 * hl_hwmon_init() must be called after device_late_init(), because only
1995 * then do we get the information from the device about which
1996 * hwmon-related sensors the device supports.
1997 * Furthermore, it must be done after adding the device to the system.
1998 */
1999 rc = hl_hwmon_init(hdev);
2000 if (rc) {
2001 dev_err(hdev->dev, "Failed to initialize hwmon\n");
2002 rc = 0;
2003 goto out_disabled;
2004 }
2005
2006 dev_notice(hdev->dev,
2007 "Successfully added device to habanalabs driver\n");
2008
2009 hdev->init_done = true;
2010
2011 /* After initialization is done, we are ready to receive events from
2012 * the F/W. We can't do it earlier because events would be ignored, and
2013 * if one of them were fatal we wouldn't know about it and the device
2014 * would stay operational although it shouldn't be.
2015 */
2016 hdev->asic_funcs->enable_events_from_fw(hdev);
2017
2018 return 0;
2019
2020 cb_pool_fini:
2021 hl_cb_pool_fini(hdev);
2022 release_ctx:
2023 if (hl_ctx_put(hdev->kernel_ctx) != 1)
2024 dev_err(hdev->dev,
2025 "kernel ctx is still alive on initialization failure\n");
2026 remove_device_from_debugfs:
2027 hl_debugfs_remove_device(hdev);
2028 mmu_fini:
2029 hl_mmu_fini(hdev);
2030 eq_fini:
2031 hl_eq_fini(hdev, &hdev->event_queue);
2032 free_shadow_cs_queue:
2033 kfree(hdev->shadow_cs_queue);
2034 cq_fini:
2035 for (i = 0 ; i < cq_ready_cnt ; i++)
2036 hl_cq_fini(hdev, &hdev->completion_queue[i]);
2037 kfree(hdev->completion_queue);
2038 hw_queues_destroy:
2039 hl_hw_queues_destroy(hdev);
2040 sw_fini:
2041 hdev->asic_funcs->sw_fini(hdev);
2042 free_usr_intr_mem:
2043 kfree(hdev->user_interrupt);
2044 early_fini:
2045 device_early_fini(hdev);
2046 free_dev_ctrl:
2047 put_device(hdev->dev_ctrl);
2048 free_dev:
2049 put_device(hdev->dev);
2050 out_disabled:
2051 hdev->disabled = true;
2052 if (add_cdev_sysfs_on_err)
2053 device_cdev_sysfs_add(hdev);
2054 if (hdev->pdev)
2055 dev_err(&hdev->pdev->dev,
2056 "Failed to initialize hl%d. Device is NOT usable !\n",
2057 hdev->cdev_idx);
2058 else
2059 pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
2060 hdev->cdev_idx);
2061
2062 return rc;
2063 }
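/*
 * Illustrative sketch (editor's note, not part of the driver): the intended
 * pairing of the two entry points in this file. The PCI probe path calls
 * hl_device_init() after allocating and wiring up the hl_device, and the
 * remove path calls hl_device_fini(). create_hdev()/destroy_hdev() below are
 * hypothetical stand-ins for the real allocation code in habanalabs_drv.c.
 */
#if 0
static int example_probe(struct pci_dev *pdev, struct class *hclass)
{
	struct hl_device *hdev;
	int rc;

	hdev = create_hdev(pdev);	/* hypothetical helper */
	if (!hdev)
		return -ENOMEM;

	rc = hl_device_init(hdev, hclass);
	if (rc) {
		destroy_hdev(hdev);	/* hypothetical helper */
		return rc;
	}

	return 0;
}

static void example_remove(struct hl_device *hdev)
{
	hl_device_fini(hdev);
	destroy_hdev(hdev);		/* hypothetical helper */
}
#endif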
2064
2065 /*
2066 * hl_device_fini - main tear-down function for habanalabs device
2067 *
2068 * @hdev: pointer to habanalabs device structure
2069 *
2070 * Destroy the device, call ASIC fini functions and release the id
2071 */
2072 void hl_device_fini(struct hl_device *hdev)
2073 {
2074 bool device_in_reset;
2075 ktime_t timeout;
2076 u64 reset_sec;
2077 int i, rc;
2078
2079 dev_info(hdev->dev, "Removing device\n");
2080
2081 hdev->device_fini_pending = 1;
2082 flush_delayed_work(&hdev->device_reset_work.reset_work);
2083
2084 if (hdev->pldm)
2085 reset_sec = HL_PLDM_HARD_RESET_MAX_TIMEOUT;
2086 else
2087 reset_sec = HL_HARD_RESET_MAX_TIMEOUT;
2088
2089 /*
2090 * This function competes with the reset function, so try to take the
2091 * in_reset flag atomically; if we are already in the middle of a reset,
2092 * wait until the reset function finishes. The reset function is designed
2093 * to always finish. However, in Gaudi, because of all the network
2094 * ports, a hard reset can take between 10 and 30 seconds.
2095 */
2096
2097 timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);
2098
2099 spin_lock(&hdev->reset_info.lock);
2100 device_in_reset = !!hdev->reset_info.in_reset;
2101 if (!device_in_reset)
2102 hdev->reset_info.in_reset = 1;
2103 spin_unlock(&hdev->reset_info.lock);
2104
2105 while (device_in_reset) {
2106 usleep_range(50, 200);
2107
2108 spin_lock(&hdev->reset_info.lock);
2109 device_in_reset = !!hdev->reset_info.in_reset;
2110 if (!device_in_reset)
2111 hdev->reset_info.in_reset = 1;
2112 spin_unlock(&hdev->reset_info.lock);
2113
2114 if (ktime_compare(ktime_get(), timeout) > 0) {
2115 dev_crit(hdev->dev,
2116 "Failed to remove device because reset function did not finish\n");
2117 return;
2118 }
2119 }
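/*
 * The in_reset flag is now held by this function, so the reset work cannot
 * start a new reset while the device is being torn down.
 */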
2120
2121 /* Disable PCI access from the device F/W so it won't send us additional
2122 * interrupts. We disable MSI/MSI-X in the halt_engines function and we
2123 * can't have the F/W sending us interrupts after that. We need to
2124 * disable the access here because if the device is marked disabled, the
2125 * message won't be sent. Also, in case of a heartbeat failure, the device
2126 * CPU is marked as disabled so this message won't be sent.
2127 */
2128 hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
2129
2130 /* Mark device as disabled */
2131 hdev->disabled = true;
2132
2133 take_release_locks(hdev);
2134
2135 hdev->reset_info.hard_reset_pending = true;
2136
2137 hl_hwmon_fini(hdev);
2138
2139 cleanup_resources(hdev, true, false, false);
2140
2141 /* Kill processes here, after the CS rollback. This is because a process
2142 * can't really exit until all its CSs are done, and that is what the CS
2143 * rollback takes care of.
2144 */
2145 dev_info(hdev->dev,
2146 "Waiting for all processes to exit (timeout of %u seconds)",
2147 HL_PENDING_RESET_LONG_SEC);
2148
2149 rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC, false);
2150 if (rc) {
2151 dev_crit(hdev->dev, "Failed to kill all open processes\n");
2152 device_disable_open_processes(hdev, false);
2153 }
2154
2155 rc = device_kill_open_processes(hdev, 0, true);
2156 if (rc) {
2157 dev_crit(hdev->dev, "Failed to kill all control device open processes\n");
2158 device_disable_open_processes(hdev, true);
2159 }
2160
2161 hl_cb_pool_fini(hdev);
2162
2163 /* Reset the H/W. It will be in idle state after this returns */
2164 hdev->asic_funcs->hw_fini(hdev, true, false);
2165
2166 hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
2167
2168 /* Release kernel context */
2169 if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
2170 dev_err(hdev->dev, "kernel ctx is still alive\n");
2171
2172 hl_debugfs_remove_device(hdev);
2173
2174 hl_dec_fini(hdev);
2175
2176 hl_vm_fini(hdev);
2177
2178 hl_mmu_fini(hdev);
2179
2180 hl_eq_fini(hdev, &hdev->event_queue);
2181
2182 kfree(hdev->shadow_cs_queue);
2183
2184 for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
2185 hl_cq_fini(hdev, &hdev->completion_queue[i]);
2186 kfree(hdev->completion_queue);
2187 kfree(hdev->user_interrupt);
2188
2189 hl_hw_queues_destroy(hdev);
2190
2191 /* Call ASIC S/W finalize function */
2192 hdev->asic_funcs->sw_fini(hdev);
2193
2194 device_early_fini(hdev);
2195
2196 /* Hide devices and sysfs nodes from user */
2197 device_cdev_sysfs_del(hdev);
2198
2199 pr_info("removed device successfully\n");
2200 }
2201
2202 /*
2203 * MMIO register access helper functions.
2204 */
2205
2206 /*
2207 * hl_rreg - Read an MMIO register
2208 *
2209 * @hdev: pointer to habanalabs device structure
2210 * @reg: MMIO register offset (in bytes)
2211 *
2212 * Returns the value of the MMIO register we are asked to read
2213 *
2214 */
2215 inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
2216 {
2217 return readl(hdev->rmmio + reg);
2218 }
2219
2220 /*
2221 * hl_wreg - Write to an MMIO register
2222 *
2223 * @hdev: pointer to habanalabs device structure
2224 * @reg: MMIO register offset (in bytes)
2225 * @val: 32-bit value
2226 *
2227 * Writes the 32-bit value into the MMIO register
2228 *
2229 */
2230 inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
2231 {
2232 writel(val, hdev->rmmio + reg);
2233 }
2234
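/*
 * Illustrative sketch (editor's note, not part of the driver): a typical
 * read-modify-write sequence built on top of the two helpers above. The
 * register offset and bit mask are hypothetical values chosen only for the
 * example.
 */
#if 0
static void example_set_register_bits(struct hl_device *hdev)
{
	u32 reg_offset = 0x1000;	/* hypothetical MMIO offset */
	u32 mask = 0x3;			/* hypothetical bit mask */
	u32 val;

	val = hl_rreg(hdev, reg_offset);
	val |= mask;
	hl_wreg(hdev, reg_offset, val);
}
#endif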