1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * Copyright 2016-2021 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7 
8 #include "habanalabs.h"
9 #include "../include/hw_ip/mmu/mmu_general.h"
10 
11 #include <linux/pci.h>
12 #include <linux/uaccess.h>
13 #include <linux/vmalloc.h>
14 #include <linux/iommu.h>
15 
16 #define MMU_ADDR_BUF_SIZE	40
17 #define MMU_ASID_BUF_SIZE	10
18 #define MMU_KBUF_SIZE		(MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
19 #define I2C_MAX_TRANSACTION_LEN	8
20 
21 static struct dentry *hl_debug_root;
22 
/*
 * Read a value from an I2C device via a CPU-CP firmware packet.
 * On success the value read is stored in *val; returns 0, or a
 * negative errno (-EBUSY if the device is not operational, -EINVAL
 * for an over-long transaction, or the firmware call's error).
 */
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
				u8 i2c_reg, u8 i2c_len, u64 *val)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return -EBUSY;

	/* The firmware interface limits a single transaction length */
	if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
		dev_err(hdev->dev, "I2C transaction length %u, exceeds maximum of %u\n",
				i2c_len, I2C_MAX_TRANSACTION_LEN);
		return -EINVAL;
	}

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD << CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.i2c_bus = i2c_bus;
	pkt.i2c_addr = i2c_addr;
	pkt.i2c_reg = i2c_reg;
	pkt.i2c_len = i2c_len;

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, val);
	if (rc)
		dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);

	return rc;
}
54 
/*
 * Write a value to an I2C device via a CPU-CP firmware packet.
 * Returns 0 on success, -EBUSY if the device is not operational,
 * -EINVAL for an over-long transaction, or the firmware call's error.
 */
static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
				u8 i2c_reg, u8 i2c_len, u64 val)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return -EBUSY;

	/* Same transaction-length restriction as the read path */
	if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
		dev_err(hdev->dev, "I2C transaction length %u, exceeds maximum of %u\n",
				i2c_len, I2C_MAX_TRANSACTION_LEN);
		return -EINVAL;
	}

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR << CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.i2c_bus = i2c_bus;
	pkt.i2c_addr = i2c_addr;
	pkt.i2c_reg = i2c_reg;
	pkt.i2c_len = i2c_len;
	pkt.value = cpu_to_le64(val);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, NULL);
	if (rc)
		dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);

	return rc;
}
88 
/*
 * Set a board LED state through the CPU-CP firmware.
 * Best-effort: silently returns if the device is not operational,
 * and only logs on firmware failure.
 */
static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_LED_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.led_index = cpu_to_le32(led);
	pkt.value = cpu_to_le64(state);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, NULL);
	if (rc)
		dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
}
110 
command_buffers_show(struct seq_file * s,void * data)111 static int command_buffers_show(struct seq_file *s, void *data)
112 {
113 	struct hl_debugfs_entry *entry = s->private;
114 	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
115 	struct hl_cb *cb;
116 	bool first = true;
117 
118 	spin_lock(&dev_entry->cb_spinlock);
119 
120 	list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
121 		if (first) {
122 			first = false;
123 			seq_puts(s, "\n");
124 			seq_puts(s, " CB ID   CTX ID   CB size    CB RefCnt    mmap?   CS counter\n");
125 			seq_puts(s, "---------------------------------------------------------------\n");
126 		}
127 		seq_printf(s,
128 			"   %03llu        %d    0x%08x      %d          %d          %d\n",
129 			cb->buf->handle, cb->ctx->asid, cb->size,
130 			kref_read(&cb->buf->refcount),
131 			atomic_read(&cb->buf->mmap), atomic_read(&cb->cs_cnt));
132 	}
133 
134 	spin_unlock(&dev_entry->cb_spinlock);
135 
136 	if (!first)
137 		seq_puts(s, "\n");
138 
139 	return 0;
140 }
141 
command_submission_show(struct seq_file * s,void * data)142 static int command_submission_show(struct seq_file *s, void *data)
143 {
144 	struct hl_debugfs_entry *entry = s->private;
145 	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
146 	struct hl_cs *cs;
147 	bool first = true;
148 
149 	spin_lock(&dev_entry->cs_spinlock);
150 
151 	list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
152 		if (first) {
153 			first = false;
154 			seq_puts(s, "\n");
155 			seq_puts(s, " CS ID   CTX ASID   CS RefCnt   Submitted    Completed\n");
156 			seq_puts(s, "------------------------------------------------------\n");
157 		}
158 		seq_printf(s,
159 			"   %llu       %d          %d           %d            %d\n",
160 			cs->sequence, cs->ctx->asid,
161 			kref_read(&cs->refcount),
162 			cs->submitted, cs->completed);
163 	}
164 
165 	spin_unlock(&dev_entry->cs_spinlock);
166 
167 	if (!first)
168 		seq_puts(s, "\n");
169 
170 	return 0;
171 }
172 
command_submission_jobs_show(struct seq_file * s,void * data)173 static int command_submission_jobs_show(struct seq_file *s, void *data)
174 {
175 	struct hl_debugfs_entry *entry = s->private;
176 	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
177 	struct hl_cs_job *job;
178 	bool first = true;
179 
180 	spin_lock(&dev_entry->cs_job_spinlock);
181 
182 	list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
183 		if (first) {
184 			first = false;
185 			seq_puts(s, "\n");
186 			seq_puts(s, " JOB ID   CS ID    CTX ASID   JOB RefCnt   H/W Queue\n");
187 			seq_puts(s, "----------------------------------------------------\n");
188 		}
189 		if (job->cs)
190 			seq_printf(s,
191 				"   %02d      %llu        %d          %d           %d\n",
192 				job->id, job->cs->sequence, job->cs->ctx->asid,
193 				kref_read(&job->refcount), job->hw_queue_id);
194 		else
195 			seq_printf(s,
196 				"   %02d      0        %d          %d           %d\n",
197 				job->id, HL_KERNEL_ASID_ID,
198 				kref_read(&job->refcount), job->hw_queue_id);
199 	}
200 
201 	spin_unlock(&dev_entry->cs_job_spinlock);
202 
203 	if (!first)
204 		seq_puts(s, "\n");
205 
206 	return 0;
207 }
208 
userptr_show(struct seq_file * s,void * data)209 static int userptr_show(struct seq_file *s, void *data)
210 {
211 	struct hl_debugfs_entry *entry = s->private;
212 	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
213 	struct hl_userptr *userptr;
214 	char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
215 				"DMA_FROM_DEVICE", "DMA_NONE"};
216 	bool first = true;
217 
218 	spin_lock(&dev_entry->userptr_spinlock);
219 
220 	list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
221 		if (first) {
222 			first = false;
223 			seq_puts(s, "\n");
224 			seq_puts(s, " pid      user virtual address     size             dma dir\n");
225 			seq_puts(s, "----------------------------------------------------------\n");
226 		}
227 		seq_printf(s, " %-7d  0x%-14llx      %-10llu    %-30s\n",
228 				userptr->pid, userptr->addr, userptr->size,
229 				dma_dir[userptr->dir]);
230 	}
231 
232 	spin_unlock(&dev_entry->userptr_spinlock);
233 
234 	if (!first)
235 		seq_puts(s, "\n");
236 
237 	return 0;
238 }
239 
/*
 * debugfs show: dump the virtual-memory state of every context that has
 * registered with debugfs, followed by the VA ranges of the current
 * compute context (if one exists).
 *
 * Per context, three sections are printed:
 *  - "mappings": every entry in ctx->mem_hash (under mem_hash_lock),
 *    distinguishing host userptr mappings from device allocations.
 *  - "hw_block mappings": only for non-kernel contexts with a non-empty
 *    hw_block_mem_list (under hw_block_list_lock).
 *  - "allocations": all physical page packs in the device-wide IDR that
 *    belong to this context's ASID (under vm->idr_lock).
 *
 * Locking order here is ctx_mem_hash_spinlock -> per-ctx mutexes /
 * vm->idr_lock; the whole ctx walk happens under the outer spinlock.
 */
static int vm_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_vm_hw_block_list_node *lnode;
	struct hl_ctx *ctx;
	struct hl_vm *vm;
	struct hl_vm_hash_node *hnode;
	struct hl_userptr *userptr;
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	struct hl_va_range *va_range;
	struct hl_vm_va_block *va_block;
	enum vm_type *vm_type;
	bool once = true;	/* stays true iff no context was printed */
	u64 j;
	int i;

	/* Nothing meaningful to report when the MMU is disabled */
	if (!dev_entry->hdev->mmu_enable)
		return 0;

	spin_lock(&dev_entry->ctx_mem_hash_spinlock);

	list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
		once = false;
		seq_puts(s, "\n\n----------------------------------------------------");
		seq_puts(s, "\n----------------------------------------------------\n\n");
		seq_printf(s, "ctx asid: %u\n", ctx->asid);

		seq_puts(s, "\nmappings:\n\n");
		seq_puts(s, "    virtual address        size          handle\n");
		seq_puts(s, "----------------------------------------------------\n");
		mutex_lock(&ctx->mem_hash_lock);
		hash_for_each(ctx->mem_hash, i, hnode, node) {
			/* First field of both payload types is the vm_type tag */
			vm_type = hnode->ptr;

			if (*vm_type == VM_TYPE_USERPTR) {
				userptr = hnode->ptr;
				/* Host mappings have no device handle column */
				seq_printf(s,
					"    0x%-14llx      %-10llu\n",
					hnode->vaddr, userptr->size);
			} else {
				phys_pg_pack = hnode->ptr;
				seq_printf(s,
					"    0x%-14llx      %-10llu       %-4u\n",
					hnode->vaddr, phys_pg_pack->total_size,
					phys_pg_pack->handle);
			}
		}
		mutex_unlock(&ctx->mem_hash_lock);

		/* HW block mappings exist only for user (non-kernel) contexts */
		if (ctx->asid != HL_KERNEL_ASID_ID &&
		    !list_empty(&ctx->hw_block_mem_list)) {
			seq_puts(s, "\nhw_block mappings:\n\n");
			seq_puts(s, "    virtual address    size    HW block id\n");
			seq_puts(s, "-------------------------------------------\n");
			mutex_lock(&ctx->hw_block_list_lock);
			list_for_each_entry(lnode, &ctx->hw_block_mem_list,
					    node) {
				seq_printf(s,
					"    0x%-14lx   %-6u      %-9u\n",
					lnode->vaddr, lnode->size, lnode->id);
			}
			mutex_unlock(&ctx->hw_block_list_lock);
		}

		vm = &ctx->hdev->vm;
		spin_lock(&vm->idr_lock);

		if (!idr_is_empty(&vm->phys_pg_pack_handles))
			seq_puts(s, "\n\nallocations:\n");

		/* The IDR is device-wide; filter by this context's ASID */
		idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
			if (phys_pg_pack->asid != ctx->asid)
				continue;

			seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
			seq_printf(s, "page size: %u\n\n",
						phys_pg_pack->page_size);
			seq_puts(s, "   physical address\n");
			seq_puts(s, "---------------------\n");
			for (j = 0 ; j < phys_pg_pack->npages ; j++) {
				seq_printf(s, "    0x%-14llx\n",
						phys_pg_pack->pages[j]);
			}
		}
		spin_unlock(&vm->idr_lock);

	}

	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);

	/* Dump the VA ranges of the active compute context, if any */
	ctx = hl_get_compute_ctx(dev_entry->hdev);
	if (ctx) {
		seq_puts(s, "\nVA ranges:\n\n");
		for (i = HL_VA_RANGE_TYPE_HOST ; i < HL_VA_RANGE_TYPE_MAX ; ++i) {
			va_range = ctx->va_range[i];
			seq_printf(s, "   va_range %d\n", i);
			seq_puts(s, "---------------------\n");
			mutex_lock(&va_range->lock);
			list_for_each_entry(va_block, &va_range->list, node) {
				seq_printf(s, "%#16llx - %#16llx (%#llx)\n",
					   va_block->start, va_block->end,
					   va_block->size);
			}
			mutex_unlock(&va_range->lock);
			seq_puts(s, "\n");
		}
		/* Drop the reference taken by hl_get_compute_ctx() */
		hl_ctx_put(ctx);
	}

	if (!once)
		seq_puts(s, "\n");

	return 0;
}
355 
/*
 * debugfs show: translate the host virtual address previously written to
 * the "userptr_lookup" node (dev_entry->userptr_lookup) into its DMA
 * address, by locating the pinned userptr region containing it and then
 * walking that region's scatter-gather table.
 *
 * NOTE(review): the sg walk assumes the userptr's pages map linearly to
 * the user VA range in PAGE_SIZE units — confirm against the pinning
 * code if entries can be partial pages.
 */
static int userptr_lookup_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct scatterlist *sg;
	struct hl_userptr *userptr;
	bool first = true;
	u64 total_npages, npages, sg_start, sg_end;
	dma_addr_t dma_addr;
	int i;

	spin_lock(&dev_entry->userptr_spinlock);

	list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
		/* Only userptr regions containing the lookup address matter */
		if (dev_entry->userptr_lookup >= userptr->addr &&
		dev_entry->userptr_lookup < userptr->addr + userptr->size) {
			total_npages = 0;
			for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
				npages = hl_get_sg_info(sg, &dma_addr);
				/* User VA span covered by this sg entry */
				sg_start = userptr->addr +
					total_npages * PAGE_SIZE;
				sg_end = userptr->addr +
					(total_npages + npages) * PAGE_SIZE;

				if (dev_entry->userptr_lookup >= sg_start &&
				    dev_entry->userptr_lookup < sg_end) {
					/* Offset within this sg entry */
					dma_addr += (dev_entry->userptr_lookup -
							sg_start);
					if (first) {
						first = false;
						seq_puts(s, "\n");
						seq_puts(s, " user virtual address         dma address       pid        region start     region size\n");
						seq_puts(s, "---------------------------------------------------------------------------------------\n");
					}
					seq_printf(s, " 0x%-18llx  0x%-16llx  %-8u  0x%-16llx %-12llu\n",
						dev_entry->userptr_lookup,
						(u64)dma_addr, userptr->pid,
						userptr->addr, userptr->size);
				}
				total_npages += npages;
			}
		}
	}

	spin_unlock(&dev_entry->userptr_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}
407 
userptr_lookup_write(struct file * file,const char __user * buf,size_t count,loff_t * f_pos)408 static ssize_t userptr_lookup_write(struct file *file, const char __user *buf,
409 		size_t count, loff_t *f_pos)
410 {
411 	struct seq_file *s = file->private_data;
412 	struct hl_debugfs_entry *entry = s->private;
413 	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
414 	ssize_t rc;
415 	u64 value;
416 
417 	rc = kstrtoull_from_user(buf, count, 16, &value);
418 	if (rc)
419 		return rc;
420 
421 	dev_entry->userptr_lookup = value;
422 
423 	return count;
424 }
425 
mmu_show(struct seq_file * s,void * data)426 static int mmu_show(struct seq_file *s, void *data)
427 {
428 	struct hl_debugfs_entry *entry = s->private;
429 	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
430 	struct hl_device *hdev = dev_entry->hdev;
431 	struct hl_ctx *ctx;
432 	struct hl_mmu_hop_info hops_info = {0};
433 	u64 virt_addr = dev_entry->mmu_addr, phys_addr;
434 	int i;
435 
436 	if (!hdev->mmu_enable)
437 		return 0;
438 
439 	if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
440 		ctx = hdev->kernel_ctx;
441 	else
442 		ctx = hl_get_compute_ctx(hdev);
443 
444 	if (!ctx) {
445 		dev_err(hdev->dev, "no ctx available\n");
446 		return 0;
447 	}
448 
449 	if (hl_mmu_get_tlb_info(ctx, virt_addr, &hops_info)) {
450 		dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
451 				virt_addr);
452 		return 0;
453 	}
454 
455 	hl_mmu_va_to_pa(ctx, virt_addr, &phys_addr);
456 
457 	if (hops_info.scrambled_vaddr &&
458 		(dev_entry->mmu_addr != hops_info.scrambled_vaddr))
459 		seq_printf(s,
460 			"asid: %u, virt_addr: 0x%llx, scrambled virt_addr: 0x%llx,\nphys_addr: 0x%llx, scrambled_phys_addr: 0x%llx\n",
461 			dev_entry->mmu_asid, dev_entry->mmu_addr,
462 			hops_info.scrambled_vaddr,
463 			hops_info.unscrambled_paddr, phys_addr);
464 	else
465 		seq_printf(s,
466 			"asid: %u, virt_addr: 0x%llx, phys_addr: 0x%llx\n",
467 			dev_entry->mmu_asid, dev_entry->mmu_addr, phys_addr);
468 
469 	for (i = 0 ; i < hops_info.used_hops ; i++) {
470 		seq_printf(s, "hop%d_addr: 0x%llx\n",
471 				i, hops_info.hop_info[i].hop_addr);
472 		seq_printf(s, "hop%d_pte_addr: 0x%llx\n",
473 				i, hops_info.hop_info[i].hop_pte_addr);
474 		seq_printf(s, "hop%d_pte: 0x%llx\n",
475 				i, hops_info.hop_info[i].hop_pte_val);
476 	}
477 
478 	return 0;
479 }
480 
mmu_asid_va_write(struct file * file,const char __user * buf,size_t count,loff_t * f_pos)481 static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
482 		size_t count, loff_t *f_pos)
483 {
484 	struct seq_file *s = file->private_data;
485 	struct hl_debugfs_entry *entry = s->private;
486 	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
487 	struct hl_device *hdev = dev_entry->hdev;
488 	char kbuf[MMU_KBUF_SIZE];
489 	char *c;
490 	ssize_t rc;
491 
492 	if (!hdev->mmu_enable)
493 		return count;
494 
495 	if (count > sizeof(kbuf) - 1)
496 		goto err;
497 	if (copy_from_user(kbuf, buf, count))
498 		goto err;
499 	kbuf[count] = 0;
500 
501 	c = strchr(kbuf, ' ');
502 	if (!c)
503 		goto err;
504 	*c = '\0';
505 
506 	rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
507 	if (rc)
508 		goto err;
509 
510 	if (strncmp(c+1, "0x", 2))
511 		goto err;
512 	rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
513 	if (rc)
514 		goto err;
515 
516 	return count;
517 
518 err:
519 	dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");
520 
521 	return -EINVAL;
522 }
523 
engines_show(struct seq_file * s,void * data)524 static int engines_show(struct seq_file *s, void *data)
525 {
526 	struct hl_debugfs_entry *entry = s->private;
527 	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
528 	struct hl_device *hdev = dev_entry->hdev;
529 
530 	if (hdev->reset_info.in_reset) {
531 		dev_warn_ratelimited(hdev->dev,
532 				"Can't check device idle during reset\n");
533 		return 0;
534 	}
535 
536 	hdev->asic_funcs->is_device_idle(hdev, NULL, 0, s);
537 
538 	return 0;
539 }
540 
hl_memory_scrub(struct file * f,const char __user * buf,size_t count,loff_t * ppos)541 static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
542 					size_t count, loff_t *ppos)
543 {
544 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
545 	struct hl_device *hdev = entry->hdev;
546 	u64 val = entry->memory_scrub_val;
547 	int rc;
548 
549 	if (!hl_device_operational(hdev, NULL)) {
550 		dev_warn_ratelimited(hdev->dev, "Can't scrub memory, device is not operational\n");
551 		return -EIO;
552 	}
553 
554 	mutex_lock(&hdev->fpriv_list_lock);
555 	if (hdev->is_compute_ctx_active) {
556 		mutex_unlock(&hdev->fpriv_list_lock);
557 		dev_err(hdev->dev, "can't scrub dram, context exist\n");
558 		return -EBUSY;
559 	}
560 	hdev->is_in_dram_scrub = true;
561 	mutex_unlock(&hdev->fpriv_list_lock);
562 
563 	rc = hdev->asic_funcs->scrub_device_dram(hdev, val);
564 
565 	mutex_lock(&hdev->fpriv_list_lock);
566 	hdev->is_in_dram_scrub = false;
567 	mutex_unlock(&hdev->fpriv_list_lock);
568 
569 	if (rc)
570 		return rc;
571 	return count;
572 }
573 
hl_is_device_va(struct hl_device * hdev,u64 addr)574 static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
575 {
576 	struct asic_fixed_properties *prop = &hdev->asic_prop;
577 
578 	if (!hdev->mmu_enable)
579 		goto out;
580 
581 	if (prop->dram_supports_virtual_memory &&
582 		(addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
583 		return true;
584 
585 	if (addr >= prop->pmmu.start_addr &&
586 		addr < prop->pmmu.end_addr)
587 		return true;
588 
589 	if (addr >= prop->pmmu_huge.start_addr &&
590 		addr < prop->pmmu_huge.end_addr)
591 		return true;
592 out:
593 	return false;
594 }
595 
hl_is_device_internal_memory_va(struct hl_device * hdev,u64 addr,u32 size)596 static bool hl_is_device_internal_memory_va(struct hl_device *hdev, u64 addr,
597 						u32 size)
598 {
599 	struct asic_fixed_properties *prop = &hdev->asic_prop;
600 	u64 dram_start_addr, dram_end_addr;
601 
602 	if (!hdev->mmu_enable)
603 		return false;
604 
605 	if (prop->dram_supports_virtual_memory) {
606 		dram_start_addr = prop->dmmu.start_addr;
607 		dram_end_addr = prop->dmmu.end_addr;
608 	} else {
609 		dram_start_addr = prop->dram_base_address;
610 		dram_end_addr = prop->dram_end_address;
611 	}
612 
613 	if (hl_mem_area_inside_range(addr, size, dram_start_addr,
614 					dram_end_addr))
615 		return true;
616 
617 	if (hl_mem_area_inside_range(addr, size, prop->sram_base_address,
618 					prop->sram_end_address))
619 		return true;
620 
621 	return false;
622 }
623 
/*
 * Translate a device virtual address of the current compute context to a
 * physical address, after verifying [@virt_addr, @virt_addr + @size) is
 * fully covered by an existing mapping in the context's mem_hash.
 * Returns 0 with the translation in *phys_addr, or -EINVAL.
 *
 * Fix: hl_get_compute_ctx() takes a reference on the context, but the
 * original code returned without hl_ctx_put() on both the "not mapped"
 * error path and the normal path, leaking a ctx reference per call.
 * All exits after acquisition now funnel through put_ctx.
 */
static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
			u64 *phys_addr)
{
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct hl_ctx *ctx;
	struct hl_vm_hash_node *hnode;
	u64 end_address, range_size;
	struct hl_userptr *userptr;
	enum vm_type *vm_type;
	bool valid = false;
	int i, rc = 0;

	ctx = hl_get_compute_ctx(hdev);

	if (!ctx) {
		dev_err(hdev->dev, "no ctx available\n");
		return -EINVAL;
	}

	/* Verify address is mapped */
	mutex_lock(&ctx->mem_hash_lock);
	hash_for_each(ctx->mem_hash, i, hnode, node) {
		/* First field of both payload types is the vm_type tag */
		vm_type = hnode->ptr;

		if (*vm_type == VM_TYPE_USERPTR) {
			userptr = hnode->ptr;
			range_size = userptr->size;
		} else {
			phys_pg_pack = hnode->ptr;
			range_size = phys_pg_pack->total_size;
		}

		end_address = virt_addr + size;
		if ((virt_addr >= hnode->vaddr) &&
				(end_address <= hnode->vaddr + range_size)) {
			valid = true;
			break;
		}
	}
	mutex_unlock(&ctx->mem_hash_lock);

	if (!valid) {
		dev_err(hdev->dev,
			"virt addr 0x%llx is not mapped\n",
			virt_addr);
		rc = -EINVAL;
		goto put_ctx;
	}

	rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr);
	if (rc) {
		dev_err(hdev->dev,
			"virt addr 0x%llx is not mapped to phys addr\n",
			virt_addr);
		rc = -EINVAL;
	}

put_ctx:
	/* Drop the reference taken by hl_get_compute_ctx() */
	hl_ctx_put(ctx);

	return rc;
}
682 
/*
 * Try to service a debugfs memory access through one of the device's PCI
 * memory regions. If @addr (with room for the access width) falls inside
 * a used region, *found is set and the ASIC access callback's result is
 * returned; otherwise 0 is returned with *found untouched.
 */
static int hl_access_dev_mem_by_region(struct hl_device *hdev, u64 addr,
		u64 *val, enum debugfs_access_type acc_type, bool *found)
{
	size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
		sizeof(u64) : sizeof(u32);
	struct pci_mem_region *region;
	int i;

	for (i = 0; i < PCI_REGION_NUMBER; i++) {
		region = &hdev->pci_mem_region[i];

		if (!region->used)
			continue;

		/* Whole access (acc_size bytes) must fit inside the region */
		if (addr >= region->region_base &&
		    addr <= region->region_base + region->region_size - acc_size) {
			*found = true;
			return hdev->asic_funcs->access_dev_mem(hdev, region, i,
				addr, val, acc_type);
		}
	}

	return 0;
}
704 
/*
 * Perform a debugfs access to host memory that the device addresses
 * through its DMA window: the device-side address is converted back to a
 * host physical address by subtracting the DMA offset, then accessed via
 * the kernel direct map.
 *
 * Fix: corrected typo in the error message ("id not" -> "is not").
 */
static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
		enum debugfs_access_type acc_type)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 offset = prop->device_dma_offset_for_host_access;

	switch (acc_type) {
	case DEBUGFS_READ32:
		*val = *(u32 *) phys_to_virt(addr - offset);
		break;
	case DEBUGFS_WRITE32:
		*(u32 *) phys_to_virt(addr - offset) = *val;
		break;
	case DEBUGFS_READ64:
		*val = *(u64 *) phys_to_virt(addr - offset);
		break;
	case DEBUGFS_WRITE64:
		*(u64 *) phys_to_virt(addr - offset) = *val;
		break;
	default:
		dev_err(hdev->dev, "hostmem access-type %d is not supported\n", acc_type);
		break;
	}
}
729 
/*
 * Central debugfs memory accessor. Resolution order:
 *  1. A device VA is first translated to a physical address.
 *  2. The address is matched against the device's PCI memory regions.
 *  3. Otherwise, if it was a (translated) user address, it may be host
 *     memory reachable through the DMA window — but only when no IOMMU
 *     stands between the device and the host.
 * Returns 0 on success, negative errno otherwise.
 */
static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
	enum debugfs_access_type acc_type)
{
	size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
		sizeof(u64) : sizeof(u32);
	u64 host_start = hdev->asic_prop.host_base_address;
	u64 host_end = hdev->asic_prop.host_end_address;
	bool user_address, found = false;
	int rc;

	user_address = hl_is_device_va(hdev, addr);
	if (user_address) {
		rc = device_va_to_pa(hdev, addr, acc_size, &addr);
		if (rc)
			return rc;
	}

	rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
	if (rc) {
		dev_err(hdev->dev,
			"Failed reading addr %#llx from dev mem (%d)\n",
			addr, rc);
		return rc;
	}

	if (found)
		return 0;

	/* Host-memory fallback: user addresses only, and only without IOMMU */
	if (user_address && !device_iommu_mapped(&hdev->pdev->dev) &&
	    addr >= host_start && addr <= host_end - acc_size) {
		hl_access_host_mem(hdev, addr, val, acc_type);
		return 0;
	}

	dev_err(hdev->dev, "invalid addr %#llx\n", addr);
	return -EINVAL;
}
775 
hl_data_read32(struct file * f,char __user * buf,size_t count,loff_t * ppos)776 static ssize_t hl_data_read32(struct file *f, char __user *buf,
777 					size_t count, loff_t *ppos)
778 {
779 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
780 	struct hl_device *hdev = entry->hdev;
781 	u64 value64, addr = entry->addr;
782 	char tmp_buf[32];
783 	ssize_t rc;
784 	u32 val;
785 
786 	if (hdev->reset_info.in_reset) {
787 		dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
788 		return 0;
789 	}
790 
791 	if (*ppos)
792 		return 0;
793 
794 	rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_READ32);
795 	if (rc)
796 		return rc;
797 
798 	val = value64; /* downcast back to 32 */
799 
800 	sprintf(tmp_buf, "0x%08x\n", val);
801 	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
802 			strlen(tmp_buf));
803 }
804 
hl_data_write32(struct file * f,const char __user * buf,size_t count,loff_t * ppos)805 static ssize_t hl_data_write32(struct file *f, const char __user *buf,
806 					size_t count, loff_t *ppos)
807 {
808 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
809 	struct hl_device *hdev = entry->hdev;
810 	u64 value64, addr = entry->addr;
811 	u32 value;
812 	ssize_t rc;
813 
814 	if (hdev->reset_info.in_reset) {
815 		dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
816 		return 0;
817 	}
818 
819 	rc = kstrtouint_from_user(buf, count, 16, &value);
820 	if (rc)
821 		return rc;
822 
823 	value64 = value;
824 	rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_WRITE32);
825 	if (rc)
826 		return rc;
827 
828 	return count;
829 }
830 
hl_data_read64(struct file * f,char __user * buf,size_t count,loff_t * ppos)831 static ssize_t hl_data_read64(struct file *f, char __user *buf,
832 					size_t count, loff_t *ppos)
833 {
834 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
835 	struct hl_device *hdev = entry->hdev;
836 	u64 addr = entry->addr;
837 	char tmp_buf[32];
838 	ssize_t rc;
839 	u64 val;
840 
841 	if (hdev->reset_info.in_reset) {
842 		dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
843 		return 0;
844 	}
845 
846 	if (*ppos)
847 		return 0;
848 
849 	rc = hl_access_mem(hdev, addr, &val, DEBUGFS_READ64);
850 	if (rc)
851 		return rc;
852 
853 	sprintf(tmp_buf, "0x%016llx\n", val);
854 	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
855 			strlen(tmp_buf));
856 }
857 
hl_data_write64(struct file * f,const char __user * buf,size_t count,loff_t * ppos)858 static ssize_t hl_data_write64(struct file *f, const char __user *buf,
859 					size_t count, loff_t *ppos)
860 {
861 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
862 	struct hl_device *hdev = entry->hdev;
863 	u64 addr = entry->addr;
864 	u64 value;
865 	ssize_t rc;
866 
867 	if (hdev->reset_info.in_reset) {
868 		dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
869 		return 0;
870 	}
871 
872 	rc = kstrtoull_from_user(buf, count, 16, &value);
873 	if (rc)
874 		return rc;
875 
876 	rc = hl_access_mem(hdev, addr, &value, DEBUGFS_WRITE64);
877 	if (rc)
878 		return rc;
879 
880 	return count;
881 }
882 
hl_dma_size_write(struct file * f,const char __user * buf,size_t count,loff_t * ppos)883 static ssize_t hl_dma_size_write(struct file *f, const char __user *buf,
884 					size_t count, loff_t *ppos)
885 {
886 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
887 	struct hl_device *hdev = entry->hdev;
888 	u64 addr = entry->addr;
889 	ssize_t rc;
890 	u32 size;
891 
892 	if (hdev->reset_info.in_reset) {
893 		dev_warn_ratelimited(hdev->dev, "Can't DMA during reset\n");
894 		return 0;
895 	}
896 	rc = kstrtouint_from_user(buf, count, 16, &size);
897 	if (rc)
898 		return rc;
899 
900 	if (!size) {
901 		dev_err(hdev->dev, "DMA read failed. size can't be 0\n");
902 		return -EINVAL;
903 	}
904 
905 	if (size > SZ_128M) {
906 		dev_err(hdev->dev,
907 			"DMA read failed. size can't be larger than 128MB\n");
908 		return -EINVAL;
909 	}
910 
911 	if (!hl_is_device_internal_memory_va(hdev, addr, size)) {
912 		dev_err(hdev->dev,
913 			"DMA read failed. Invalid 0x%010llx + 0x%08x\n",
914 			addr, size);
915 		return -EINVAL;
916 	}
917 
918 	/* Free the previous allocation, if there was any */
919 	entry->data_dma_blob_desc.size = 0;
920 	vfree(entry->data_dma_blob_desc.data);
921 
922 	entry->data_dma_blob_desc.data = vmalloc(size);
923 	if (!entry->data_dma_blob_desc.data)
924 		return -ENOMEM;
925 
926 	rc = hdev->asic_funcs->debugfs_read_dma(hdev, addr, size,
927 						entry->data_dma_blob_desc.data);
928 	if (rc) {
929 		dev_err(hdev->dev, "Failed to DMA from 0x%010llx\n", addr);
930 		vfree(entry->data_dma_blob_desc.data);
931 		entry->data_dma_blob_desc.data = NULL;
932 		return -EIO;
933 	}
934 
935 	entry->data_dma_blob_desc.size = size;
936 
937 	return count;
938 }
939 
hl_monitor_dump_trigger(struct file * f,const char __user * buf,size_t count,loff_t * ppos)940 static ssize_t hl_monitor_dump_trigger(struct file *f, const char __user *buf,
941 					size_t count, loff_t *ppos)
942 {
943 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
944 	struct hl_device *hdev = entry->hdev;
945 	u32 size, trig;
946 	ssize_t rc;
947 
948 	if (hdev->reset_info.in_reset) {
949 		dev_warn_ratelimited(hdev->dev, "Can't dump monitors during reset\n");
950 		return 0;
951 	}
952 	rc = kstrtouint_from_user(buf, count, 10, &trig);
953 	if (rc)
954 		return rc;
955 
956 	if (trig != 1) {
957 		dev_err(hdev->dev, "Must write 1 to trigger monitor dump\n");
958 		return -EINVAL;
959 	}
960 
961 	size = sizeof(struct cpucp_monitor_dump);
962 
963 	/* Free the previous allocation, if there was any */
964 	entry->mon_dump_blob_desc.size = 0;
965 	vfree(entry->mon_dump_blob_desc.data);
966 
967 	entry->mon_dump_blob_desc.data = vmalloc(size);
968 	if (!entry->mon_dump_blob_desc.data)
969 		return -ENOMEM;
970 
971 	rc = hdev->asic_funcs->get_monitor_dump(hdev, entry->mon_dump_blob_desc.data);
972 	if (rc) {
973 		dev_err(hdev->dev, "Failed to dump monitors\n");
974 		vfree(entry->mon_dump_blob_desc.data);
975 		entry->mon_dump_blob_desc.data = NULL;
976 		return -EIO;
977 	}
978 
979 	entry->mon_dump_blob_desc.size = size;
980 
981 	return count;
982 }
983 
hl_get_power_state(struct file * f,char __user * buf,size_t count,loff_t * ppos)984 static ssize_t hl_get_power_state(struct file *f, char __user *buf,
985 		size_t count, loff_t *ppos)
986 {
987 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
988 	struct hl_device *hdev = entry->hdev;
989 	char tmp_buf[200];
990 	int i;
991 
992 	if (*ppos)
993 		return 0;
994 
995 	if (hdev->pdev->current_state == PCI_D0)
996 		i = 1;
997 	else if (hdev->pdev->current_state == PCI_D3hot)
998 		i = 2;
999 	else
1000 		i = 3;
1001 
1002 	sprintf(tmp_buf,
1003 		"current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
1004 	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
1005 			strlen(tmp_buf));
1006 }
1007 
hl_set_power_state(struct file * f,const char __user * buf,size_t count,loff_t * ppos)1008 static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
1009 					size_t count, loff_t *ppos)
1010 {
1011 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
1012 	struct hl_device *hdev = entry->hdev;
1013 	u32 value;
1014 	ssize_t rc;
1015 
1016 	rc = kstrtouint_from_user(buf, count, 10, &value);
1017 	if (rc)
1018 		return rc;
1019 
1020 	if (value == 1) {
1021 		pci_set_power_state(hdev->pdev, PCI_D0);
1022 		pci_restore_state(hdev->pdev);
1023 		rc = pci_enable_device(hdev->pdev);
1024 		if (rc < 0)
1025 			return rc;
1026 	} else if (value == 2) {
1027 		pci_save_state(hdev->pdev);
1028 		pci_disable_device(hdev->pdev);
1029 		pci_set_power_state(hdev->pdev, PCI_D3hot);
1030 	} else {
1031 		dev_dbg(hdev->dev, "invalid power state value %u\n", value);
1032 		return -EINVAL;
1033 	}
1034 
1035 	return count;
1036 }
1037 
hl_i2c_data_read(struct file * f,char __user * buf,size_t count,loff_t * ppos)1038 static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
1039 					size_t count, loff_t *ppos)
1040 {
1041 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
1042 	struct hl_device *hdev = entry->hdev;
1043 	char tmp_buf[32];
1044 	u64 val;
1045 	ssize_t rc;
1046 
1047 	if (*ppos)
1048 		return 0;
1049 
1050 	rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
1051 			entry->i2c_reg, entry->i2c_len, &val);
1052 	if (rc) {
1053 		dev_err(hdev->dev,
1054 			"Failed to read from I2C bus %d, addr %d, reg %d, len %d\n",
1055 			entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
1056 		return rc;
1057 	}
1058 
1059 	sprintf(tmp_buf, "%#02llx\n", val);
1060 	rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
1061 			strlen(tmp_buf));
1062 
1063 	return rc;
1064 }
1065 
hl_i2c_data_write(struct file * f,const char __user * buf,size_t count,loff_t * ppos)1066 static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
1067 					size_t count, loff_t *ppos)
1068 {
1069 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
1070 	struct hl_device *hdev = entry->hdev;
1071 	u64 value;
1072 	ssize_t rc;
1073 
1074 	rc = kstrtou64_from_user(buf, count, 16, &value);
1075 	if (rc)
1076 		return rc;
1077 
1078 	rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
1079 			entry->i2c_reg, entry->i2c_len, value);
1080 	if (rc) {
1081 		dev_err(hdev->dev,
1082 			"Failed to write %#02llx to I2C bus %d, addr %d, reg %d, len %d\n",
1083 			value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
1084 		return rc;
1085 	}
1086 
1087 	return count;
1088 }
1089 
hl_led0_write(struct file * f,const char __user * buf,size_t count,loff_t * ppos)1090 static ssize_t hl_led0_write(struct file *f, const char __user *buf,
1091 					size_t count, loff_t *ppos)
1092 {
1093 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
1094 	struct hl_device *hdev = entry->hdev;
1095 	u32 value;
1096 	ssize_t rc;
1097 
1098 	rc = kstrtouint_from_user(buf, count, 10, &value);
1099 	if (rc)
1100 		return rc;
1101 
1102 	value = value ? 1 : 0;
1103 
1104 	hl_debugfs_led_set(hdev, 0, value);
1105 
1106 	return count;
1107 }
1108 
hl_led1_write(struct file * f,const char __user * buf,size_t count,loff_t * ppos)1109 static ssize_t hl_led1_write(struct file *f, const char __user *buf,
1110 					size_t count, loff_t *ppos)
1111 {
1112 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
1113 	struct hl_device *hdev = entry->hdev;
1114 	u32 value;
1115 	ssize_t rc;
1116 
1117 	rc = kstrtouint_from_user(buf, count, 10, &value);
1118 	if (rc)
1119 		return rc;
1120 
1121 	value = value ? 1 : 0;
1122 
1123 	hl_debugfs_led_set(hdev, 1, value);
1124 
1125 	return count;
1126 }
1127 
hl_led2_write(struct file * f,const char __user * buf,size_t count,loff_t * ppos)1128 static ssize_t hl_led2_write(struct file *f, const char __user *buf,
1129 					size_t count, loff_t *ppos)
1130 {
1131 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
1132 	struct hl_device *hdev = entry->hdev;
1133 	u32 value;
1134 	ssize_t rc;
1135 
1136 	rc = kstrtouint_from_user(buf, count, 10, &value);
1137 	if (rc)
1138 		return rc;
1139 
1140 	value = value ? 1 : 0;
1141 
1142 	hl_debugfs_led_set(hdev, 2, value);
1143 
1144 	return count;
1145 }
1146 
hl_device_read(struct file * f,char __user * buf,size_t count,loff_t * ppos)1147 static ssize_t hl_device_read(struct file *f, char __user *buf,
1148 					size_t count, loff_t *ppos)
1149 {
1150 	static const char *help =
1151 		"Valid values: disable, enable, suspend, resume, cpu_timeout\n";
1152 	return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
1153 }
1154 
hl_device_write(struct file * f,const char __user * buf,size_t count,loff_t * ppos)1155 static ssize_t hl_device_write(struct file *f, const char __user *buf,
1156 				     size_t count, loff_t *ppos)
1157 {
1158 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
1159 	struct hl_device *hdev = entry->hdev;
1160 	char data[30] = {0};
1161 
1162 	/* don't allow partial writes */
1163 	if (*ppos != 0)
1164 		return 0;
1165 
1166 	simple_write_to_buffer(data, 29, ppos, buf, count);
1167 
1168 	if (strncmp("disable", data, strlen("disable")) == 0) {
1169 		hdev->disabled = true;
1170 	} else if (strncmp("enable", data, strlen("enable")) == 0) {
1171 		hdev->disabled = false;
1172 	} else if (strncmp("suspend", data, strlen("suspend")) == 0) {
1173 		hdev->asic_funcs->suspend(hdev);
1174 	} else if (strncmp("resume", data, strlen("resume")) == 0) {
1175 		hdev->asic_funcs->resume(hdev);
1176 	} else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) {
1177 		hdev->device_cpu_disabled = true;
1178 	} else {
1179 		dev_err(hdev->dev,
1180 			"Valid values: disable, enable, suspend, resume, cpu_timeout\n");
1181 		count = -EINVAL;
1182 	}
1183 
1184 	return count;
1185 }
1186 
/*
 * NOTE(review): stub that always returns EOF — clock gating appears to no
 * longer be readable here; presumably the file is kept so existing tooling
 * that opens "clk_gate" doesn't break. Confirm before removing.
 */
static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	return 0;
}
1192 
/*
 * NOTE(review): stub that accepts and discards all input — writes to
 * "clk_gate" have no effect; presumably kept for interface compatibility.
 */
static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	return count;
}
1198 
hl_stop_on_err_read(struct file * f,char __user * buf,size_t count,loff_t * ppos)1199 static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf,
1200 					size_t count, loff_t *ppos)
1201 {
1202 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
1203 	struct hl_device *hdev = entry->hdev;
1204 	char tmp_buf[200];
1205 	ssize_t rc;
1206 
1207 	if (!hdev->asic_prop.configurable_stop_on_err)
1208 		return -EOPNOTSUPP;
1209 
1210 	if (*ppos)
1211 		return 0;
1212 
1213 	sprintf(tmp_buf, "%d\n", hdev->stop_on_err);
1214 	rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
1215 			strlen(tmp_buf) + 1);
1216 
1217 	return rc;
1218 }
1219 
hl_stop_on_err_write(struct file * f,const char __user * buf,size_t count,loff_t * ppos)1220 static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
1221 				     size_t count, loff_t *ppos)
1222 {
1223 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
1224 	struct hl_device *hdev = entry->hdev;
1225 	u32 value;
1226 	ssize_t rc;
1227 
1228 	if (!hdev->asic_prop.configurable_stop_on_err)
1229 		return -EOPNOTSUPP;
1230 
1231 	if (hdev->reset_info.in_reset) {
1232 		dev_warn_ratelimited(hdev->dev,
1233 				"Can't change stop on error during reset\n");
1234 		return 0;
1235 	}
1236 
1237 	rc = kstrtouint_from_user(buf, count, 10, &value);
1238 	if (rc)
1239 		return rc;
1240 
1241 	hdev->stop_on_err = value ? 1 : 0;
1242 
1243 	hl_device_reset(hdev, 0);
1244 
1245 	return count;
1246 }
1247 
/*
 * Reading this file triggers the ASIC callback that acknowledges/dumps
 * protection-bits errors; no data is returned to userspace (always EOF).
 */
static ssize_t hl_security_violations_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;

	hdev->asic_funcs->ack_protection_bits_errors(hdev);

	return 0;
}
1258 
hl_state_dump_read(struct file * f,char __user * buf,size_t count,loff_t * ppos)1259 static ssize_t hl_state_dump_read(struct file *f, char __user *buf,
1260 					size_t count, loff_t *ppos)
1261 {
1262 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
1263 	ssize_t rc;
1264 
1265 	down_read(&entry->state_dump_sem);
1266 	if (!entry->state_dump[entry->state_dump_head])
1267 		rc = 0;
1268 	else
1269 		rc = simple_read_from_buffer(
1270 			buf, count, ppos,
1271 			entry->state_dump[entry->state_dump_head],
1272 			strlen(entry->state_dump[entry->state_dump_head]));
1273 	up_read(&entry->state_dump_sem);
1274 
1275 	return rc;
1276 }
1277 
hl_state_dump_write(struct file * f,const char __user * buf,size_t count,loff_t * ppos)1278 static ssize_t hl_state_dump_write(struct file *f, const char __user *buf,
1279 					size_t count, loff_t *ppos)
1280 {
1281 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
1282 	struct hl_device *hdev = entry->hdev;
1283 	ssize_t rc;
1284 	u32 size;
1285 	int i;
1286 
1287 	rc = kstrtouint_from_user(buf, count, 10, &size);
1288 	if (rc)
1289 		return rc;
1290 
1291 	if (size <= 0 || size >= ARRAY_SIZE(entry->state_dump)) {
1292 		dev_err(hdev->dev, "Invalid number of dumps to skip\n");
1293 		return -EINVAL;
1294 	}
1295 
1296 	if (entry->state_dump[entry->state_dump_head]) {
1297 		down_write(&entry->state_dump_sem);
1298 		for (i = 0; i < size; ++i) {
1299 			vfree(entry->state_dump[entry->state_dump_head]);
1300 			entry->state_dump[entry->state_dump_head] = NULL;
1301 			if (entry->state_dump_head > 0)
1302 				entry->state_dump_head--;
1303 			else
1304 				entry->state_dump_head =
1305 					ARRAY_SIZE(entry->state_dump) - 1;
1306 		}
1307 		up_write(&entry->state_dump_sem);
1308 	}
1309 
1310 	return count;
1311 }
1312 
hl_timeout_locked_read(struct file * f,char __user * buf,size_t count,loff_t * ppos)1313 static ssize_t hl_timeout_locked_read(struct file *f, char __user *buf,
1314 					size_t count, loff_t *ppos)
1315 {
1316 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
1317 	struct hl_device *hdev = entry->hdev;
1318 	char tmp_buf[200];
1319 	ssize_t rc;
1320 
1321 	if (*ppos)
1322 		return 0;
1323 
1324 	sprintf(tmp_buf, "%d\n",
1325 		jiffies_to_msecs(hdev->timeout_jiffies) / 1000);
1326 	rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
1327 			strlen(tmp_buf) + 1);
1328 
1329 	return rc;
1330 }
1331 
hl_timeout_locked_write(struct file * f,const char __user * buf,size_t count,loff_t * ppos)1332 static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
1333 				     size_t count, loff_t *ppos)
1334 {
1335 	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
1336 	struct hl_device *hdev = entry->hdev;
1337 	u32 value;
1338 	ssize_t rc;
1339 
1340 	rc = kstrtouint_from_user(buf, count, 10, &value);
1341 	if (rc)
1342 		return rc;
1343 
1344 	if (value)
1345 		hdev->timeout_jiffies = msecs_to_jiffies(value * 1000);
1346 	else
1347 		hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
1348 
1349 	return count;
1350 }
1351 
/*
 * File operations for the single-purpose debugfs control files. Read/write
 * handlers are the hl_* functions defined above (or elsewhere in this file).
 */
static const struct file_operations hl_mem_scrub_fops = {
	.owner = THIS_MODULE,
	.write = hl_memory_scrub,
};

static const struct file_operations hl_data32b_fops = {
	.owner = THIS_MODULE,
	.read = hl_data_read32,
	.write = hl_data_write32
};

static const struct file_operations hl_data64b_fops = {
	.owner = THIS_MODULE,
	.read = hl_data_read64,
	.write = hl_data_write64
};

static const struct file_operations hl_dma_size_fops = {
	.owner = THIS_MODULE,
	.write = hl_dma_size_write
};

static const struct file_operations hl_monitor_dump_fops = {
	.owner = THIS_MODULE,
	.write = hl_monitor_dump_trigger
};

static const struct file_operations hl_i2c_data_fops = {
	.owner = THIS_MODULE,
	.read = hl_i2c_data_read,
	.write = hl_i2c_data_write
};

static const struct file_operations hl_power_fops = {
	.owner = THIS_MODULE,
	.read = hl_get_power_state,
	.write = hl_set_power_state
};

static const struct file_operations hl_led0_fops = {
	.owner = THIS_MODULE,
	.write = hl_led0_write
};

static const struct file_operations hl_led1_fops = {
	.owner = THIS_MODULE,
	.write = hl_led1_write
};

static const struct file_operations hl_led2_fops = {
	.owner = THIS_MODULE,
	.write = hl_led2_write
};

static const struct file_operations hl_device_fops = {
	.owner = THIS_MODULE,
	.read = hl_device_read,
	.write = hl_device_write
};

/* clk_gate handlers are stubs; file kept for interface compatibility */
static const struct file_operations hl_clk_gate_fops = {
	.owner = THIS_MODULE,
	.read = hl_clk_gate_read,
	.write = hl_clk_gate_write
};

static const struct file_operations hl_stop_on_err_fops = {
	.owner = THIS_MODULE,
	.read = hl_stop_on_err_read,
	.write = hl_stop_on_err_write
};

/* Read-only: reading acks protection-bits errors */
static const struct file_operations hl_security_violations_fops = {
	.owner = THIS_MODULE,
	.read = hl_security_violations_read
};

static const struct file_operations hl_state_dump_fops = {
	.owner = THIS_MODULE,
	.read = hl_state_dump_read,
	.write = hl_state_dump_write
};

static const struct file_operations hl_timeout_locked_fops = {
	.owner = THIS_MODULE,
	.read = hl_timeout_locked_read,
	.write = hl_timeout_locked_write
};
1440 
/*
 * seq_file based info nodes: {file name, show handler, optional write
 * handler}. Each gets a debugfs file wired through hl_debugfs_fops.
 */
static const struct hl_info_list hl_debugfs_list[] = {
	{"command_buffers", command_buffers_show, NULL},
	{"command_submission", command_submission_show, NULL},
	{"command_submission_jobs", command_submission_jobs_show, NULL},
	{"userptr", userptr_show, NULL},
	{"vm", vm_show, NULL},
	{"userptr_lookup", userptr_lookup_show, userptr_lookup_write},
	{"mmu", mmu_show, mmu_asid_va_write},
	{"engines", engines_show, NULL}
};
1451 
hl_debugfs_open(struct inode * inode,struct file * file)1452 static int hl_debugfs_open(struct inode *inode, struct file *file)
1453 {
1454 	struct hl_debugfs_entry *node = inode->i_private;
1455 
1456 	return single_open(file, node->info_ent->show, node);
1457 }
1458 
hl_debugfs_write(struct file * file,const char __user * buf,size_t count,loff_t * f_pos)1459 static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
1460 		size_t count, loff_t *f_pos)
1461 {
1462 	struct hl_debugfs_entry *node = file->f_inode->i_private;
1463 
1464 	if (node->info_ent->write)
1465 		return node->info_ent->write(file, buf, count, f_pos);
1466 	else
1467 		return -EINVAL;
1468 
1469 }
1470 
/* Shared fops for all seq_file based info nodes in hl_debugfs_list */
static const struct file_operations hl_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = hl_debugfs_open,
	.read = seq_read,
	.write = hl_debugfs_write,
	.llseek = seq_lseek,
	.release = single_release,
};
1479 
/**
 * hl_debugfs_add_device - create this device's debugfs hierarchy
 * @hdev: pointer to the device structure
 *
 * Creates a per-device directory under the driver's debugfs root and
 * populates it with control files, binary blobs and the seq_file based
 * info nodes from hl_debugfs_list. On allocation failure the function
 * silently skips debugfs creation (debugfs is best-effort).
 */
void hl_debugfs_add_device(struct hl_device *hdev)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
	int count = ARRAY_SIZE(hl_debugfs_list);
	struct hl_debugfs_entry *entry;
	int i;

	dev_entry->hdev = hdev;
	dev_entry->entry_arr = kmalloc_array(count,
					sizeof(struct hl_debugfs_entry),
					GFP_KERNEL);
	if (!dev_entry->entry_arr)
		return;

	/* Blobs start empty; their buffers are allocated on demand by the
	 * corresponding trigger files.
	 */
	dev_entry->data_dma_blob_desc.size = 0;
	dev_entry->data_dma_blob_desc.data = NULL;
	dev_entry->mon_dump_blob_desc.size = 0;
	dev_entry->mon_dump_blob_desc.data = NULL;

	/* Initialize bookkeeping lists and their locks */
	INIT_LIST_HEAD(&dev_entry->file_list);
	INIT_LIST_HEAD(&dev_entry->cb_list);
	INIT_LIST_HEAD(&dev_entry->cs_list);
	INIT_LIST_HEAD(&dev_entry->cs_job_list);
	INIT_LIST_HEAD(&dev_entry->userptr_list);
	INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
	mutex_init(&dev_entry->file_mutex);
	init_rwsem(&dev_entry->state_dump_sem);
	spin_lock_init(&dev_entry->cb_spinlock);
	spin_lock_init(&dev_entry->cs_spinlock);
	spin_lock_init(&dev_entry->cs_job_spinlock);
	spin_lock_init(&dev_entry->userptr_spinlock);
	spin_lock_init(&dev_entry->ctx_mem_hash_spinlock);

	/* Per-device directory, named after the device */
	dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
						hl_debug_root);

	debugfs_create_x64("memory_scrub_val",
				0644,
				dev_entry->root,
				&dev_entry->memory_scrub_val);

	debugfs_create_file("memory_scrub",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_mem_scrub_fops);

	/* "addr" selects the target address for data32/data64 accesses */
	debugfs_create_x64("addr",
				0644,
				dev_entry->root,
				&dev_entry->addr);

	debugfs_create_file("data32",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_data32b_fops);

	debugfs_create_file("data64",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_data64b_fops);

	debugfs_create_file("set_power_state",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_power_fops);

	/* I2C transaction parameters consumed by the "i2c_data" file */
	debugfs_create_u8("i2c_bus",
				0644,
				dev_entry->root,
				&dev_entry->i2c_bus);

	debugfs_create_u8("i2c_addr",
				0644,
				dev_entry->root,
				&dev_entry->i2c_addr);

	debugfs_create_u8("i2c_reg",
				0644,
				dev_entry->root,
				&dev_entry->i2c_reg);

	debugfs_create_u8("i2c_len",
				0644,
				dev_entry->root,
				&dev_entry->i2c_len);

	debugfs_create_file("i2c_data",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_i2c_data_fops);

	debugfs_create_file("led0",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_led0_fops);

	debugfs_create_file("led1",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_led1_fops);

	debugfs_create_file("led2",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_led2_fops);

	debugfs_create_file("device",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_device_fops);

	debugfs_create_file("clk_gate",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_clk_gate_fops);

	debugfs_create_file("stop_on_err",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_stop_on_err_fops);

	debugfs_create_file("dump_security_violations",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_security_violations_fops);

	/* DMA read trigger plus the blob it fills */
	debugfs_create_file("dma_size",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_dma_size_fops);

	debugfs_create_blob("data_dma",
				0400,
				dev_entry->root,
				&dev_entry->data_dma_blob_desc);

	/* Monitor dump trigger plus the blob it fills */
	debugfs_create_file("monitor_dump_trig",
				0200,
				dev_entry->root,
				dev_entry,
				&hl_monitor_dump_fops);

	debugfs_create_blob("monitor_dump",
				0400,
				dev_entry->root,
				&dev_entry->mon_dump_blob_desc);

	debugfs_create_x8("skip_reset_on_timeout",
				0644,
				dev_entry->root,
				&hdev->reset_info.skip_reset_on_timeout);

	debugfs_create_file("state_dump",
				0600,
				dev_entry->root,
				dev_entry,
				&hl_state_dump_fops);

	debugfs_create_file("timeout_locked",
				0644,
				dev_entry->root,
				dev_entry,
				&hl_timeout_locked_fops);

	/* One seq_file node per hl_debugfs_list entry */
	for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
		debugfs_create_file(hl_debugfs_list[i].name,
					0444,
					dev_entry->root,
					entry,
					&hl_debugfs_fops);
		entry->info_ent = &hl_debugfs_list[i];
		entry->dev_entry = dev_entry;
	}
}
1667 
hl_debugfs_remove_device(struct hl_device * hdev)1668 void hl_debugfs_remove_device(struct hl_device *hdev)
1669 {
1670 	struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
1671 	int i;
1672 
1673 	debugfs_remove_recursive(entry->root);
1674 
1675 	mutex_destroy(&entry->file_mutex);
1676 
1677 	vfree(entry->data_dma_blob_desc.data);
1678 	vfree(entry->mon_dump_blob_desc.data);
1679 
1680 	for (i = 0; i < ARRAY_SIZE(entry->state_dump); ++i)
1681 		vfree(entry->state_dump[i]);
1682 
1683 	kfree(entry->entry_arr);
1684 }
1685 
hl_debugfs_add_file(struct hl_fpriv * hpriv)1686 void hl_debugfs_add_file(struct hl_fpriv *hpriv)
1687 {
1688 	struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
1689 
1690 	mutex_lock(&dev_entry->file_mutex);
1691 	list_add(&hpriv->debugfs_list, &dev_entry->file_list);
1692 	mutex_unlock(&dev_entry->file_mutex);
1693 }
1694 
hl_debugfs_remove_file(struct hl_fpriv * hpriv)1695 void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
1696 {
1697 	struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;
1698 
1699 	mutex_lock(&dev_entry->file_mutex);
1700 	list_del(&hpriv->debugfs_list);
1701 	mutex_unlock(&dev_entry->file_mutex);
1702 }
1703 
hl_debugfs_add_cb(struct hl_cb * cb)1704 void hl_debugfs_add_cb(struct hl_cb *cb)
1705 {
1706 	struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
1707 
1708 	spin_lock(&dev_entry->cb_spinlock);
1709 	list_add(&cb->debugfs_list, &dev_entry->cb_list);
1710 	spin_unlock(&dev_entry->cb_spinlock);
1711 }
1712 
hl_debugfs_remove_cb(struct hl_cb * cb)1713 void hl_debugfs_remove_cb(struct hl_cb *cb)
1714 {
1715 	struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;
1716 
1717 	spin_lock(&dev_entry->cb_spinlock);
1718 	list_del(&cb->debugfs_list);
1719 	spin_unlock(&dev_entry->cb_spinlock);
1720 }
1721 
hl_debugfs_add_cs(struct hl_cs * cs)1722 void hl_debugfs_add_cs(struct hl_cs *cs)
1723 {
1724 	struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
1725 
1726 	spin_lock(&dev_entry->cs_spinlock);
1727 	list_add(&cs->debugfs_list, &dev_entry->cs_list);
1728 	spin_unlock(&dev_entry->cs_spinlock);
1729 }
1730 
hl_debugfs_remove_cs(struct hl_cs * cs)1731 void hl_debugfs_remove_cs(struct hl_cs *cs)
1732 {
1733 	struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;
1734 
1735 	spin_lock(&dev_entry->cs_spinlock);
1736 	list_del(&cs->debugfs_list);
1737 	spin_unlock(&dev_entry->cs_spinlock);
1738 }
1739 
hl_debugfs_add_job(struct hl_device * hdev,struct hl_cs_job * job)1740 void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
1741 {
1742 	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1743 
1744 	spin_lock(&dev_entry->cs_job_spinlock);
1745 	list_add(&job->debugfs_list, &dev_entry->cs_job_list);
1746 	spin_unlock(&dev_entry->cs_job_spinlock);
1747 }
1748 
hl_debugfs_remove_job(struct hl_device * hdev,struct hl_cs_job * job)1749 void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
1750 {
1751 	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1752 
1753 	spin_lock(&dev_entry->cs_job_spinlock);
1754 	list_del(&job->debugfs_list);
1755 	spin_unlock(&dev_entry->cs_job_spinlock);
1756 }
1757 
hl_debugfs_add_userptr(struct hl_device * hdev,struct hl_userptr * userptr)1758 void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
1759 {
1760 	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1761 
1762 	spin_lock(&dev_entry->userptr_spinlock);
1763 	list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
1764 	spin_unlock(&dev_entry->userptr_spinlock);
1765 }
1766 
hl_debugfs_remove_userptr(struct hl_device * hdev,struct hl_userptr * userptr)1767 void hl_debugfs_remove_userptr(struct hl_device *hdev,
1768 				struct hl_userptr *userptr)
1769 {
1770 	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1771 
1772 	spin_lock(&dev_entry->userptr_spinlock);
1773 	list_del(&userptr->debugfs_list);
1774 	spin_unlock(&dev_entry->userptr_spinlock);
1775 }
1776 
hl_debugfs_add_ctx_mem_hash(struct hl_device * hdev,struct hl_ctx * ctx)1777 void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
1778 {
1779 	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1780 
1781 	spin_lock(&dev_entry->ctx_mem_hash_spinlock);
1782 	list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
1783 	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
1784 }
1785 
hl_debugfs_remove_ctx_mem_hash(struct hl_device * hdev,struct hl_ctx * ctx)1786 void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
1787 {
1788 	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1789 
1790 	spin_lock(&dev_entry->ctx_mem_hash_spinlock);
1791 	list_del(&ctx->debugfs_list);
1792 	spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
1793 }
1794 
1795 /**
1796  * hl_debugfs_set_state_dump - register state dump making it accessible via
1797  *                             debugfs
1798  * @hdev: pointer to the device structure
1799  * @data: the actual dump data
1800  * @length: the length of the data
1801  */
hl_debugfs_set_state_dump(struct hl_device * hdev,char * data,unsigned long length)1802 void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
1803 					unsigned long length)
1804 {
1805 	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
1806 
1807 	down_write(&dev_entry->state_dump_sem);
1808 
1809 	dev_entry->state_dump_head = (dev_entry->state_dump_head + 1) %
1810 					ARRAY_SIZE(dev_entry->state_dump);
1811 	vfree(dev_entry->state_dump[dev_entry->state_dump_head]);
1812 	dev_entry->state_dump[dev_entry->state_dump_head] = data;
1813 
1814 	up_write(&dev_entry->state_dump_sem);
1815 }
1816 
/* Create the driver-wide debugfs root; per-device dirs hang beneath it. */
void __init hl_debugfs_init(void)
{
	hl_debug_root = debugfs_create_dir("habanalabs", NULL);
}
1821 
/* Remove the driver-wide debugfs root and everything under it. */
void hl_debugfs_fini(void)
{
	debugfs_remove_recursive(hl_debug_root);
}
1826