1 /* SPDX-License-Identifier: GPL-2.0
2  *
3  * Copyright 2016-2022 HabanaLabs, Ltd.
4  * All Rights Reserved.
5  *
6  */
7 
8 #ifndef HABANALABSP_H_
9 #define HABANALABSP_H_
10 
11 #include "../include/common/cpucp_if.h"
12 #include "../include/common/qman_if.h"
13 #include "../include/hw_ip/mmu/mmu_general.h"
14 #include <uapi/misc/habanalabs.h>
15 
16 #include <linux/cdev.h>
17 #include <linux/iopoll.h>
18 #include <linux/irqreturn.h>
19 #include <linux/dma-direction.h>
20 #include <linux/scatterlist.h>
21 #include <linux/hashtable.h>
22 #include <linux/debugfs.h>
23 #include <linux/rwsem.h>
24 #include <linux/eventfd.h>
25 #include <linux/bitfield.h>
26 #include <linux/genalloc.h>
27 #include <linux/sched/signal.h>
28 #include <linux/io-64-nonatomic-lo-hi.h>
29 #include <linux/coresight.h>
30 #include <linux/dma-buf.h>
31 
32 #define HL_NAME				"habanalabs"
33 
34 /* Use upper bits of mmap offset to store habana driver specific information.
35  * bits[63:59] - Encode mmap type
 * bits[44:0]  - mmap offset value
37  *
38  * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 *  defines are w.r.t. PAGE_SIZE
40  */
41 #define HL_MMAP_TYPE_SHIFT		(59 - PAGE_SHIFT)
42 #define HL_MMAP_TYPE_MASK		(0x1full << HL_MMAP_TYPE_SHIFT)
43 #define HL_MMAP_TYPE_TS_BUFF		(0x10ull << HL_MMAP_TYPE_SHIFT)
44 #define HL_MMAP_TYPE_BLOCK		(0x4ull << HL_MMAP_TYPE_SHIFT)
45 #define HL_MMAP_TYPE_CB			(0x2ull << HL_MMAP_TYPE_SHIFT)
46 
47 #define HL_MMAP_OFFSET_VALUE_MASK	(0x1FFFFFFFFFFFull >> PAGE_SHIFT)
48 #define HL_MMAP_OFFSET_VALUE_GET(off)	(off & HL_MMAP_OFFSET_VALUE_MASK)
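
/*
 * Illustrative sketch (not part of the driver API): the mmap type is encoded
 * in the upper bits of the page offset while the remaining bits carry the
 * buffer-specific offset, so an mmap handler can demultiplex roughly as
 * follows (variable and handler names are hypothetical):
 *
 *	unsigned long vm_pgoff = vma->vm_pgoff;
 *
 *	vma->vm_pgoff = HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
 *
 *	switch (vm_pgoff & HL_MMAP_TYPE_MASK) {
 *	case HL_MMAP_TYPE_BLOCK:
 *		return hw_block_mmap_handler(...);
 *	case HL_MMAP_TYPE_CB:
 *	case HL_MMAP_TYPE_TS_BUFF:
 *		return mem_mgr_mmap_handler(...);
 *	}
 */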
49 
50 #define HL_PENDING_RESET_PER_SEC	10
51 #define HL_PENDING_RESET_MAX_TRIALS	60 /* 10 minutes */
52 #define HL_PENDING_RESET_LONG_SEC	60
53 
54 #define HL_HARD_RESET_MAX_TIMEOUT	120
55 #define HL_PLDM_HARD_RESET_MAX_TIMEOUT	(HL_HARD_RESET_MAX_TIMEOUT * 3)
56 
57 #define HL_DEVICE_TIMEOUT_USEC		1000000 /* 1 s */
58 
59 #define HL_HEARTBEAT_PER_USEC		5000000 /* 5 s */
60 
61 #define HL_PLL_LOW_JOB_FREQ_USEC	5000000 /* 5 s */
62 
63 #define HL_CPUCP_INFO_TIMEOUT_USEC	10000000 /* 10s */
64 #define HL_CPUCP_EEPROM_TIMEOUT_USEC	10000000 /* 10s */
65 #define HL_CPUCP_MON_DUMP_TIMEOUT_USEC	10000000 /* 10s */
66 
67 #define HL_FW_STATUS_POLL_INTERVAL_USEC		10000 /* 10ms */
68 #define HL_FW_COMMS_STATUS_PLDM_POLL_INTERVAL_USEC	1000000 /* 1s */
69 
70 #define HL_PCI_ELBI_TIMEOUT_MSEC	10 /* 10ms */
71 
72 #define HL_SIM_MAX_TIMEOUT_US		10000000 /* 10s */
73 
74 #define HL_COMMON_USER_INTERRUPT_ID	0xFFF
75 
76 #define HL_STATE_DUMP_HIST_LEN		5
77 
/* Default value for device reset trigger, an invalid value */
79 #define HL_RESET_TRIGGER_DEFAULT	0xFF
80 
81 #define OBJ_NAMES_HASH_TABLE_BITS	7 /* 1 << 7 buckets */
82 #define SYNC_TO_ENGINE_HASH_TABLE_BITS	7 /* 1 << 7 buckets */
83 
84 /* Memory */
85 #define MEM_HASH_TABLE_BITS		7 /* 1 << 7 buckets */
86 
87 /* MMU */
88 #define MMU_HASH_TABLE_BITS		7 /* 1 << 7 buckets */
89 
90 /**
 * enum hl_mmu_page_table_location - mmu page table location
92  * @MMU_DR_PGT: page-table is located on device DRAM.
93  * @MMU_HR_PGT: page-table is located on host memory.
94  * @MMU_NUM_PGT_LOCATIONS: number of page-table locations currently supported.
95  */
96 enum hl_mmu_page_table_location {
97 	MMU_DR_PGT = 0,		/* device-dram-resident MMU PGT */
98 	MMU_HR_PGT,		/* host resident MMU PGT */
99 	MMU_NUM_PGT_LOCATIONS	/* num of PGT locations */
100 };
101 
102 /*
103  * HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream
104  * HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream
105  */
106 #define HL_RSVD_SOBS			2
107 #define HL_RSVD_MONS			1
108 
109 /*
110  * HL_COLLECTIVE_RSVD_MSTR_MONS 'collective' reserved monitors per QMAN stream
111  */
112 #define HL_COLLECTIVE_RSVD_MSTR_MONS	2
113 
114 #define HL_MAX_SOB_VAL			(1 << 15)
115 
#define IS_POWER_OF_2(n)		(((n) != 0) && (((n) & ((n) - 1)) == 0))
117 #define IS_MAX_PENDING_CS_VALID(n)	(IS_POWER_OF_2(n) && (n > 1))
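
/*
 * For example, IS_MAX_PENDING_CS_VALID(64) evaluates to true (a power of 2
 * greater than 1), while IS_MAX_PENDING_CS_VALID(1) and
 * IS_MAX_PENDING_CS_VALID(96) evaluate to false.
 */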
118 
119 #define HL_PCI_NUM_BARS			6
120 
121 #define HL_MAX_DCORES			4
122 
123 /*
124  * Reset Flags
125  *
126  * - HL_DRV_RESET_HARD
127  *       If set do hard reset to all engines. If not set reset just
128  *       compute/DMA engines.
129  *
130  * - HL_DRV_RESET_FROM_RESET_THR
131  *       Set if the caller is the hard-reset thread
132  *
133  * - HL_DRV_RESET_HEARTBEAT
134  *       Set if reset is due to heartbeat
135  *
136  * - HL_DRV_RESET_TDR
137  *       Set if reset is due to TDR
138  *
139  * - HL_DRV_RESET_DEV_RELEASE
140  *       Set if reset is due to device release
141  *
142  * - HL_DRV_RESET_BYPASS_REQ_TO_FW
143  *       F/W will perform the reset. No need to ask it to reset the device. This is relevant
144  *       only when running with secured f/w
145  *
146  * - HL_DRV_RESET_FW_FATAL_ERR
147  *       Set if reset is due to a fatal error from FW
148  *
149  * - HL_DRV_RESET_DELAY
150  *       Set if a delay should be added before the reset
151  */
152 
153 #define HL_DRV_RESET_HARD		(1 << 0)
154 #define HL_DRV_RESET_FROM_RESET_THR	(1 << 1)
155 #define HL_DRV_RESET_HEARTBEAT		(1 << 2)
156 #define HL_DRV_RESET_TDR		(1 << 3)
157 #define HL_DRV_RESET_DEV_RELEASE	(1 << 4)
158 #define HL_DRV_RESET_BYPASS_REQ_TO_FW	(1 << 5)
159 #define HL_DRV_RESET_FW_FATAL_ERR	(1 << 6)
160 #define HL_DRV_RESET_DELAY		(1 << 7)
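
/*
 * A minimal usage sketch (assuming the driver's usual reset entry point,
 * hl_device_reset(hdev, flags), which takes an OR-ed mask of these flags):
 *
 *	hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT);
 *
 * would trigger a hard reset and record that it originated from a missed
 * heartbeat.
 */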
161 
162 #define HL_MAX_SOBS_PER_MONITOR	8
163 
164 /**
165  * struct hl_gen_wait_properties - properties for generating a wait CB
166  * @data: command buffer
167  * @q_idx: queue id is used to extract fence register address
168  * @size: offset in command buffer
169  * @sob_base: SOB base to use in this wait CB
170  * @sob_val: SOB value to wait for
171  * @mon_id: monitor to use in this wait CB
172  * @sob_mask: each bit represents a SOB offset from sob_base to be used
173  */
174 struct hl_gen_wait_properties {
175 	void	*data;
176 	u32	q_idx;
177 	u32	size;
178 	u16	sob_base;
179 	u16	sob_val;
180 	u16	mon_id;
181 	u8	sob_mask;
182 };
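
/*
 * Illustrative sketch of filling the structure before handing it to the
 * ASIC's gen_wait_cb() callback (local names are hypothetical):
 *
 *	struct hl_gen_wait_properties wait_prop = {
 *		.data = cb->kernel_address,
 *		.q_idx = q_idx,
 *		.size = 0,		// write offset inside the CB
 *		.sob_base = sob_base_id,
 *		.sob_val = sob_val,
 *		.mon_id = mon_id,
 *		.sob_mask = 0x1,	// wait only on sob_base itself
 *	};
 *
 *	size = hdev->asic_funcs->gen_wait_cb(hdev, &wait_prop);
 */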
183 
184 /**
185  * struct pgt_info - MMU hop page info.
 * @node: hash linked-list node for the shadow hash of pgts.
187  * @phys_addr: physical address of the pgt.
188  * @shadow_addr: shadow hop in the host.
189  * @ctx: pointer to the owner ctx.
190  * @num_of_ptes: indicates how many ptes are used in the pgt.
191  *
192  * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop)
193  * is needed during mapping, a new page is allocated and this structure holds
194  * its essential information. During unmapping, if no valid PTEs remained in the
195  * page, it is freed with its pgt_info structure.
196  */
197 struct pgt_info {
198 	struct hlist_node	node;
199 	u64			phys_addr;
200 	u64			shadow_addr;
201 	struct hl_ctx		*ctx;
202 	int			num_of_ptes;
203 };
204 
205 struct hl_device;
206 struct hl_fpriv;
207 
208 /**
209  * enum hl_pci_match_mode - pci match mode per region
210  * @PCI_ADDRESS_MATCH_MODE: address match mode
211  * @PCI_BAR_MATCH_MODE: bar match mode
212  */
213 enum hl_pci_match_mode {
214 	PCI_ADDRESS_MATCH_MODE,
215 	PCI_BAR_MATCH_MODE
216 };
217 
218 /**
219  * enum hl_fw_component - F/W components to read version through registers.
220  * @FW_COMP_BOOT_FIT: boot fit.
221  * @FW_COMP_PREBOOT: preboot.
222  * @FW_COMP_LINUX: linux.
223  */
224 enum hl_fw_component {
225 	FW_COMP_BOOT_FIT,
226 	FW_COMP_PREBOOT,
227 	FW_COMP_LINUX,
228 };
229 
230 /**
231  * enum hl_fw_types - F/W types present in the system
232  * @FW_TYPE_NONE: no FW component indication
233  * @FW_TYPE_LINUX: Linux image for device CPU
234  * @FW_TYPE_BOOT_CPU: Boot image for device CPU
235  * @FW_TYPE_PREBOOT_CPU: Indicates pre-loaded CPUs are present in the system
236  *                       (preboot, ppboot etc...)
237  * @FW_TYPE_ALL_TYPES: Mask for all types
238  */
239 enum hl_fw_types {
240 	FW_TYPE_NONE = 0x0,
241 	FW_TYPE_LINUX = 0x1,
242 	FW_TYPE_BOOT_CPU = 0x2,
243 	FW_TYPE_PREBOOT_CPU = 0x4,
244 	FW_TYPE_ALL_TYPES =
245 		(FW_TYPE_LINUX | FW_TYPE_BOOT_CPU | FW_TYPE_PREBOOT_CPU)
246 };
247 
248 /**
249  * enum hl_queue_type - Supported QUEUE types.
250  * @QUEUE_TYPE_NA: queue is not available.
251  * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the
252  *                  host.
253  * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
254  *			memories and/or operates the compute engines.
255  * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
256  * @QUEUE_TYPE_HW: queue of DMA and compute engines jobs, for which completion
257  *                 notifications are sent by H/W.
258  */
259 enum hl_queue_type {
260 	QUEUE_TYPE_NA,
261 	QUEUE_TYPE_EXT,
262 	QUEUE_TYPE_INT,
263 	QUEUE_TYPE_CPU,
264 	QUEUE_TYPE_HW
265 };
266 
267 enum hl_cs_type {
268 	CS_TYPE_DEFAULT,
269 	CS_TYPE_SIGNAL,
270 	CS_TYPE_WAIT,
271 	CS_TYPE_COLLECTIVE_WAIT,
272 	CS_RESERVE_SIGNALS,
273 	CS_UNRESERVE_SIGNALS
274 };
275 
276 /*
277  * struct hl_inbound_pci_region - inbound region descriptor
278  * @mode: pci match mode for this region
279  * @addr: region target address
280  * @size: region size in bytes
281  * @offset_in_bar: offset within bar (address match mode)
282  * @bar: bar id
283  */
284 struct hl_inbound_pci_region {
285 	enum hl_pci_match_mode	mode;
286 	u64			addr;
287 	u64			size;
288 	u64			offset_in_bar;
289 	u8			bar;
290 };
291 
292 /*
293  * struct hl_outbound_pci_region - outbound region descriptor
294  * @addr: region target address
295  * @size: region size in bytes
296  */
297 struct hl_outbound_pci_region {
298 	u64	addr;
299 	u64	size;
300 };
301 
302 /*
 * enum queue_cb_alloc_flags - Indicates queue support for CBs that are
 * allocated by the kernel or by the user
 * @CB_ALLOC_KERNEL: support only CBs that are allocated by the kernel
 * @CB_ALLOC_USER: support only CBs that are allocated by the user
307  */
308 enum queue_cb_alloc_flags {
309 	CB_ALLOC_KERNEL = 0x1,
310 	CB_ALLOC_USER   = 0x2
311 };
312 
313 /*
314  * struct hl_hw_sob - H/W SOB info.
315  * @hdev: habanalabs device structure.
316  * @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
317  * @sob_id: id of this SOB.
318  * @sob_addr: the sob offset from the base address.
319  * @q_idx: the H/W queue that uses this SOB.
320  * @need_reset: reset indication set when switching to the other sob.
321  */
322 struct hl_hw_sob {
323 	struct hl_device	*hdev;
324 	struct kref		kref;
325 	u32			sob_id;
326 	u32			sob_addr;
327 	u32			q_idx;
328 	bool			need_reset;
329 };
330 
331 enum hl_collective_mode {
332 	HL_COLLECTIVE_NOT_SUPPORTED = 0x0,
333 	HL_COLLECTIVE_MASTER = 0x1,
334 	HL_COLLECTIVE_SLAVE = 0x2
335 };
336 
337 /**
338  * struct hw_queue_properties - queue information.
339  * @type: queue type.
 * @cb_alloc_flags: bitmap which indicates if the hw queue supports CBs that
 *                  are allocated by the kernel driver and therefore, a CB
 *                  handle can be provided for jobs on this queue.
 *                  Otherwise, a CB address must be provided.
344  * @collective_mode: collective mode of current queue
345  * @driver_only: true if only the driver is allowed to send a job to this queue,
346  *               false otherwise.
347  * @supports_sync_stream: True if queue supports sync stream
348  */
349 struct hw_queue_properties {
350 	enum hl_queue_type	type;
351 	enum queue_cb_alloc_flags cb_alloc_flags;
352 	enum hl_collective_mode	collective_mode;
353 	u8			driver_only;
354 	u8			supports_sync_stream;
355 };
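
/*
 * A short sketch of how a submission path might consult cb_alloc_flags
 * (illustrative only, mirroring the intent described above):
 *
 *	if (hw_queue_prop->cb_alloc_flags & CB_ALLOC_KERNEL) {
 *		// the job may reference a driver-allocated CB by handle
 *	} else {
 *		// the job must carry a raw CB address (user-allocated CB)
 *	}
 */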
356 
357 /**
358  * enum vm_type - virtual memory mapping request information.
359  * @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
360  * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
361  */
362 enum vm_type {
363 	VM_TYPE_USERPTR = 0x1,
364 	VM_TYPE_PHYS_PACK = 0x2
365 };
366 
367 /**
368  * enum mmu_op_flags - mmu operation relevant information.
369  * @MMU_OP_USERPTR: operation on user memory (host resident).
370  * @MMU_OP_PHYS_PACK: operation on DRAM (device resident).
371  * @MMU_OP_CLEAR_MEMCACHE: operation has to clear memcache.
372  * @MMU_OP_SKIP_LOW_CACHE_INV: operation is allowed to skip parts of cache invalidation.
373  */
374 enum mmu_op_flags {
375 	MMU_OP_USERPTR = 0x1,
376 	MMU_OP_PHYS_PACK = 0x2,
377 	MMU_OP_CLEAR_MEMCACHE = 0x4,
378 	MMU_OP_SKIP_LOW_CACHE_INV = 0x8,
379 };
380 
381 
382 /**
383  * enum hl_device_hw_state - H/W device state. use this to understand whether
384  *                           to do reset before hw_init or not
385  * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset
386  * @HL_DEVICE_HW_STATE_DIRTY: H/W state is dirty. i.e. we started to execute
387  *                            hw_init
388  */
389 enum hl_device_hw_state {
390 	HL_DEVICE_HW_STATE_CLEAN = 0,
391 	HL_DEVICE_HW_STATE_DIRTY
392 };
393 
394 #define HL_MMU_VA_ALIGNMENT_NOT_NEEDED 0
395 
396 /**
397  * struct hl_mmu_properties - ASIC specific MMU address translation properties.
398  * @start_addr: virtual start address of the memory region.
399  * @end_addr: virtual end address of the memory region.
400  * @hop_shifts: array holds HOPs shifts.
401  * @hop_masks: array holds HOPs masks.
402  * @last_mask: mask to get the bit indicating this is the last hop.
403  * @pgt_size: size for page tables.
404  * @page_size: default page size used to allocate memory.
405  * @num_hops: The amount of hops supported by the translation table.
406  * @hop_table_size: HOP table size.
407  * @hop0_tables_total_size: total size for all HOP0 tables.
408  * @host_resident: Should the MMU page table reside in host memory or in the
409  *                 device DRAM.
410  */
411 struct hl_mmu_properties {
412 	u64	start_addr;
413 	u64	end_addr;
414 	u64	hop_shifts[MMU_HOP_MAX];
415 	u64	hop_masks[MMU_HOP_MAX];
416 	u64	last_mask;
417 	u64	pgt_size;
418 	u32	page_size;
419 	u32	num_hops;
420 	u32	hop_table_size;
421 	u32	hop0_tables_total_size;
422 	u8	host_resident;
423 };
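
/*
 * Sketch of how the PTE of a given hop is typically located using these
 * properties (assuming mmu_pte_size from asic_fixed_properties; names are
 * illustrative):
 *
 *	pte_addr = hop_addr + prop->mmu_pte_size *
 *			((virt_addr & mmu_prop->hop_masks[hop_idx]) >>
 *			 mmu_prop->hop_shifts[hop_idx]);
 */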
424 
425 /**
426  * struct hl_hints_range - hint addresses reserved va range.
427  * @start_addr: start address of the va range.
428  * @end_addr: end address of the va range.
429  */
430 struct hl_hints_range {
431 	u64 start_addr;
432 	u64 end_addr;
433 };
434 
435 /**
436  * struct asic_fixed_properties - ASIC specific immutable properties.
437  * @hw_queues_props: H/W queues properties.
 * @cpucp_info: various information received from CPU-CP regarding the H/W,
 *		e.g. available sensors.
440  * @uboot_ver: F/W U-boot version.
441  * @preboot_ver: F/W Preboot version.
442  * @dmmu: DRAM MMU address translation properties.
443  * @pmmu: PCI (host) MMU address translation properties.
444  * @pmmu_huge: PCI (host) MMU address translation properties for memory
445  *              allocated with huge pages.
446  * @hints_dram_reserved_va_range: dram hint addresses reserved range.
447  * @hints_host_reserved_va_range: host hint addresses reserved range.
448  * @hints_host_hpage_reserved_va_range: host huge page hint addresses reserved
449  *                                      range.
450  * @sram_base_address: SRAM physical start address.
451  * @sram_end_address: SRAM physical end address.
 * @sram_user_base_address: SRAM physical start address for user access.
453  * @dram_base_address: DRAM physical start address.
454  * @dram_end_address: DRAM physical end address.
455  * @dram_user_base_address: DRAM physical start address for user access.
456  * @dram_size: DRAM total size.
457  * @dram_pci_bar_size: size of PCI bar towards DRAM.
458  * @max_power_default: max power of the device after reset
 * @dc_power_default: power consumed by the device in idle mode.
460  * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
461  *                                      fault.
462  * @pcie_dbi_base_address: Base address of the PCIE_DBI block.
463  * @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
464  * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
465  * @mmu_dram_default_page_addr: DRAM default page physical address.
466  * @cb_va_start_addr: virtual start address of command buffers which are mapped
467  *                    to the device's MMU.
468  * @cb_va_end_addr: virtual end address of command buffers which are mapped to
469  *                  the device's MMU.
470  * @dram_hints_align_mask: dram va hint addresses alignment mask which is used
471  *                  for hints validity check.
472  * @device_dma_offset_for_host_access: the offset to add to host DMA addresses
473  *                                     to enable the device to access them.
474  * @host_base_address: host physical start address for host DMA from device
475  * @host_end_address: host physical end address for host DMA from device
476  * @max_freq_value: current max clk frequency.
477  * @clk_pll_index: clock PLL index that specify which PLL determines the clock
478  *                 we display to the user
479  * @mmu_pgt_size: MMU page tables total size.
480  * @mmu_pte_size: PTE size in MMU page tables.
481  * @mmu_hop_table_size: MMU hop table size.
482  * @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
483  * @dram_page_size: page size for MMU DRAM allocation.
484  * @cfg_size: configuration space size on SRAM.
485  * @sram_size: total size of SRAM.
486  * @max_asid: maximum number of open contexts (ASIDs).
487  * @num_of_events: number of possible internal H/W IRQs.
488  * @psoc_pci_pll_nr: PCI PLL NR value.
489  * @psoc_pci_pll_nf: PCI PLL NF value.
490  * @psoc_pci_pll_od: PCI PLL OD value.
491  * @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
492  * @psoc_timestamp_frequency: frequency of the psoc timestamp clock.
493  * @high_pll: high PLL frequency used by the device.
494  * @cb_pool_cb_cnt: number of CBs in the CB pool.
495  * @cb_pool_cb_size: size of each CB in the CB pool.
 * @max_pending_cs: maximum number of concurrent pending command submissions
 * @max_queues: maximum number of queues in the system
498  * @fw_preboot_cpu_boot_dev_sts0: bitmap representation of preboot cpu
499  *                                capabilities reported by FW, bit description
500  *                                can be found in CPU_BOOT_DEV_STS0
501  * @fw_preboot_cpu_boot_dev_sts1: bitmap representation of preboot cpu
502  *                                capabilities reported by FW, bit description
503  *                                can be found in CPU_BOOT_DEV_STS1
504  * @fw_bootfit_cpu_boot_dev_sts0: bitmap representation of boot cpu security
505  *                                status reported by FW, bit description can be
506  *                                found in CPU_BOOT_DEV_STS0
507  * @fw_bootfit_cpu_boot_dev_sts1: bitmap representation of boot cpu security
508  *                                status reported by FW, bit description can be
509  *                                found in CPU_BOOT_DEV_STS1
510  * @fw_app_cpu_boot_dev_sts0: bitmap representation of application security
511  *                            status reported by FW, bit description can be
512  *                            found in CPU_BOOT_DEV_STS0
513  * @fw_app_cpu_boot_dev_sts1: bitmap representation of application security
514  *                            status reported by FW, bit description can be
515  *                            found in CPU_BOOT_DEV_STS1
516  * @device_mem_alloc_default_page_size: may be different than dram_page_size only for ASICs for
517  *                                      which the property supports_user_set_page_size is true
518  *                                      (i.e. the DRAM supports multiple page sizes), otherwise
 *                                      it shall be equal to dram_page_size.
520  * @collective_first_sob: first sync object available for collective use
521  * @collective_first_mon: first monitor available for collective use
522  * @sync_stream_first_sob: first sync object available for sync stream use
523  * @sync_stream_first_mon: first monitor available for sync stream use
524  * @first_available_user_sob: first sob available for the user
525  * @first_available_user_mon: first monitor available for the user
526  * @first_available_user_msix_interrupt: first available msix interrupt
527  *                                       reserved for the user
528  * @first_available_cq: first available CQ for the user.
529  * @user_interrupt_count: number of user interrupts.
530  * @server_type: Server type that the ASIC is currently installed in.
531  *               The value is according to enum hl_server_type in uapi file.
532  * @tpc_enabled_mask: which TPCs are enabled.
533  * @completion_queues_count: number of completion queues.
534  * @fw_security_enabled: true if security measures are enabled in firmware,
535  *                       false otherwise
536  * @fw_cpu_boot_dev_sts0_valid: status bits are valid and can be fetched from
537  *                              BOOT_DEV_STS0
538  * @fw_cpu_boot_dev_sts1_valid: status bits are valid and can be fetched from
539  *                              BOOT_DEV_STS1
540  * @dram_supports_virtual_memory: is there an MMU towards the DRAM
541  * @hard_reset_done_by_fw: true if firmware is handling hard reset flow
542  * @num_functional_hbms: number of functional HBMs in each DCORE.
543  * @hints_range_reservation: device support hint addresses range reservation.
544  * @iatu_done_by_fw: true if iATU configuration is being done by FW.
 * @dynamic_fw_load: is dynamic FW load supported.
546  * @gic_interrupts_enable: true if FW is not blocking GIC controller,
547  *                         false otherwise.
548  * @use_get_power_for_reset_history: To support backward compatibility for Goya
549  *                                   and Gaudi
550  * @supports_soft_reset: is soft reset supported.
551  * @allow_inference_soft_reset: true if the ASIC supports soft reset that is
552  *                              initiated by user or TDR. This is only true
553  *                              in inference ASICs, as there is no real-world
554  *                              use-case of doing soft-reset in training (due
555  *                              to the fact that training runs on multiple
556  *                              devices)
557  * @configurable_stop_on_err: is stop-on-error option configurable via debugfs.
558  * @set_max_power_on_device_init: true if need to set max power in F/W on device init.
559  * @supports_user_set_page_size: true if user can set the allocation page size.
560  * @dma_mask: the dma mask to be set for this device
561  */
562 struct asic_fixed_properties {
563 	struct hw_queue_properties	*hw_queues_props;
564 	struct cpucp_info		cpucp_info;
565 	char				uboot_ver[VERSION_MAX_LEN];
566 	char				preboot_ver[VERSION_MAX_LEN];
567 	struct hl_mmu_properties	dmmu;
568 	struct hl_mmu_properties	pmmu;
569 	struct hl_mmu_properties	pmmu_huge;
570 	struct hl_hints_range		hints_dram_reserved_va_range;
571 	struct hl_hints_range		hints_host_reserved_va_range;
572 	struct hl_hints_range		hints_host_hpage_reserved_va_range;
573 	u64				sram_base_address;
574 	u64				sram_end_address;
575 	u64				sram_user_base_address;
576 	u64				dram_base_address;
577 	u64				dram_end_address;
578 	u64				dram_user_base_address;
579 	u64				dram_size;
580 	u64				dram_pci_bar_size;
581 	u64				max_power_default;
582 	u64				dc_power_default;
583 	u64				dram_size_for_default_page_mapping;
584 	u64				pcie_dbi_base_address;
585 	u64				pcie_aux_dbi_reg_addr;
586 	u64				mmu_pgt_addr;
587 	u64				mmu_dram_default_page_addr;
588 	u64				cb_va_start_addr;
589 	u64				cb_va_end_addr;
590 	u64				dram_hints_align_mask;
591 	u64				device_dma_offset_for_host_access;
592 	u64				host_base_address;
593 	u64				host_end_address;
594 	u64				max_freq_value;
595 	u32				clk_pll_index;
596 	u32				mmu_pgt_size;
597 	u32				mmu_pte_size;
598 	u32				mmu_hop_table_size;
599 	u32				mmu_hop0_tables_total_size;
600 	u32				dram_page_size;
601 	u32				cfg_size;
602 	u32				sram_size;
603 	u32				max_asid;
604 	u32				num_of_events;
605 	u32				psoc_pci_pll_nr;
606 	u32				psoc_pci_pll_nf;
607 	u32				psoc_pci_pll_od;
608 	u32				psoc_pci_pll_div_factor;
609 	u32				psoc_timestamp_frequency;
610 	u32				high_pll;
611 	u32				cb_pool_cb_cnt;
612 	u32				cb_pool_cb_size;
613 	u32				max_pending_cs;
614 	u32				max_queues;
615 	u32				fw_preboot_cpu_boot_dev_sts0;
616 	u32				fw_preboot_cpu_boot_dev_sts1;
617 	u32				fw_bootfit_cpu_boot_dev_sts0;
618 	u32				fw_bootfit_cpu_boot_dev_sts1;
619 	u32				fw_app_cpu_boot_dev_sts0;
620 	u32				fw_app_cpu_boot_dev_sts1;
621 	u32				device_mem_alloc_default_page_size;
622 	u16				collective_first_sob;
623 	u16				collective_first_mon;
624 	u16				sync_stream_first_sob;
625 	u16				sync_stream_first_mon;
626 	u16				first_available_user_sob[HL_MAX_DCORES];
627 	u16				first_available_user_mon[HL_MAX_DCORES];
628 	u16				first_available_user_msix_interrupt;
629 	u16				first_available_cq[HL_MAX_DCORES];
630 	u16				user_interrupt_count;
631 	u16				server_type;
632 	u8				tpc_enabled_mask;
633 	u8				completion_queues_count;
634 	u8				fw_security_enabled;
635 	u8				fw_cpu_boot_dev_sts0_valid;
636 	u8				fw_cpu_boot_dev_sts1_valid;
637 	u8				dram_supports_virtual_memory;
638 	u8				hard_reset_done_by_fw;
639 	u8				num_functional_hbms;
640 	u8				hints_range_reservation;
641 	u8				iatu_done_by_fw;
642 	u8				dynamic_fw_load;
643 	u8				gic_interrupts_enable;
644 	u8				use_get_power_for_reset_history;
645 	u8				supports_soft_reset;
646 	u8				allow_inference_soft_reset;
647 	u8				configurable_stop_on_err;
648 	u8				set_max_power_on_device_init;
649 	u8				supports_user_set_page_size;
650 	u8				dma_mask;
651 };
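
/*
 * For example, when handing a host buffer to the device's DMA engines, the
 * address the engines see is (roughly, names are illustrative):
 *
 *	device_addr = dma_addr + prop->device_dma_offset_for_host_access;
 *
 * where dma_addr is the address returned by the DMA mapping API.
 */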
652 
653 /**
654  * struct hl_fence - software synchronization primitive
655  * @completion: fence is implemented using completion
656  * @refcount: refcount for this fence
657  * @cs_sequence: sequence of the corresponding command submission
658  * @stream_master_qid_map: streams masters QID bitmap to represent all streams
659  *                         masters QIDs that multi cs is waiting on
660  * @error: mark this fence with error
661  * @timestamp: timestamp upon completion
662  * @mcs_handling_done: indicates that corresponding command submission has
 *                     finished mcs handling; this does not mean it was part
664  *                     of the mcs
665  */
666 struct hl_fence {
667 	struct completion	completion;
668 	struct kref		refcount;
669 	u64			cs_sequence;
670 	u32			stream_master_qid_map;
671 	int			error;
672 	ktime_t			timestamp;
673 	u8			mcs_handling_done;
674 };
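
/*
 * Minimal sketch of waiting on a fence (assuming the driver's usual
 * hl_fence_get()/hl_fence_put() helpers for the embedded refcount; the error
 * handler name is hypothetical):
 *
 *	hl_fence_get(fence);
 *	rc = wait_for_completion_interruptible_timeout(&fence->completion,
 *							timeout_jiffies);
 *	if (rc > 0 && fence->error)
 *		handle_cs_error(fence->error);
 *	hl_fence_put(fence);
 */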
675 
676 /**
677  * struct hl_cs_compl - command submission completion object.
678  * @base_fence: hl fence object.
679  * @lock: spinlock to protect fence.
680  * @hdev: habanalabs device structure.
681  * @hw_sob: the H/W SOB used in this signal/wait CS.
 * @encaps_sig_hdl: encaps signals handler.
683  * @cs_seq: command submission sequence number.
684  * @type: type of the CS - signal/wait.
685  * @sob_val: the SOB value that is used in this signal/wait CS.
686  * @sob_group: the SOB group that is used in this collective wait CS.
687  * @encaps_signals: indication whether it's a completion object of cs with
688  * encaps signals or not.
689  */
690 struct hl_cs_compl {
691 	struct hl_fence		base_fence;
692 	spinlock_t		lock;
693 	struct hl_device	*hdev;
694 	struct hl_hw_sob	*hw_sob;
695 	struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
696 	u64			cs_seq;
697 	enum hl_cs_type		type;
698 	u16			sob_val;
699 	u16			sob_group;
700 	bool			encaps_signals;
701 };
702 
703 /*
704  * Command Buffers
705  */
706 
707 /**
708  * struct hl_ts_buff - describes a timestamp buffer.
709  * @kernel_buff_address: Holds the internal buffer's kernel virtual address.
710  * @user_buff_address: Holds the user buffer's kernel virtual address.
711  * @kernel_buff_size: Holds the internal kernel buffer size.
712  */
713 struct hl_ts_buff {
714 	void			*kernel_buff_address;
715 	void			*user_buff_address;
716 	u32			kernel_buff_size;
717 };
718 
719 struct hl_mmap_mem_buf;
720 
721 /**
722  * struct hl_mem_mgr - describes unified memory manager for mappable memory chunks.
723  * @dev: back pointer to the owning device
724  * @lock: protects handles
725  * @handles: an idr holding all active handles to the memory buffers in the system.
726  */
727 struct hl_mem_mgr {
728 	struct device *dev;
729 	spinlock_t lock;
730 	struct idr handles;
731 };
732 
733 /**
734  * struct hl_mmap_mem_buf_behavior - describes unified memory manager buffer behavior
735  * @topic: string identifier used for logging
736  * @mem_id: memory type identifier, embedded in the handle and used to identify
737  *          the memory type by handle.
738  * @alloc: callback executed on buffer allocation, shall allocate the memory,
739  *         set it under buffer private, and set mappable size.
740  * @mmap: callback executed on mmap, must map the buffer to vma
741  * @release: callback executed on release, must free the resources used by the buffer
742  */
743 struct hl_mmap_mem_buf_behavior {
744 	const char *topic;
745 	u64 mem_id;
746 
747 	int (*alloc)(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args);
748 	int (*mmap)(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args);
749 	void (*release)(struct hl_mmap_mem_buf *buf);
750 };
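
/*
 * A buffer type is described once and then used by the unified memory
 * manager, e.g. (callback names are hypothetical; mem_id reuses the mmap
 * type bits defined above):
 *
 *	static struct hl_mmap_mem_buf_behavior example_behavior = {
 *		.topic = "TS",
 *		.mem_id = HL_MMAP_TYPE_TS_BUFF,
 *		.alloc = example_alloc,
 *		.release = example_release,
 *		.mmap = example_mmap,
 *	};
 */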
751 
752 /**
753  * struct hl_mmap_mem_buf - describes a single unified memory buffer
754  * @behavior: buffer behavior
755  * @mmg: back pointer to the unified memory manager
756  * @refcount: reference counter for buffer users
757  * @private: pointer to buffer behavior private data
758  * @mmap: atomic boolean indicating whether or not the buffer is mapped right now
 * @real_mapped_size: the actual mapped size of the buffer; may change at
 *                    runtime after parts of it are released.
761  * @mappable_size: the original mappable size of the buffer, does not change after
762  *                 the allocation.
763  * @handle: the buffer id in mmg handles store
764  */
765 struct hl_mmap_mem_buf {
766 	struct hl_mmap_mem_buf_behavior *behavior;
767 	struct hl_mem_mgr *mmg;
768 	struct kref refcount;
769 	void *private;
770 	atomic_t mmap;
771 	u64 real_mapped_size;
772 	u64 mappable_size;
773 	u64 handle;
774 };
775 
776 /**
777  * struct hl_cb - describes a Command Buffer.
778  * @hdev: pointer to device this CB belongs to.
779  * @ctx: pointer to the CB owner's context.
780  * @buf: back pointer to the parent mappable memory buffer
781  * @debugfs_list: node in debugfs list of command buffers.
782  * @pool_list: node in pool list of command buffers.
783  * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to
784  *                 the device's MMU.
785  * @kernel_address: Holds the CB's kernel virtual address.
786  * @bus_address: Holds the CB's DMA address.
787  * @size: holds the CB's size.
788  * @cs_cnt: holds number of CS that this CB participates in.
789  * @is_pool: true if CB was acquired from the pool, false otherwise.
 * @is_internal: internally allocated
791  * @is_mmu_mapped: true if the CB is mapped to the device's MMU.
792  */
793 struct hl_cb {
794 	struct hl_device	*hdev;
795 	struct hl_ctx		*ctx;
796 	struct hl_mmap_mem_buf	*buf;
797 	struct list_head	debugfs_list;
798 	struct list_head	pool_list;
799 	struct list_head	va_block_list;
800 	void			*kernel_address;
801 	dma_addr_t		bus_address;
802 	u32			size;
803 	atomic_t		cs_cnt;
804 	u8			is_pool;
805 	u8			is_internal;
806 	u8			is_mmu_mapped;
807 };
808 
809 
810 /*
811  * QUEUES
812  */
813 
814 struct hl_cs;
815 struct hl_cs_job;
816 
817 /* Queue length of external and HW queues */
818 #define HL_QUEUE_LENGTH			4096
819 #define HL_QUEUE_SIZE_IN_BYTES		(HL_QUEUE_LENGTH * HL_BD_SIZE)
820 
821 #if (HL_MAX_JOBS_PER_CS > HL_QUEUE_LENGTH)
#error "HL_QUEUE_LENGTH must be at least HL_MAX_JOBS_PER_CS"
823 #endif
824 
825 /* HL_CQ_LENGTH is in units of struct hl_cq_entry */
826 #define HL_CQ_LENGTH			HL_QUEUE_LENGTH
827 #define HL_CQ_SIZE_IN_BYTES		(HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)
828 
829 /* Must be power of 2 */
830 #define HL_EQ_LENGTH			64
831 #define HL_EQ_SIZE_IN_BYTES		(HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
832 
833 /* Host <-> CPU-CP shared memory size */
834 #define HL_CPU_ACCESSIBLE_MEM_SIZE	SZ_2M
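
/*
 * Because the queue lengths above are powers of 2, producer/consumer indices
 * can wrap around with a simple mask, e.g. (illustrative):
 *
 *	q->pi = (q->pi + 1) & (HL_QUEUE_LENGTH - 1);
 */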
835 
836 /**
837  * struct hl_sync_stream_properties -
838  *     describes a H/W queue sync stream properties
839  * @hw_sob: array of the used H/W SOBs by this H/W queue.
840  * @next_sob_val: the next value to use for the currently used SOB.
841  * @base_sob_id: the base SOB id of the SOBs used by this queue.
842  * @base_mon_id: the base MON id of the MONs used by this queue.
843  * @collective_mstr_mon_id: the MON ids of the MONs used by this master queue
844  *                          in order to sync with all slave queues.
845  * @collective_slave_mon_id: the MON id used by this slave queue in order to
846  *                           sync with its master queue.
847  * @collective_sob_id: current SOB id used by this collective slave queue
848  *                     to signal its collective master queue upon completion.
849  * @curr_sob_offset: the id offset to the currently used SOB from the
850  *                   HL_RSVD_SOBS that are being used by this queue.
851  */
852 struct hl_sync_stream_properties {
853 	struct hl_hw_sob hw_sob[HL_RSVD_SOBS];
854 	u16		next_sob_val;
855 	u16		base_sob_id;
856 	u16		base_mon_id;
857 	u16		collective_mstr_mon_id[HL_COLLECTIVE_RSVD_MSTR_MONS];
858 	u16		collective_slave_mon_id;
859 	u16		collective_sob_id;
860 	u8		curr_sob_offset;
861 };
862 
863 /**
864  * struct hl_encaps_signals_mgr - describes sync stream encapsulated signals
865  * handlers manager
866  * @lock: protects handles.
867  * @handles: an idr to hold all encapsulated signals handles.
868  */
869 struct hl_encaps_signals_mgr {
870 	spinlock_t		lock;
871 	struct idr		handles;
872 };
873 
874 /**
875  * struct hl_hw_queue - describes a H/W transport queue.
876  * @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
877  * @sync_stream_prop: sync stream queue properties
878  * @queue_type: type of queue.
879  * @collective_mode: collective mode of current queue
880  * @kernel_address: holds the queue's kernel virtual address.
881  * @bus_address: holds the queue's DMA address.
882  * @pi: holds the queue's pi value.
883  * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
884  * @hw_queue_id: the id of the H/W queue.
885  * @cq_id: the id for the corresponding CQ for this H/W queue.
886  * @msi_vec: the IRQ number of the H/W queue.
887  * @int_queue_len: length of internal queue (number of entries).
888  * @valid: is the queue valid (we have array of 32 queues, not all of them
889  *         exist).
890  * @supports_sync_stream: True if queue supports sync stream
891  */
892 struct hl_hw_queue {
893 	struct hl_cs_job			**shadow_queue;
894 	struct hl_sync_stream_properties	sync_stream_prop;
895 	enum hl_queue_type			queue_type;
896 	enum hl_collective_mode			collective_mode;
897 	void					*kernel_address;
898 	dma_addr_t				bus_address;
899 	u32					pi;
900 	atomic_t				ci;
901 	u32					hw_queue_id;
902 	u32					cq_id;
903 	u32					msi_vec;
904 	u16					int_queue_len;
905 	u8					valid;
906 	u8					supports_sync_stream;
907 };
908 
909 /**
910  * struct hl_cq - describes a completion queue
911  * @hdev: pointer to the device structure
912  * @kernel_address: holds the queue's kernel virtual address
913  * @bus_address: holds the queue's DMA address
914  * @cq_idx: completion queue index in array
915  * @hw_queue_id: the id of the matching H/W queue
916  * @ci: ci inside the queue
917  * @pi: pi inside the queue
918  * @free_slots_cnt: counter of free slots in queue
919  */
920 struct hl_cq {
921 	struct hl_device	*hdev;
922 	void			*kernel_address;
923 	dma_addr_t		bus_address;
924 	u32			cq_idx;
925 	u32			hw_queue_id;
926 	u32			ci;
927 	u32			pi;
928 	atomic_t		free_slots_cnt;
929 };
930 
931 /**
932  * struct hl_user_interrupt - holds user interrupt information
933  * @hdev: pointer to the device structure
934  * @wait_list_head: head to the list of user threads pending on this interrupt
935  * @wait_list_lock: protects wait_list_head
936  * @interrupt_id: msix interrupt id
937  */
938 struct hl_user_interrupt {
939 	struct hl_device	*hdev;
940 	struct list_head	wait_list_head;
941 	spinlock_t		wait_list_lock;
942 	u32			interrupt_id;
943 };
944 
945 /**
946  * struct timestamp_reg_free_node - holds the timestamp registration free objects node
947  * @free_objects_node: node in the list free_obj_jobs
948  * @cq_cb: pointer to cq command buffer to be freed
949  * @buf: pointer to timestamp buffer to be freed
950  */
951 struct timestamp_reg_free_node {
952 	struct list_head	free_objects_node;
953 	struct hl_cb		*cq_cb;
954 	struct hl_mmap_mem_buf	*buf;
955 };
956 
/* struct timestamp_reg_work_obj - holds the timestamp registration free objects job.
 * The job passes over the free_obj_jobs list and puts the refcount of the objects
 * in each node of the list.
960  * @free_obj: workqueue object to free timestamp registration node objects
961  * @hdev: pointer to the device structure
962  * @free_obj_head: list of free jobs nodes (node type timestamp_reg_free_node)
963  */
964 struct timestamp_reg_work_obj {
965 	struct work_struct	free_obj;
966 	struct hl_device	*hdev;
967 	struct list_head	*free_obj_head;
968 };
969 
970 /* struct timestamp_reg_info - holds the timestamp registration related data.
 * @buf: pointer to the timestamp buffer which includes both user/kernel buffers.
972  *       relevant only when doing timestamps records registration.
973  * @cq_cb: pointer to CQ counter CB.
974  * @timestamp_kernel_addr: timestamp handle address, where to set timestamp
975  *                         relevant only when doing timestamps records
976  *                         registration.
 * @in_use: indicates if the node is already in use. relevant only when doing
 *          timestamps records registration, since in this case the driver
 *          will have its own buffer which serves as a records pool instead of
980  *          allocating records dynamically.
981  */
982 struct timestamp_reg_info {
983 	struct hl_mmap_mem_buf	*buf;
984 	struct hl_cb		*cq_cb;
985 	u64			*timestamp_kernel_addr;
986 	u8			in_use;
987 };
988 
989 /**
990  * struct hl_user_pending_interrupt - holds a context to a user thread
991  *                                    pending on an interrupt
992  * @ts_reg_info: holds the timestamps registration nodes info
993  * @wait_list_node: node in the list of user threads pending on an interrupt
994  * @fence: hl fence object for interrupt completion
995  * @cq_target_value: CQ target value
996  * @cq_kernel_addr: CQ kernel address, to be used in the cq interrupt
 *                  handler for target value comparison
998  */
999 struct hl_user_pending_interrupt {
1000 	struct timestamp_reg_info	ts_reg_info;
1001 	struct list_head		wait_list_node;
1002 	struct hl_fence			fence;
1003 	u64				cq_target_value;
1004 	u64				*cq_kernel_addr;
1005 };
1006 
1007 /**
1008  * struct hl_eq - describes the event queue (single one per device)
1009  * @hdev: pointer to the device structure
1010  * @kernel_address: holds the queue's kernel virtual address
1011  * @bus_address: holds the queue's DMA address
1012  * @ci: ci inside the queue
 * @prev_eqe_index: the index of the previous event queue entry. The current
 *                  entry's index must be +1 of the previous one.
1015  * @check_eqe_index: do we need to check the index of the current entry vs. the
1016  *                   previous one. This is for backward compatibility with older
1017  *                   firmwares
1018  */
1019 struct hl_eq {
1020 	struct hl_device	*hdev;
1021 	void			*kernel_address;
1022 	dma_addr_t		bus_address;
1023 	u32			ci;
1024 	u32			prev_eqe_index;
1025 	bool			check_eqe_index;
1026 };
1027 
1028 
1029 /*
1030  * ASICs
1031  */
1032 
1033 /**
1034  * enum hl_asic_type - supported ASIC types.
1035  * @ASIC_INVALID: Invalid ASIC type.
1036  * @ASIC_GOYA: Goya device.
1037  * @ASIC_GAUDI: Gaudi device.
1038  * @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000).
1039  */
1040 enum hl_asic_type {
1041 	ASIC_INVALID,
1042 	ASIC_GOYA,
1043 	ASIC_GAUDI,
1044 	ASIC_GAUDI_SEC
1045 };
1046 
1047 struct hl_cs_parser;
1048 
1049 /**
1050  * enum hl_pm_mng_profile - power management profile.
1051  * @PM_AUTO: internal clock is set by the Linux driver.
1052  * @PM_MANUAL: internal clock is set by the user.
1053  * @PM_LAST: last power management type.
1054  */
1055 enum hl_pm_mng_profile {
1056 	PM_AUTO = 1,
1057 	PM_MANUAL,
1058 	PM_LAST
1059 };
1060 
1061 /**
1062  * enum hl_pll_frequency - PLL frequency.
1063  * @PLL_HIGH: high frequency.
1064  * @PLL_LOW: low frequency.
1065  * @PLL_LAST: last frequency values that were configured by the user.
1066  */
1067 enum hl_pll_frequency {
1068 	PLL_HIGH = 1,
1069 	PLL_LOW,
1070 	PLL_LAST
1071 };
1072 
1073 #define PLL_REF_CLK 50
1074 
1075 enum div_select_defs {
1076 	DIV_SEL_REF_CLK = 0,
1077 	DIV_SEL_PLL_CLK = 1,
1078 	DIV_SEL_DIVIDED_REF = 2,
1079 	DIV_SEL_DIVIDED_PLL = 3,
1080 };
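
/*
 * Rough sketch of how the ASIC code derives a frequency from the divider
 * selection and the PLL fields nr/nf/od/div_fctr (exact handling is ASIC
 * specific; shown here for illustration only):
 *
 *	DIV_SEL_REF_CLK:     freq = PLL_REF_CLK
 *	DIV_SEL_DIVIDED_REF: freq = PLL_REF_CLK / (div_fctr + 1)
 *	DIV_SEL_PLL_CLK:     freq = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1))
 *	DIV_SEL_DIVIDED_PLL: freq = the PLL clock above / (div_fctr + 1)
 */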
1081 
1082 enum debugfs_access_type {
1083 	DEBUGFS_READ8,
1084 	DEBUGFS_WRITE8,
1085 	DEBUGFS_READ32,
1086 	DEBUGFS_WRITE32,
1087 	DEBUGFS_READ64,
1088 	DEBUGFS_WRITE64,
1089 };
1090 
1091 enum pci_region {
1092 	PCI_REGION_CFG,
1093 	PCI_REGION_SRAM,
1094 	PCI_REGION_DRAM,
1095 	PCI_REGION_SP_SRAM,
1096 	PCI_REGION_NUMBER,
1097 };
1098 
1099 /**
1100  * struct pci_mem_region - describe memory region in a PCI bar
1101  * @region_base: region base address
1102  * @region_size: region size
1103  * @bar_size: size of the BAR
1104  * @offset_in_bar: region offset into the bar
1105  * @bar_id: bar ID of the region
1106  * @used: if used 1, otherwise 0
1107  */
1108 struct pci_mem_region {
1109 	u64 region_base;
1110 	u64 region_size;
1111 	u64 bar_size;
1112 	u64 offset_in_bar;
1113 	u8 bar_id;
1114 	u8 used;
1115 };
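
/*
 * A typical region lookup, e.g. for translating a device address into a BAR
 * offset (illustrative):
 *
 *	if (addr >= region->region_base &&
 *	    addr < region->region_base + region->region_size)
 *		bar_offset = (addr - region->region_base) +
 *				region->offset_in_bar;
 */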
1116 
1117 /**
1118  * struct static_fw_load_mgr - static FW load manager
1119  * @preboot_version_max_off: max offset to preboot version
1120  * @boot_fit_version_max_off: max offset to boot fit version
 * @kmd_msg_to_cpu_reg: register address for KMD->CPU messages
1122  * @cpu_cmd_status_to_host_reg: register address for CPU command status response
1123  * @cpu_boot_status_reg: boot status register
1124  * @cpu_boot_dev_status0_reg: boot device status register 0
1125  * @cpu_boot_dev_status1_reg: boot device status register 1
1126  * @boot_err0_reg: boot error register 0
1127  * @boot_err1_reg: boot error register 1
1128  * @preboot_version_offset_reg: SRAM offset to preboot version register
1129  * @boot_fit_version_offset_reg: SRAM offset to boot fit version register
1130  * @sram_offset_mask: mask for getting offset into the SRAM
1131  * @cpu_reset_wait_msec: used when setting WFE via kmd_msg_to_cpu_reg
1132  */
1133 struct static_fw_load_mgr {
1134 	u64 preboot_version_max_off;
1135 	u64 boot_fit_version_max_off;
1136 	u32 kmd_msg_to_cpu_reg;
1137 	u32 cpu_cmd_status_to_host_reg;
1138 	u32 cpu_boot_status_reg;
1139 	u32 cpu_boot_dev_status0_reg;
1140 	u32 cpu_boot_dev_status1_reg;
1141 	u32 boot_err0_reg;
1142 	u32 boot_err1_reg;
1143 	u32 preboot_version_offset_reg;
1144 	u32 boot_fit_version_offset_reg;
1145 	u32 sram_offset_mask;
1146 	u32 cpu_reset_wait_msec;
1147 };
1148 
1149 /**
1150  * struct fw_response - FW response to LKD command
1151  * @ram_offset: descriptor offset into the RAM
1152  * @ram_type: RAM type containing the descriptor (SRAM/DRAM)
1153  * @status: command status
1154  */
1155 struct fw_response {
1156 	u32 ram_offset;
1157 	u8 ram_type;
1158 	u8 status;
1159 };
1160 
1161 /**
1162  * struct dynamic_fw_load_mgr - dynamic FW load manager
1163  * @response: FW to LKD response
1164  * @comm_desc: the communication descriptor with FW
1165  * @image_region: region to copy the FW image to
1166  * @fw_image_size: size of FW image to load
1167  * @wait_for_bl_timeout: timeout for waiting for boot loader to respond
1168  * @fw_desc_valid: true if FW descriptor has been validated and hence the data can be used
1169  */
1170 struct dynamic_fw_load_mgr {
1171 	struct fw_response response;
1172 	struct lkd_fw_comms_desc comm_desc;
1173 	struct pci_mem_region *image_region;
1174 	size_t fw_image_size;
1175 	u32 wait_for_bl_timeout;
1176 	bool fw_desc_valid;
1177 };
1178 
1179 /**
1180  * struct fw_image_props - properties of FW image
1181  * @image_name: name of the image
1182  * @src_off: offset in src FW to copy from
1183  * @copy_size: amount of bytes to copy (0 to copy the whole binary)
1184  */
1185 struct fw_image_props {
1186 	char *image_name;
1187 	u32 src_off;
1188 	u32 copy_size;
1189 };
1190 
1191 /**
 * struct fw_load_mgr - manages the FW loading process
1193  * @dynamic_loader: specific structure for dynamic load
1194  * @static_loader: specific structure for static load
1195  * @boot_fit_img: boot fit image properties
1196  * @linux_img: linux image properties
1197  * @cpu_timeout: CPU response timeout in usec
1198  * @boot_fit_timeout: Boot fit load timeout in usec
1199  * @skip_bmc: should BMC be skipped
1200  * @sram_bar_id: SRAM bar ID
1201  * @dram_bar_id: DRAM bar ID
 * @fw_comp_loaded: bitmask of loaded FW components. A set bit means the
 *                  component was loaded. Values are set according to enum hl_fw_types.
1204  */
1205 struct fw_load_mgr {
1206 	union {
1207 		struct dynamic_fw_load_mgr dynamic_loader;
1208 		struct static_fw_load_mgr static_loader;
1209 	};
1210 	struct fw_image_props boot_fit_img;
1211 	struct fw_image_props linux_img;
1212 	u32 cpu_timeout;
1213 	u32 boot_fit_timeout;
1214 	u8 skip_bmc;
1215 	u8 sram_bar_id;
1216 	u8 dram_bar_id;
1217 	u8 fw_comp_loaded;
1218 };
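
/*
 * For example, code that must talk to the device CPU over the CPU queue can
 * first check whether the full Linux image is up (assuming the manager is
 * reachable as hdev->fw_loader):
 *
 *	if (hdev->fw_loader.fw_comp_loaded & FW_TYPE_LINUX)
 *		// the device CPU is running the full F/W stack
 */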
1219 
1220 /**
 * struct hl_asic_funcs - ASIC specific functions that can be called from
1222  *                        common code.
1223  * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
1224  * @early_fini: tears down what was done in early_init.
1225  * @late_init: sets up late driver/hw state (post hw_init) - Optional.
1226  * @late_fini: tears down what was done in late_init (pre hw_fini) - Optional.
1227  * @sw_init: sets up driver state, does not configure H/W.
1228  * @sw_fini: tears down driver state, does not configure H/W.
1229  * @hw_init: sets up the H/W state.
1230  * @hw_fini: tears down the H/W state.
1231  * @halt_engines: halt engines, needed for reset sequence. This also disables
1232  *                interrupts from the device. Should be called before
1233  *                hw_fini and before CS rollback.
1234  * @suspend: handles IP specific H/W or SW changes for suspend.
1235  * @resume: handles IP specific H/W or SW changes for resume.
 * @mmap: maps a memory area into a user's virtual address space.
1237  * @ring_doorbell: increment PI on a given QMAN.
1238  * @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
1239  *             function because the PQs are located in different memory areas
1240  *             per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
1241  *             writing the PQE must match the destination memory area
1242  *             properties.
1243  * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
1244  *                           dma_alloc_coherent(). This is ASIC function because
1245  *                           its implementation is not trivial when the driver
1246  *                           is loaded in simulation mode (not upstreamed).
1247  * @asic_dma_free_coherent:  Free coherent DMA memory by calling
1248  *                           dma_free_coherent(). This is ASIC function because
1249  *                           its implementation is not trivial when the driver
1250  *                           is loaded in simulation mode (not upstreamed).
1251  * @scrub_device_mem: Scrub device memory given an address and size
1252  * @scrub_device_dram: Scrub the dram memory of the device.
1253  * @get_int_queue_base: get the internal queue base address.
1254  * @test_queues: run simple test on all queues for sanity check.
1255  * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
1256  *                        size of allocation is HL_DMA_POOL_BLK_SIZE.
1257  * @asic_dma_pool_free: free small DMA allocation from pool.
1258  * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
1259  * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
1260  * @hl_dma_unmap_sgtable: DMA unmap scatter-gather table.
1261  * @cs_parser: parse Command Submission.
1262  * @asic_dma_map_sgtable: DMA map scatter-gather table.
1263  * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
1264  * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
1265  * @update_eq_ci: update event queue CI.
1266  * @context_switch: called upon ASID context switch.
 * @restore_phase_topology: clear all SOBs and MONs.
1268  * @debugfs_read_dma: debug interface for reading up to 2MB from the device's
1269  *                    internal memory via DMA engine.
1270  * @add_device_attr: add ASIC specific device attributes.
1271  * @handle_eqe: handle event queue entry (IRQ) from CPU-CP.
1272  * @get_events_stat: retrieve event queue entries histogram.
1273  * @read_pte: read MMU page table entry from DRAM.
1274  * @write_pte: write MMU page table entry to DRAM.
1275  * @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
1276  *                        (L1 only) or hard (L0 & L1) flush.
1277  * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with ASID-VA-size mask.
1278  * @mmu_prefetch_cache_range: pre-fetch specific MMU STLB cache lines with ASID-VA-size mask.
1279  * @send_heartbeat: send is-alive packet to CPU-CP and verify response.
1280  * @debug_coresight: perform certain actions on Coresight for debugging.
1281  * @is_device_idle: return true if device is idle, false otherwise.
1282  * @non_hard_reset_late_init: perform certain actions needed after a reset which is not hard-reset
1283  * @hw_queues_lock: acquire H/W queues lock.
1284  * @hw_queues_unlock: release H/W queues lock.
1285  * @get_pci_id: retrieve PCI ID.
1286  * @get_eeprom_data: retrieve EEPROM data from F/W.
1287  * @get_monitor_dump: retrieve monitor registers dump from F/W.
 * @send_cpu_message: send message to F/W. If the message times out, the
1289  *                    driver will eventually reset the device. The timeout can
1290  *                    be determined by the calling function or it can be 0 and
1291  *                    then the timeout is the default timeout for the specific
1292  *                    ASIC
1293  * @get_hw_state: retrieve the H/W state
1294  * @pci_bars_map: Map PCI BARs.
1295  * @init_iatu: Initialize the iATU unit inside the PCI controller.
1296  * @rreg: Read a register. Needed for simulator support.
1297  * @wreg: Write a register. Needed for simulator support.
1298  * @halt_coresight: stop the ETF and ETR traces.
1299  * @ctx_init: context dependent initialization.
1300  * @ctx_fini: context dependent cleanup.
1301  * @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
1302  * @load_firmware_to_device: load the firmware to the device's memory
1303  * @load_boot_fit_to_device: load boot fit to device's memory
1304  * @get_signal_cb_size: Get signal CB size.
1305  * @get_wait_cb_size: Get wait CB size.
1306  * @gen_signal_cb: Generate a signal CB.
1307  * @gen_wait_cb: Generate a wait CB.
1308  * @reset_sob: Reset a SOB.
1309  * @reset_sob_group: Reset SOB group
1310  * @get_device_time: Get the device time.
1311  * @collective_wait_init_cs: Generate collective master/slave packets
1312  *                           and place them in the relevant cs jobs
1313  * @collective_wait_create_jobs: allocate collective wait cs jobs
 * @scramble_addr: Routine to scramble the address prior to mapping it
 *                 in the MMU.
 * @descramble_addr: Routine to de-scramble the address prior to
 *                   showing it to users.
1318  * @ack_protection_bits_errors: ack and dump all security violations
1319  * @get_hw_block_id: retrieve a HW block id to be used by the user to mmap it.
1320  *                   also returns the size of the block if caller supplies
1321  *                   a valid pointer for it
1322  * @hw_block_mmap: mmap a HW block with a given id.
1323  * @enable_events_from_fw: send interrupt to firmware to notify them the
1324  *                         driver is ready to receive asynchronous events. This
1325  *                         function should be called during the first init and
1326  *                         after every hard-reset of the device
1327  * @get_msi_info: Retrieve asic-specific MSI ID of the f/w async event
1328  * @map_pll_idx_to_fw_idx: convert driver specific per asic PLL index to
1329  *                         generic f/w compatible PLL Indexes
1330  * @init_firmware_loader: initialize data for FW loader.
1331  * @init_cpu_scrambler_dram: Enable CPU specific DRAM scrambling
1332  * @state_dump_init: initialize constants required for state dump
1333  * @get_sob_addr: get SOB base address offset.
1334  * @set_pci_memory_regions: setting properties of PCI memory regions
1335  * @get_stream_master_qid_arr: get pointer to stream masters QID array
1336  * @is_valid_dram_page_size: return true if page size is supported in device
1337  *                           memory allocation, otherwise false.
1338  * @get_valid_dram_page_orders: get valid device memory allocation page orders
1339  * @access_dev_mem: access device memory
1340  * @set_dram_bar_base: set the base of the DRAM BAR
1341  */
1342 struct hl_asic_funcs {
1343 	int (*early_init)(struct hl_device *hdev);
1344 	int (*early_fini)(struct hl_device *hdev);
1345 	int (*late_init)(struct hl_device *hdev);
1346 	void (*late_fini)(struct hl_device *hdev);
1347 	int (*sw_init)(struct hl_device *hdev);
1348 	int (*sw_fini)(struct hl_device *hdev);
1349 	int (*hw_init)(struct hl_device *hdev);
1350 	void (*hw_fini)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
1351 	void (*halt_engines)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
1352 	int (*suspend)(struct hl_device *hdev);
1353 	int (*resume)(struct hl_device *hdev);
1354 	int (*mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
1355 			void *cpu_addr, dma_addr_t dma_addr, size_t size);
1356 	void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
1357 	void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
1358 			struct hl_bd *bd);
1359 	void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
1360 					dma_addr_t *dma_handle, gfp_t flag);
1361 	void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
1362 					void *cpu_addr, dma_addr_t dma_handle);
1363 	int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size);
1364 	int (*scrub_device_dram)(struct hl_device *hdev, u64 val);
1365 	void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
1366 				dma_addr_t *dma_handle, u16 *queue_len);
1367 	int (*test_queues)(struct hl_device *hdev);
1368 	void* (*asic_dma_pool_zalloc)(struct hl_device *hdev, size_t size,
1369 				gfp_t mem_flags, dma_addr_t *dma_handle);
1370 	void (*asic_dma_pool_free)(struct hl_device *hdev, void *vaddr,
1371 				dma_addr_t dma_addr);
1372 	void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
1373 				size_t size, dma_addr_t *dma_handle);
1374 	void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
1375 				size_t size, void *vaddr);
1376 	void (*hl_dma_unmap_sgtable)(struct hl_device *hdev,
1377 				struct sg_table *sgt,
1378 				enum dma_data_direction dir);
1379 	int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
1380 	int (*asic_dma_map_sgtable)(struct hl_device *hdev, struct sg_table *sgt,
1381 				enum dma_data_direction dir);
1382 	u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
1383 					struct sg_table *sgt);
1384 	void (*add_end_of_cb_packets)(struct hl_device *hdev,
1385 					void *kernel_address, u32 len,
1386 					u64 cq_addr, u32 cq_val, u32 msix_num,
1387 					bool eb);
1388 	void (*update_eq_ci)(struct hl_device *hdev, u32 val);
1389 	int (*context_switch)(struct hl_device *hdev, u32 asid);
1390 	void (*restore_phase_topology)(struct hl_device *hdev);
1391 	int (*debugfs_read_dma)(struct hl_device *hdev, u64 addr, u32 size,
1392 				void *blob_addr);
1393 	void (*add_device_attr)(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp,
1394 				struct attribute_group *dev_vrm_attr_grp);
1395 	void (*handle_eqe)(struct hl_device *hdev,
1396 				struct hl_eq_entry *eq_entry);
1397 	void* (*get_events_stat)(struct hl_device *hdev, bool aggregate,
1398 				u32 *size);
1399 	u64 (*read_pte)(struct hl_device *hdev, u64 addr);
1400 	void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
1401 	int (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
1402 					u32 flags);
1403 	int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
1404 				u32 flags, u32 asid, u64 va, u64 size);
1405 	int (*mmu_prefetch_cache_range)(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size);
1406 	int (*send_heartbeat)(struct hl_device *hdev);
1407 	int (*debug_coresight)(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
1408 	bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr,
1409 					u8 mask_len, struct seq_file *s);
1410 	int (*non_hard_reset_late_init)(struct hl_device *hdev);
1411 	void (*hw_queues_lock)(struct hl_device *hdev);
1412 	void (*hw_queues_unlock)(struct hl_device *hdev);
1413 	u32 (*get_pci_id)(struct hl_device *hdev);
1414 	int (*get_eeprom_data)(struct hl_device *hdev, void *data, size_t max_size);
1415 	int (*get_monitor_dump)(struct hl_device *hdev, void *data);
1416 	int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
1417 				u16 len, u32 timeout, u64 *result);
1418 	int (*pci_bars_map)(struct hl_device *hdev);
1419 	int (*init_iatu)(struct hl_device *hdev);
1420 	u32 (*rreg)(struct hl_device *hdev, u32 reg);
1421 	void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
1422 	void (*halt_coresight)(struct hl_device *hdev, struct hl_ctx *ctx);
1423 	int (*ctx_init)(struct hl_ctx *ctx);
1424 	void (*ctx_fini)(struct hl_ctx *ctx);
1425 	u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
1426 	int (*load_firmware_to_device)(struct hl_device *hdev);
1427 	int (*load_boot_fit_to_device)(struct hl_device *hdev);
1428 	u32 (*get_signal_cb_size)(struct hl_device *hdev);
1429 	u32 (*get_wait_cb_size)(struct hl_device *hdev);
1430 	u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
1431 			u32 size, bool eb);
1432 	u32 (*gen_wait_cb)(struct hl_device *hdev,
1433 			struct hl_gen_wait_properties *prop);
1434 	void (*reset_sob)(struct hl_device *hdev, void *data);
1435 	void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
1436 	u64 (*get_device_time)(struct hl_device *hdev);
1437 	int (*collective_wait_init_cs)(struct hl_cs *cs);
1438 	int (*collective_wait_create_jobs)(struct hl_device *hdev,
1439 			struct hl_ctx *ctx, struct hl_cs *cs,
1440 			u32 wait_queue_id, u32 collective_engine_id,
1441 			u32 encaps_signal_offset);
1442 	u64 (*scramble_addr)(struct hl_device *hdev, u64 addr);
1443 	u64 (*descramble_addr)(struct hl_device *hdev, u64 addr);
1444 	void (*ack_protection_bits_errors)(struct hl_device *hdev);
1445 	int (*get_hw_block_id)(struct hl_device *hdev, u64 block_addr,
1446 				u32 *block_size, u32 *block_id);
1447 	int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
1448 			u32 block_id, u32 block_size);
1449 	void (*enable_events_from_fw)(struct hl_device *hdev);
1450 	void (*get_msi_info)(__le32 *table);
1451 	int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
1452 	void (*init_firmware_loader)(struct hl_device *hdev);
1453 	void (*init_cpu_scrambler_dram)(struct hl_device *hdev);
1454 	void (*state_dump_init)(struct hl_device *hdev);
1455 	u32 (*get_sob_addr)(struct hl_device *hdev, u32 sob_id);
1456 	void (*set_pci_memory_regions)(struct hl_device *hdev);
1457 	u32* (*get_stream_master_qid_arr)(void);
1458 	bool (*is_valid_dram_page_size)(u32 page_size);
1459 	int (*mmu_get_real_page_size)(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
1460 					u32 page_size, u32 *real_page_size, bool is_dram_addr);
1461 	void (*get_valid_dram_page_orders)(struct hl_info_dev_memalloc_page_sizes *info);
1462 	int (*access_dev_mem)(struct hl_device *hdev, struct pci_mem_region *region,
1463 		enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type);
1464 	u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
1465 };
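
/*
 * Illustrative sketch (not taken from the driver sources): all ASIC-specific
 * behavior is reached through the hl_asic_funcs table that each ASIC file
 * fills in, e.g. a register access through the rreg/wreg callbacks (the
 * register offset below is hypothetical):
 *
 *	u32 val = hdev->asic_funcs->rreg(hdev, 0x1000);
 *
 *	hdev->asic_funcs->wreg(hdev, 0x1000, val | 0x1);
 */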
1466 
1467 
1468 /*
1469  * CONTEXTS
1470  */
1471 
1472 #define HL_KERNEL_ASID_ID	0
1473 
1474 /**
1475  * enum hl_va_range_type - virtual address range type.
1476  * @HL_VA_RANGE_TYPE_HOST: range type of host pages
1477  * @HL_VA_RANGE_TYPE_HOST_HUGE: range type of host huge pages
1478  * @HL_VA_RANGE_TYPE_DRAM: range type of dram pages
1479  */
1480 enum hl_va_range_type {
1481 	HL_VA_RANGE_TYPE_HOST,
1482 	HL_VA_RANGE_TYPE_HOST_HUGE,
1483 	HL_VA_RANGE_TYPE_DRAM,
1484 	HL_VA_RANGE_TYPE_MAX
1485 };
1486 
1487 /**
1488  * struct hl_va_range - virtual addresses range.
1489  * @lock: protects the virtual addresses list.
1490  * @list: list of virtual addresses blocks available for mappings.
1491  * @start_addr: range start address.
1492  * @end_addr: range end address.
1493  * @page_size: page size of this va range.
1494  */
1495 struct hl_va_range {
1496 	struct mutex		lock;
1497 	struct list_head	list;
1498 	u64			start_addr;
1499 	u64			end_addr;
1500 	u32			page_size;
1501 };
1502 
1503 /**
1504  * struct hl_cs_counters_atomic - command submission counters
1505  * @out_of_mem_drop_cnt: dropped due to memory allocation issue
1506  * @parsing_drop_cnt: dropped due to error in packet parsing
1507  * @queue_full_drop_cnt: dropped due to queue full
1508  * @device_in_reset_drop_cnt: dropped due to device in reset
1509  * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
1510  * @validation_drop_cnt: dropped due to error in validation
1511  */
1512 struct hl_cs_counters_atomic {
1513 	atomic64_t out_of_mem_drop_cnt;
1514 	atomic64_t parsing_drop_cnt;
1515 	atomic64_t queue_full_drop_cnt;
1516 	atomic64_t device_in_reset_drop_cnt;
1517 	atomic64_t max_cs_in_flight_drop_cnt;
1518 	atomic64_t validation_drop_cnt;
1519 };
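
/*
 * Illustrative sketch: the counters are atomic64_t so they can be bumped from
 * any context without additional locking, e.g. when a CS is dropped because
 * the device is in reset (the call site below is hypothetical):
 *
 *	atomic64_inc(&ctx->cs_counters.device_in_reset_drop_cnt);
 */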
1520 
1521 /**
1522  * struct hl_dmabuf_priv - a dma-buf private object.
1523  * @dmabuf: pointer to dma-buf object.
1524  * @ctx: pointer to the dma-buf owner's context.
1525  * @phys_pg_pack: pointer to physical page pack if the dma-buf was exported for
1526  *                memory allocation handle.
1527  * @device_address: physical address of the device's memory. Relevant only
1528  *                  if phys_pg_pack is NULL (dma-buf was exported from address).
1529  *                  The total size can be taken from the dmabuf object.
1530  */
1531 struct hl_dmabuf_priv {
1532 	struct dma_buf			*dmabuf;
1533 	struct hl_ctx			*ctx;
1534 	struct hl_vm_phys_pg_pack	*phys_pg_pack;
1535 	uint64_t			device_address;
1536 };
1537 
1538 /**
1539  * struct hl_ctx - user/kernel context.
1540  * @mem_hash: holds mapping from virtual address to virtual memory area
1541  *		descriptor (hl_vm_phys_pg_pack or hl_userptr).
1542  * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
1543  * @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
1544  * @hdev: pointer to the device structure.
1545  * @refcount: reference counter for the context. Context is released only when
1546  *		this hits 0. It is incremented on CS and CS_WAIT.
1547  * @cs_pending: array of hl fence objects representing pending CS.
1548  * @va_range: holds available virtual addresses for host and dram mappings.
1549  * @mem_hash_lock: protects the mem_hash.
1550  * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
1551  *            MMU hash or walking the PGT requires taking this lock.
1552  * @hw_block_list_lock: protects the HW block memory list.
1553  * @debugfs_list: node in debugfs list of contexts.
1554  * @hw_block_mem_list: list of HW block virtual mapped addresses.
1555  * @cs_counters: context command submission counters.
1556  * @cb_va_pool: device VA pool for command buffers which are mapped to the
1557  *              device's MMU.
1558  * @sig_mgr: encaps signals handle manager.
1559  * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
1560  *			to user so user could inquire about CS. It is used as
1561  *			index to cs_pending array.
1562  * @dram_default_hops: array that holds all hops addresses needed for default
1563  *                     DRAM mapping.
1564  * @cs_lock: spinlock to protect cs_sequence.
1565  * @dram_phys_mem: amount of used physical DRAM memory by this context.
1566  * @thread_ctx_switch_token: token to prevent multiple threads of the same
1567  *				context	from running the context switch phase.
1568  *				Only a single thread should run it.
1569  * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
1570  *				the context switch phase from moving to their
1571  *				execution phase before the context switch phase
1572  *				has finished.
1573  * @asid: context's unique address space ID in the device's MMU.
1574  * @handle: context's opaque handle for user
1575  */
1576 struct hl_ctx {
1577 	DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
1578 	DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
1579 	struct hl_fpriv			*hpriv;
1580 	struct hl_device		*hdev;
1581 	struct kref			refcount;
1582 	struct hl_fence			**cs_pending;
1583 	struct hl_va_range		*va_range[HL_VA_RANGE_TYPE_MAX];
1584 	struct mutex			mem_hash_lock;
1585 	struct mutex			mmu_lock;
1586 	struct mutex			hw_block_list_lock;
1587 	struct list_head		debugfs_list;
1588 	struct list_head		hw_block_mem_list;
1589 	struct hl_cs_counters_atomic	cs_counters;
1590 	struct gen_pool			*cb_va_pool;
1591 	struct hl_encaps_signals_mgr	sig_mgr;
1592 	u64				cs_sequence;
1593 	u64				*dram_default_hops;
1594 	spinlock_t			cs_lock;
1595 	atomic64_t			dram_phys_mem;
1596 	atomic_t			thread_ctx_switch_token;
1597 	u32				thread_ctx_switch_wait_token;
1598 	u32				asid;
1599 	u32				handle;
1600 };
1601 
1602 /**
1603  * struct hl_ctx_mgr - for handling multiple contexts.
1604  * @ctx_lock: protects ctx_handles.
1605  * @ctx_handles: idr to hold all ctx handles.
1606  */
1607 struct hl_ctx_mgr {
1608 	struct mutex		ctx_lock;
1609 	struct idr		ctx_handles;
1610 };
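
/*
 * Illustrative sketch: context handles are managed with a standard idr under
 * ctx_lock. Inserting a new context would roughly look as follows (error
 * handling omitted, local names hypothetical):
 *
 *	mutex_lock(&mgr->ctx_lock);
 *	handle = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
 *	mutex_unlock(&mgr->ctx_lock);
 */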
1611 
1612 
1613 
1614 /*
1615  * COMMAND SUBMISSIONS
1616  */
1617 
1618 /**
1619  * struct hl_userptr - memory mapping chunk information
1620  * @vm_type: type of the VM.
1621  * @job_node: linked-list node for hanging the object on the Job's list.
1622  * @pages: pointer to struct page array
1623  * @npages: size of @pages array
1624  * @sgt: pointer to the scatter-gather table that holds the pages.
1625  * @dir: for DMA unmapping, the direction must be supplied, so save it.
1626  * @debugfs_list: node in debugfs list of command submissions.
1627  * @pid: the pid of the user process owning the memory
1628  * @addr: user-space virtual address of the start of the memory area.
1629  * @size: size of the memory area to pin & map.
1630  * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
1631  */
1632 struct hl_userptr {
1633 	enum vm_type		vm_type; /* must be first */
1634 	struct list_head	job_node;
1635 	struct page		**pages;
1636 	unsigned int		npages;
1637 	struct sg_table		*sgt;
1638 	enum dma_data_direction dir;
1639 	struct list_head	debugfs_list;
1640 	pid_t			pid;
1641 	u64			addr;
1642 	u64			size;
1643 	u8			dma_mapped;
1644 };
1645 
1646 /**
1647  * struct hl_cs - command submission.
1648  * @jobs_in_queue_cnt: per each queue, maintain counter of submitted jobs.
1649  * @ctx: the context this CS belongs to.
1650  * @job_list: list of the CS's jobs in the various queues.
1651  * @job_lock: spinlock for the CS's jobs list. Needed for free_job.
1652  * @refcount: reference counter for usage of the CS.
1653  * @fence: pointer to the fence object of this CS.
1654  * @signal_fence: pointer to the fence object of the signal CS (used by wait
1655  *                CS only).
1656  * @finish_work: workqueue object to run when CS is completed by H/W.
1657  * @work_tdr: delayed work node for TDR.
1658  * @mirror_node: node in device mirror list of command submissions.
1659  * @staged_cs_node: node in the staged cs list.
1660  * @debugfs_list: node in debugfs list of command submissions.
1661  * @encaps_sig_hdl: holds the encaps signals handle.
1662  * @sequence: the sequence number of this CS.
1663  * @staged_sequence: the sequence of the staged submission this CS is part of,
1664  *                   relevant only if staged_cs is set.
1665  * @timeout_jiffies: cs timeout in jiffies.
1666  * @submission_time_jiffies: submission time of the cs
1667  * @type: CS_TYPE_*.
1668  * @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs.
1669  * @sob_addr_offset: sob offset from the configuration base address.
1670  * @initial_sob_count: count of completed signals in SOB before current submission of signal or
1671  *                     cs with encaps signals.
1672  * @submitted: true if CS was submitted to H/W.
1673  * @completed: true if CS was completed by device.
1674  * @timedout: true if CS has timed out.
1675  * @tdr_active: true if TDR was activated for this CS (to prevent
1676  *		double TDR activation).
1677  * @aborted: true if CS was aborted due to some device error.
1678  * @timestamp: true if a timestamp must be captured upon completion.
1679  * @staged_last: true if this is the last staged CS and needs completion.
1680  * @staged_first: true if this is the first staged CS and we need to receive
1681  *                timeout for this CS.
1682  * @staged_cs: true if this CS is part of a staged submission.
1683  * @skip_reset_on_timeout: true if we shall not reset the device in case
1684  *                         timeout occurs (debug scenario).
1685  * @encaps_signals: true if this CS has encaps reserved signals.
1686  */
1687 struct hl_cs {
1688 	u16			*jobs_in_queue_cnt;
1689 	struct hl_ctx		*ctx;
1690 	struct list_head	job_list;
1691 	spinlock_t		job_lock;
1692 	struct kref		refcount;
1693 	struct hl_fence		*fence;
1694 	struct hl_fence		*signal_fence;
1695 	struct work_struct	finish_work;
1696 	struct delayed_work	work_tdr;
1697 	struct list_head	mirror_node;
1698 	struct list_head	staged_cs_node;
1699 	struct list_head	debugfs_list;
1700 	struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
1701 	u64			sequence;
1702 	u64			staged_sequence;
1703 	u64			timeout_jiffies;
1704 	u64			submission_time_jiffies;
1705 	enum hl_cs_type		type;
1706 	u32			encaps_sig_hdl_id;
1707 	u32			sob_addr_offset;
1708 	u16			initial_sob_count;
1709 	u8			submitted;
1710 	u8			completed;
1711 	u8			timedout;
1712 	u8			tdr_active;
1713 	u8			aborted;
1714 	u8			timestamp;
1715 	u8			staged_last;
1716 	u8			staged_first;
1717 	u8			staged_cs;
1718 	u8			skip_reset_on_timeout;
1719 	u8			encaps_signals;
1720 };
1721 
1722 /**
1723  * struct hl_cs_job - command submission job.
1724  * @cs_node: the node to hang on the CS jobs list.
1725  * @cs: the CS this job belongs to.
1726  * @user_cb: the CB we got from the user.
1727  * @patched_cb: in case of patching, this is internal CB which is submitted on
1728  *		the queue instead of the CB we got from the IOCTL.
1729  * @finish_work: workqueue object to run when job is completed.
1730  * @userptr_list: linked-list of userptr mappings that belong to this job and
1731  *			wait for completion.
1732  * @debugfs_list: node in debugfs list of command submission jobs.
1733  * @refcount: reference counter for usage of the CS job.
1734  * @queue_type: the type of the H/W queue this job is submitted to.
1735  * @id: the id of this job inside a CS.
1736  * @hw_queue_id: the id of the H/W queue this job is submitted to.
1737  * @user_cb_size: the actual size of the CB we got from the user.
1738  * @job_cb_size: the actual size of the CB that we put on the queue.
1739  * @encaps_sig_wait_offset: encapsulated signals offset, which allow user
1740  *                          to wait on part of the reserved signals.
1741  * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
1742  *                          handle to a kernel-allocated CB object, false
1743  *                          otherwise (SRAM/DRAM/host address).
1744  * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
1745  *                    info is needed later, when adding the 2xMSG_PROT at the
1746  *                    end of the JOB, to know which barriers to put in the
1747  *                    MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
1748  *                    have streams so the engine can't be busy by another
1749  *                    stream.
1750  */
1751 struct hl_cs_job {
1752 	struct list_head	cs_node;
1753 	struct hl_cs		*cs;
1754 	struct hl_cb		*user_cb;
1755 	struct hl_cb		*patched_cb;
1756 	struct work_struct	finish_work;
1757 	struct list_head	userptr_list;
1758 	struct list_head	debugfs_list;
1759 	struct kref		refcount;
1760 	enum hl_queue_type	queue_type;
1761 	u32			id;
1762 	u32			hw_queue_id;
1763 	u32			user_cb_size;
1764 	u32			job_cb_size;
1765 	u32			encaps_sig_wait_offset;
1766 	u8			is_kernel_allocated_cb;
1767 	u8			contains_dma_pkt;
1768 };
1769 
1770 /**
1771  * struct hl_cs_parser - command submission parser properties.
1772  * @user_cb: the CB we got from the user.
1773  * @patched_cb: in case of patching, this is internal CB which is submitted on
1774  *		the queue instead of the CB we got from the IOCTL.
1775  * @job_userptr_list: linked-list of userptr mappings that belong to the related
1776  *			job and wait for completion.
1777  * @cs_sequence: the sequence number of the related CS.
1778  * @queue_type: the type of the H/W queue this job is submitted to.
1779  * @ctx_id: the ID of the context the related CS belongs to.
1780  * @hw_queue_id: the id of the H/W queue this job is submitted to.
1781  * @user_cb_size: the actual size of the CB we got from the user.
1782  * @patched_cb_size: the size of the CB after parsing.
1783  * @job_id: the id of the related job inside the related CS.
1784  * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
1785  *                          handle to a kernel-allocated CB object, false
1786  *                          otherwise (SRAM/DRAM/host address).
1787  * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
1788  *                    info is needed later, when adding the 2xMSG_PROT at the
1789  *                    end of the JOB, to know which barriers to put in the
1790  *                    MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
1791  *                    have streams so the engine can't be busy by another
1792  *                    stream.
1793  * @completion: true if we need completion for this CS.
1794  */
1795 struct hl_cs_parser {
1796 	struct hl_cb		*user_cb;
1797 	struct hl_cb		*patched_cb;
1798 	struct list_head	*job_userptr_list;
1799 	u64			cs_sequence;
1800 	enum hl_queue_type	queue_type;
1801 	u32			ctx_id;
1802 	u32			hw_queue_id;
1803 	u32			user_cb_size;
1804 	u32			patched_cb_size;
1805 	u8			job_id;
1806 	u8			is_kernel_allocated_cb;
1807 	u8			contains_dma_pkt;
1808 	u8			completion;
1809 };
1810 
1811 /*
1812  * MEMORY STRUCTURE
1813  */
1814 
1815 /**
1816  * struct hl_vm_hash_node - hash element from virtual address to virtual
1817  *				memory area descriptor (hl_vm_phys_pg_pack or
1818  *				hl_userptr).
1819  * @node: node to hang on the hash table in context object.
1820  * @vaddr: key virtual address.
1821  * @ptr: value pointer (hl_vm_phys_pg_pack or hl_userptr).
1822  */
1823 struct hl_vm_hash_node {
1824 	struct hlist_node	node;
1825 	u64			vaddr;
1826 	void			*ptr;
1827 };
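
/*
 * Illustrative sketch: a node is keyed by its virtual address and hung on the
 * per-context mem_hash table with the generic hashtable helpers (local names
 * are hypothetical):
 *
 *	hnode->vaddr = vaddr;
 *	hnode->ptr = phys_pg_pack;
 *	hash_add(ctx->mem_hash, &hnode->node, vaddr);
 */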
1828 
1829 /**
1830  * struct hl_vm_hw_block_list_node - list element from user virtual address to
1831  *				HW block id.
1832  * @node: node to hang on the list in context object.
1833  * @ctx: the context this node belongs to.
1834  * @vaddr: virtual address of the HW block.
1835  * @size: size of the block.
1836  * @id: HW block id (handle).
1837  */
1838 struct hl_vm_hw_block_list_node {
1839 	struct list_head	node;
1840 	struct hl_ctx		*ctx;
1841 	unsigned long		vaddr;
1842 	u32			size;
1843 	u32			id;
1844 };
1845 
1846 /**
1847  * struct hl_vm_phys_pg_pack - physical page pack.
1848  * @vm_type: describes the type of the virtual area descriptor.
1849  * @pages: the physical page array.
1850  * @npages: num physical pages in the pack.
1851  * @total_size: total size of all the pages in this list.
1852  * @node: used to attach to deletion list that is used when all the allocations are cleared
1853  *        at the teardown of the context.
1854  * @mapping_cnt: number of shared mappings.
1855  * @exporting_cnt: number of dma-buf exporting.
1856  * @asid: the context related to this list.
1857  * @page_size: size of each page in the pack.
1858  * @flags: HL_MEM_* flags related to this list.
1859  * @handle: the provided handle related to this list.
1860  * @offset: offset from the first page.
1861  * @contiguous: is contiguous physical memory.
1862  * @created_from_userptr: is product of host virtual address.
1863  */
1864 struct hl_vm_phys_pg_pack {
1865 	enum vm_type		vm_type; /* must be first */
1866 	u64			*pages;
1867 	u64			npages;
1868 	u64			total_size;
1869 	struct list_head	node;
1870 	atomic_t		mapping_cnt;
1871 	u32			exporting_cnt;
1872 	u32			asid;
1873 	u32			page_size;
1874 	u32			flags;
1875 	u32			handle;
1876 	u32			offset;
1877 	u8			contiguous;
1878 	u8			created_from_userptr;
1879 };
1880 
1881 /**
1882  * struct hl_vm_va_block - virtual range block information.
1883  * @node: node to hang on the virtual range list in context object.
1884  * @start: virtual range start address.
1885  * @end: virtual range end address.
1886  * @size: virtual range size.
1887  */
1888 struct hl_vm_va_block {
1889 	struct list_head	node;
1890 	u64			start;
1891 	u64			end;
1892 	u64			size;
1893 };
1894 
1895 /**
1896  * struct hl_vm - virtual memory manager for MMU.
1897  * @dram_pg_pool: pool for DRAM physical pages of 2MB.
1898  * @dram_pg_pool_refcount: reference counter for the pool usage.
1899  * @idr_lock: protects the phys_pg_pack_handles.
1900  * @phys_pg_pack_handles: idr to hold all device allocations handles.
1901  * @init_done: whether initialization was done. We need this because VM
1902  *		initialization might be skipped during device initialization.
1903  */
1904 struct hl_vm {
1905 	struct gen_pool		*dram_pg_pool;
1906 	struct kref		dram_pg_pool_refcount;
1907 	spinlock_t		idr_lock;
1908 	struct idr		phys_pg_pack_handles;
1909 	u8			init_done;
1910 };
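
/*
 * Illustrative sketch: allocation handles live in the phys_pg_pack_handles
 * idr, which is protected by the idr_lock spinlock, so insertions must use an
 * atomic allocation (error handling omitted):
 *
 *	spin_lock(&vm->idr_lock);
 *	handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
 *				GFP_ATOMIC);
 *	spin_unlock(&vm->idr_lock);
 */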
1911 
1912 
1913 /*
1914  * DEBUG, PROFILING STRUCTURE
1915  */
1916 
1917 /**
1918  * struct hl_debug_params - Coresight debug parameters.
1919  * @input: pointer to component specific input parameters.
1920  * @output: pointer to component specific output parameters.
1921  * @output_size: size of output buffer.
1922  * @reg_idx: relevant register ID.
1923  * @op: component operation to execute.
1924  * @enable: true if to enable component debugging, false otherwise.
1925  */
1926 struct hl_debug_params {
1927 	void *input;
1928 	void *output;
1929 	u32 output_size;
1930 	u32 reg_idx;
1931 	u32 op;
1932 	bool enable;
1933 };
1934 
1935 /**
1936  * struct hl_notifier_event - holds the notifier data structure
1937  * @eventfd: the event file descriptor to raise the notifications
1938  * @lock: mutex lock to protect the notifier data flows
1939  * @events_mask: indicates the bitmap events
1940  */
1941 struct hl_notifier_event {
1942 	struct eventfd_ctx	*eventfd;
1943 	struct mutex		lock;
1944 	u64			events_mask;
1945 };
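
/*
 * Illustrative sketch: raising a notification towards user-space consists of
 * updating events_mask and signalling the eventfd, both under the notifier
 * lock (the event bit below is hypothetical):
 *
 *	mutex_lock(&notifier->lock);
 *	notifier->events_mask |= event_bit;
 *	if (notifier->eventfd)
 *		eventfd_signal(notifier->eventfd, 1);
 *	mutex_unlock(&notifier->lock);
 */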
1946 
1947 /*
1948  * FILE PRIVATE STRUCTURE
1949  */
1950 
1951 /**
1952  * struct hl_fpriv - process information stored in FD private data.
1953  * @hdev: habanalabs device structure.
1954  * @filp: pointer to the given file structure.
1955  * @taskpid: current process ID.
1956  * @ctx: current executing context. TODO: remove for multiple ctx per process
1957  * @ctx_mgr: context manager to handle multiple context for this FD.
1958  * @mem_mgr: manager descriptor for memory exportable via mmap
1959  * @notifier_event: notifier eventfd towards user process
1960  * @debugfs_list: list of relevant ASIC debugfs.
1961  * @dev_node: node in the device list of file private data
1962  * @refcount: number of related contexts.
1963  * @restore_phase_mutex: lock for context switch and restore phase.
1964  */
1965 struct hl_fpriv {
1966 	struct hl_device		*hdev;
1967 	struct file			*filp;
1968 	struct pid			*taskpid;
1969 	struct hl_ctx			*ctx;
1970 	struct hl_ctx_mgr		ctx_mgr;
1971 	struct hl_mem_mgr		mem_mgr;
1972 	struct hl_notifier_event	notifier_event;
1973 	struct list_head		debugfs_list;
1974 	struct list_head		dev_node;
1975 	struct kref			refcount;
1976 	struct mutex			restore_phase_mutex;
1977 };
1978 
1979 
1980 /*
1981  * DebugFS
1982  */
1983 
1984 /**
1985  * struct hl_info_list - debugfs file ops.
1986  * @name: file name.
1987  * @show: function to output information.
1988  * @write: function to write to the file.
1989  */
1990 struct hl_info_list {
1991 	const char	*name;
1992 	int		(*show)(struct seq_file *s, void *data);
1993 	ssize_t		(*write)(struct file *file, const char __user *buf,
1994 				size_t count, loff_t *f_pos);
1995 };
1996 
1997 /**
1998  * struct hl_debugfs_entry - debugfs dentry wrapper.
1999  * @info_ent: dentry related ops.
2000  * @dev_entry: ASIC specific debugfs manager.
2001  */
2002 struct hl_debugfs_entry {
2003 	const struct hl_info_list	*info_ent;
2004 	struct hl_dbg_device_entry	*dev_entry;
2005 };
2006 
2007 /**
2008  * struct hl_dbg_device_entry - ASIC specific debugfs manager.
2009  * @root: root dentry.
2010  * @hdev: habanalabs device structure.
2011  * @entry_arr: array of available hl_debugfs_entry.
2012  * @file_list: list of available debugfs files.
2013  * @file_mutex: protects file_list.
2014  * @cb_list: list of available CBs.
2015  * @cb_spinlock: protects cb_list.
2016  * @cs_list: list of available CSs.
2017  * @cs_spinlock: protects cs_list.
2018  * @cs_job_list: list of available CB jobs.
2019  * @cs_job_spinlock: protects cs_job_list.
2020  * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
2021  * @userptr_spinlock: protects userptr_list.
2022  * @ctx_mem_hash_list: list of available contexts with MMU mappings.
2023  * @ctx_mem_hash_spinlock: protects ctx_mem_hash_list.
2024  * @data_dma_blob_desc: data DMA descriptor of blob.
2025  * @mon_dump_blob_desc: monitor dump descriptor of blob.
2026  * @state_dump: data of the system states in case of a bad cs.
2027  * @state_dump_sem: protects state_dump.
2028  * @addr: next address to read/write from/to in read/write32.
2029  * @mmu_addr: next virtual address to translate to physical address in mmu_show.
2030  * @userptr_lookup: the target user ptr to look up on demand.
2031  * @memory_scrub_val: the value to which the DRAM will be scrubbed using the scrub_device_dram callback
2032  * @mmu_asid: ASID to use while translating in mmu_show.
2033  * @state_dump_head: index of the latest state dump
2034  * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
2035  * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
2036  * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
2037  * @i2c_len: generic u8 debugfs file for length value to use in i2c_data_read.
2038  */
2039 struct hl_dbg_device_entry {
2040 	struct dentry			*root;
2041 	struct hl_device		*hdev;
2042 	struct hl_debugfs_entry		*entry_arr;
2043 	struct list_head		file_list;
2044 	struct mutex			file_mutex;
2045 	struct list_head		cb_list;
2046 	spinlock_t			cb_spinlock;
2047 	struct list_head		cs_list;
2048 	spinlock_t			cs_spinlock;
2049 	struct list_head		cs_job_list;
2050 	spinlock_t			cs_job_spinlock;
2051 	struct list_head		userptr_list;
2052 	spinlock_t			userptr_spinlock;
2053 	struct list_head		ctx_mem_hash_list;
2054 	spinlock_t			ctx_mem_hash_spinlock;
2055 	struct debugfs_blob_wrapper	data_dma_blob_desc;
2056 	struct debugfs_blob_wrapper	mon_dump_blob_desc;
2057 	char				*state_dump[HL_STATE_DUMP_HIST_LEN];
2058 	struct rw_semaphore		state_dump_sem;
2059 	u64				addr;
2060 	u64				mmu_addr;
2061 	u64				userptr_lookup;
2062 	u64				memory_scrub_val;
2063 	u32				mmu_asid;
2064 	u32				state_dump_head;
2065 	u8				i2c_bus;
2066 	u8				i2c_addr;
2067 	u8				i2c_reg;
2068 	u8				i2c_len;
2069 };
2070 
2071 /**
2072  * struct hl_hw_obj_name_entry - single hw object name, member of
2073  * hl_state_dump_specs
2074  * @node: link to the containing hash table
2075  * @name: hw object name
2076  * @id: object identifier
2077  */
2078 struct hl_hw_obj_name_entry {
2079 	struct hlist_node	node;
2080 	const char		*name;
2081 	u32			id;
2082 };
2083 
2084 enum hl_state_dump_specs_props {
2085 	SP_SYNC_OBJ_BASE_ADDR,
2086 	SP_NEXT_SYNC_OBJ_ADDR,
2087 	SP_SYNC_OBJ_AMOUNT,
2088 	SP_MON_OBJ_WR_ADDR_LOW,
2089 	SP_MON_OBJ_WR_ADDR_HIGH,
2090 	SP_MON_OBJ_WR_DATA,
2091 	SP_MON_OBJ_ARM_DATA,
2092 	SP_MON_OBJ_STATUS,
2093 	SP_MONITORS_AMOUNT,
2094 	SP_TPC0_CMDQ,
2095 	SP_TPC0_CFG_SO,
2096 	SP_NEXT_TPC,
2097 	SP_MME_CMDQ,
2098 	SP_MME_CFG_SO,
2099 	SP_NEXT_MME,
2100 	SP_DMA_CMDQ,
2101 	SP_DMA_CFG_SO,
2102 	SP_DMA_QUEUES_OFFSET,
2103 	SP_NUM_OF_MME_ENGINES,
2104 	SP_SUB_MME_ENG_NUM,
2105 	SP_NUM_OF_DMA_ENGINES,
2106 	SP_NUM_OF_TPC_ENGINES,
2107 	SP_ENGINE_NUM_OF_QUEUES,
2108 	SP_ENGINE_NUM_OF_STREAMS,
2109 	SP_ENGINE_NUM_OF_FENCES,
2110 	SP_FENCE0_CNT_OFFSET,
2111 	SP_FENCE0_RDATA_OFFSET,
2112 	SP_CP_STS_OFFSET,
2113 	SP_NUM_CORES,
2114 
2115 	SP_MAX
2116 };
2117 
2118 enum hl_sync_engine_type {
2119 	ENGINE_TPC,
2120 	ENGINE_DMA,
2121 	ENGINE_MME,
2122 };
2123 
2124 /**
2125  * struct hl_mon_state_dump - represents a state dump of a single monitor
2126  * @id: monitor id
2127  * @wr_addr_low: address monitor will write to, low bits
2128  * @wr_addr_high: address monitor will write to, high bits
2129  * @wr_data: data monitor will write
2130  * @arm_data: register value containing monitor configuration
2131  * @status: monitor status
2132  */
2133 struct hl_mon_state_dump {
2134 	u32		id;
2135 	u32		wr_addr_low;
2136 	u32		wr_addr_high;
2137 	u32		wr_data;
2138 	u32		arm_data;
2139 	u32		status;
2140 };
2141 
2142 /**
2143  * struct hl_sync_to_engine_map_entry - sync object id to engine mapping entry
2144  * @engine_type: type of the engine
2145  * @engine_id: id of the engine
2146  * @sync_id: id of the sync object
2147  */
2148 struct hl_sync_to_engine_map_entry {
2149 	struct hlist_node		node;
2150 	enum hl_sync_engine_type	engine_type;
2151 	u32				engine_id;
2152 	u32				sync_id;
2153 };
2154 
2155 /**
2156  * struct hl_sync_to_engine_map - maps sync object id to associated engine id
2157  * @tb: hash table containing the mapping, each element is of type
2158  *      struct hl_sync_to_engine_map_entry
2159  */
2160 struct hl_sync_to_engine_map {
2161 	DECLARE_HASHTABLE(tb, SYNC_TO_ENGINE_HASH_TABLE_BITS);
2162 };
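
/*
 * Illustrative sketch: walking a generated map with the standard hashtable
 * iterator (bkt and entry are hypothetical locals):
 *
 *	struct hl_sync_to_engine_map_entry *entry;
 *	int bkt;
 *
 *	hash_for_each(map->tb, bkt, entry, node)
 *		pr_debug("sync %u -> engine %u\n", entry->sync_id,
 *			entry->engine_id);
 */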
2163 
2164 /**
2165  * struct hl_state_dump_specs_funcs - virtual functions used by the state dump
2166  * @gen_sync_to_engine_map: generate a hash map from sync obj id to its engine
2167  * @print_single_monitor: format monitor data as string
2168  * @monitor_valid: return true if given monitor dump is valid
2169  * @print_fences_single_engine: format fences data as string
2170  */
2171 struct hl_state_dump_specs_funcs {
2172 	int (*gen_sync_to_engine_map)(struct hl_device *hdev,
2173 				struct hl_sync_to_engine_map *map);
2174 	int (*print_single_monitor)(char **buf, size_t *size, size_t *offset,
2175 				    struct hl_device *hdev,
2176 				    struct hl_mon_state_dump *mon);
2177 	int (*monitor_valid)(struct hl_mon_state_dump *mon);
2178 	int (*print_fences_single_engine)(struct hl_device *hdev,
2179 					u64 base_offset,
2180 					u64 status_base_offset,
2181 					enum hl_sync_engine_type engine_type,
2182 					u32 engine_id, char **buf,
2183 					size_t *size, size_t *offset);
2184 };
2185 
2186 /**
2187  * struct hl_state_dump_specs - defines ASIC known hw objects names
2188  * @so_id_to_str_tb: sync objects names index table
2189  * @monitor_id_to_str_tb: monitors names index table
2190  * @funcs: virtual functions used for state dump
2191  * @sync_namager_names: readable names for sync manager if available (ex: N_E)
2192  * @props: pointer to a per asic const props array required for state dump
2193  */
2194 struct hl_state_dump_specs {
2195 	DECLARE_HASHTABLE(so_id_to_str_tb, OBJ_NAMES_HASH_TABLE_BITS);
2196 	DECLARE_HASHTABLE(monitor_id_to_str_tb, OBJ_NAMES_HASH_TABLE_BITS);
2197 	struct hl_state_dump_specs_funcs	funcs;
2198 	const char * const			*sync_namager_names;
2199 	s64					*props;
2200 };
2201 
2202 
2203 /*
2204  * DEVICES
2205  */
2206 
2207 #define HL_STR_MAX	32
2208 
2209 #define HL_DEV_STS_MAX (HL_DEVICE_STATUS_LAST + 1)
2210 
2211 /* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
2212  * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
2213  */
2214 #define HL_MAX_MINORS	256
2215 
2216 /*
2217  * Registers read & write functions.
2218  */
2219 
2220 u32 hl_rreg(struct hl_device *hdev, u32 reg);
2221 void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
2222 
2223 #define RREG32(reg) hdev->asic_funcs->rreg(hdev, (reg))
2224 #define WREG32(reg, v) hdev->asic_funcs->wreg(hdev, (reg), (v))
2225 #define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n",	\
2226 			hdev->asic_funcs->rreg(hdev, (reg)))
2227 
2228 #define WREG32_P(reg, val, mask)				\
2229 	do {							\
2230 		u32 tmp_ = RREG32(reg);				\
2231 		tmp_ &= (mask);					\
2232 		tmp_ |= ((val) & ~(mask));			\
2233 		WREG32(reg, tmp_);				\
2234 	} while (0)
2235 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
2236 #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
2237 
2238 #define RMWREG32(reg, val, mask)				\
2239 	do {							\
2240 		u32 tmp_ = RREG32(reg);				\
2241 		tmp_ &= ~(mask);				\
2242 		tmp_ |= ((val) << __ffs(mask));			\
2243 		WREG32(reg, tmp_);				\
2244 	} while (0)
2245 
2246 #define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask))
2247 
2248 #define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
2249 #define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
2250 #define WREG32_FIELD(reg, offset, field, val)	\
2251 	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & \
2252 				~REG_FIELD_MASK(reg, field)) | \
2253 				(val) << REG_FIELD_SHIFT(reg, field))
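
/*
 * Illustrative sketch: RMWREG32() performs a read-modify-write of a field
 * described by a contiguous mask, shifting the value into the field position
 * with __ffs(). For a hypothetical 4-bit field at bits [7:4] of a
 * hypothetical register:
 *
 *	RMWREG32(0x1000, 0x5, 0xF0);
 *
 * which leaves all other bits of the register untouched and sets bits [7:4]
 * to 0x5.
 */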
2254 
2255 /* Timeout should be longer when working with simulator but cap the
2256  * increased timeout to some maximum
2257  */
2258 #define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
2259 ({ \
2260 	ktime_t __timeout; \
2261 	if (hdev->pdev) \
2262 		__timeout = ktime_add_us(ktime_get(), timeout_us); \
2263 	else \
2264 		__timeout = ktime_add_us(ktime_get(),\
2265 				min((u64)(timeout_us * 10), \
2266 					(u64) HL_SIM_MAX_TIMEOUT_US)); \
2267 	might_sleep_if(sleep_us); \
2268 	for (;;) { \
2269 		(val) = RREG32(addr); \
2270 		if (cond) \
2271 			break; \
2272 		if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
2273 			(val) = RREG32(addr); \
2274 			break; \
2275 		} \
2276 		if (sleep_us) \
2277 			usleep_range((sleep_us >> 2) + 1, sleep_us); \
2278 	} \
2279 	(cond) ? 0 : -ETIMEDOUT; \
2280 })
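
/*
 * Illustrative sketch: polling a status register until a condition is met or
 * the timeout expires (register offset and condition are hypothetical):
 *
 *	u32 status;
 *	int rc;
 *
 *	rc = hl_poll_timeout(hdev, 0x1000, status, (status & 0x1), 1000,
 *				1000000);
 *	if (rc)
 *		dev_err(hdev->dev, "timed out waiting for status\n");
 */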
2281 
2282 /*
2283  * address in this macro always points to a memory location in the
2284  * host's (server's) memory. That location is updated asynchronously
2285  * either by the direct access of the device or by another core.
2286  *
2287  * To work both in LE and BE architectures, we need to distinguish between the
2288  * two states (device or another core updates the memory location). Therefore,
2289  * if mem_written_by_device is true, the host memory being polled will be
2290  * updated directly by the device. If false, the host memory being polled will
2291  * be updated by host CPU. Required so host knows whether or not the memory
2292  * might need to be byte-swapped before returning value to caller.
2293  */
2294 #define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \
2295 				mem_written_by_device) \
2296 ({ \
2297 	ktime_t __timeout; \
2298 	if (hdev->pdev) \
2299 		__timeout = ktime_add_us(ktime_get(), timeout_us); \
2300 	else \
2301 		__timeout = ktime_add_us(ktime_get(),\
2302 				min((u64)(timeout_us * 10), \
2303 					(u64) HL_SIM_MAX_TIMEOUT_US)); \
2304 	might_sleep_if(sleep_us); \
2305 	for (;;) { \
2306 		/* Verify we read updates done by other cores or by device */ \
2307 		mb(); \
2308 		(val) = *((u32 *)(addr)); \
2309 		if (mem_written_by_device) \
2310 			(val) = le32_to_cpu(*(__le32 *) &(val)); \
2311 		if (cond) \
2312 			break; \
2313 		if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
2314 			(val) = *((u32 *)(addr)); \
2315 			if (mem_written_by_device) \
2316 				(val) = le32_to_cpu(*(__le32 *) &(val)); \
2317 			break; \
2318 		} \
2319 		if (sleep_us) \
2320 			usleep_range((sleep_us >> 2) + 1, sleep_us); \
2321 	} \
2322 	(cond) ? 0 : -ETIMEDOUT; \
2323 })
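
/*
 * Illustrative sketch: polling a host memory word that the device writes in
 * little-endian, hence mem_written_by_device is true (the buffer and the
 * expected value are hypothetical):
 *
 *	u32 val;
 *	int rc;
 *
 *	rc = hl_poll_timeout_memory(hdev, kernel_addr, val, (val == 0xCAFE),
 *				100, 1000000, true);
 */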
2324 
2325 #define hl_poll_timeout_device_memory(hdev, addr, val, cond, sleep_us, \
2326 					timeout_us) \
2327 ({ \
2328 	ktime_t __timeout; \
2329 	if (hdev->pdev) \
2330 		__timeout = ktime_add_us(ktime_get(), timeout_us); \
2331 	else \
2332 		__timeout = ktime_add_us(ktime_get(),\
2333 				min((u64)(timeout_us * 10), \
2334 					(u64) HL_SIM_MAX_TIMEOUT_US)); \
2335 	might_sleep_if(sleep_us); \
2336 	for (;;) { \
2337 		(val) = readl(addr); \
2338 		if (cond) \
2339 			break; \
2340 		if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
2341 			(val) = readl(addr); \
2342 			break; \
2343 		} \
2344 		if (sleep_us) \
2345 			usleep_range((sleep_us >> 2) + 1, sleep_us); \
2346 	} \
2347 	(cond) ? 0 : -ETIMEDOUT; \
2348 })
2349 
2350 struct hwmon_chip_info;
2351 
2352 /**
2353  * struct hl_device_reset_work - reset workqueue task wrapper.
2354  * @wq: work queue for device reset procedure.
2355  * @reset_work: reset work to be done.
2356  * @hdev: habanalabs device structure.
2357  * @flags: reset flags.
2358  */
2359 struct hl_device_reset_work {
2360 	struct workqueue_struct		*wq;
2361 	struct delayed_work		reset_work;
2362 	struct hl_device		*hdev;
2363 	u32				flags;
2364 };
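
/*
 * Illustrative sketch: the reset work is queued on its dedicated workqueue,
 * optionally with a delay (here queued immediately):
 *
 *	queue_delayed_work(device_reset_work->wq,
 *			&device_reset_work->reset_work, 0);
 */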
2365 
2366 /**
2367  * struct hr_mmu_hop_addrs - used for holding per-device host-resident mmu hop
2368  * information.
2369  * @virt_addr: the virtual address of the hop.
2370  * @phys_addr: the physical address of the hop (used by the device-mmu).
2371  * @shadow_addr: The shadow of the hop used by the driver for walking the hops.
2372  */
2373 struct hr_mmu_hop_addrs {
2374 	u64 virt_addr;
2375 	u64 phys_addr;
2376 	u64 shadow_addr;
2377 };
2378 
2379 /**
2380  * struct hl_mmu_hr_priv - used for holding per-device mmu host-resident
2381  * page-table internal information.
2382  * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
2383  * @mmu_shadow_hop0: shadow array of hop0 tables.
2384  */
2385 struct hl_mmu_hr_priv {
2386 	struct gen_pool *mmu_pgt_pool;
2387 	struct hr_mmu_hop_addrs *mmu_shadow_hop0;
2388 };
2389 
2390 /**
2391  * struct hl_mmu_dr_priv - used for holding per-device mmu device-resident
2392  * page-table internal information.
2393  * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
2394  * @mmu_shadow_hop0: shadow array of hop0 tables.
2395  */
2396 struct hl_mmu_dr_priv {
2397 	struct gen_pool *mmu_pgt_pool;
2398 	void *mmu_shadow_hop0;
2399 };
2400 
2401 /**
2402  * struct hl_mmu_priv - used for holding per-device mmu internal information.
2403  * @dr: information on the device-resident MMU, when exists.
2404  * @hr: information on the host-resident MMU, when exists.
2405  */
2406 struct hl_mmu_priv {
2407 	struct hl_mmu_dr_priv dr;
2408 	struct hl_mmu_hr_priv hr;
2409 };
2410 
2411 /**
2412  * struct hl_mmu_per_hop_info - A structure describing one TLB HOP and its entry
2413  *                that was created in order to translate a virtual address to a
2414  *                physical one.
2415  * @hop_addr: The address of the hop.
2416  * @hop_pte_addr: The address of the hop entry.
2417  * @hop_pte_val: The value in the hop entry.
2418  */
2419 struct hl_mmu_per_hop_info {
2420 	u64 hop_addr;
2421 	u64 hop_pte_addr;
2422 	u64 hop_pte_val;
2423 };
2424 
2425 /**
2426  * struct hl_mmu_hop_info - A structure describing the TLB hops and their
2427  * hop-entries that were created in order to translate a virtual address to a
2428  * physical one.
2429  * @scrambled_vaddr: The value of the virtual address after scrambling. This
2430  *                   address replaces the original virtual-address when mapped
2431  *                   in the MMU tables.
2432  * @unscrambled_paddr: The un-scrambled physical address.
2433  * @hop_info: Array holding the per-hop information used for the translation.
2434  * @used_hops: The number of hops used for the translation.
2435  * @range_type: virtual address range type.
2436  */
2437 struct hl_mmu_hop_info {
2438 	u64 scrambled_vaddr;
2439 	u64 unscrambled_paddr;
2440 	struct hl_mmu_per_hop_info hop_info[MMU_ARCH_5_HOPS];
2441 	u32 used_hops;
2442 	enum hl_va_range_type range_type;
2443 };
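
/*
 * Illustrative sketch: after a successful translation, used_hops bounds the
 * valid entries in hop_info, so a caller can dump the walk as follows (hops
 * is a hypothetical, already-filled structure):
 *
 *	int i;
 *
 *	for (i = 0 ; i < hops.used_hops ; i++)
 *		pr_debug("hop %d: pte 0x%llx = 0x%llx\n", i,
 *			hops.hop_info[i].hop_pte_addr,
 *			hops.hop_info[i].hop_pte_val);
 */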
2444 
2445 /**
2446  * struct hl_mmu_funcs - Device related MMU functions.
2447  * @init: initialize the MMU module.
2448  * @fini: release the MMU module.
2449  * @ctx_init: Initialize a context for using the MMU module.
2450  * @ctx_fini: disable a ctx from using the mmu module.
2451  * @map: maps a virtual address to physical address for a context.
2452  * @unmap: unmap a virtual address of a context.
2453  * @flush: flush all writes from all cores to reach device MMU.
2454  * @swap_out: marks all mappings of the given context as swapped out.
2455  * @swap_in: marks all mappings of the given context as swapped in.
2456  * @get_tlb_info: returns the list of hops and hop-entries used that were
2457  *                created in order to translate the given virtual address to a
2458  *                physical one.
2459  */
2460 struct hl_mmu_funcs {
2461 	int (*init)(struct hl_device *hdev);
2462 	void (*fini)(struct hl_device *hdev);
2463 	int (*ctx_init)(struct hl_ctx *ctx);
2464 	void (*ctx_fini)(struct hl_ctx *ctx);
2465 	int (*map)(struct hl_ctx *ctx,
2466 			u64 virt_addr, u64 phys_addr, u32 page_size,
2467 			bool is_dram_addr);
2468 	int (*unmap)(struct hl_ctx *ctx,
2469 			u64 virt_addr, bool is_dram_addr);
2470 	void (*flush)(struct hl_ctx *ctx);
2471 	void (*swap_out)(struct hl_ctx *ctx);
2472 	void (*swap_in)(struct hl_ctx *ctx);
2473 	int (*get_tlb_info)(struct hl_ctx *ctx,
2474 			u64 virt_addr, struct hl_mmu_hop_info *hops);
2475 };
2476 
2477 /**
2478  * struct hl_prefetch_work - prefetch work structure handler
2479  * @pf_work: actual work struct.
2480  * @ctx: compute context.
2481  * @va: virtual address to pre-fetch.
2482  * @size: pre-fetch size.
2483  * @flags: operation flags.
2484  * @asid: ASID for maintenance operation.
2485  */
2486 struct hl_prefetch_work {
2487 	struct work_struct	pf_work;
2488 	struct hl_ctx		*ctx;
2489 	u64			va;
2490 	u64			size;
2491 	u32			flags;
2492 	u32			asid;
2493 };
2494 
2495 /*
2496  * number of user contexts allowed to call wait_for_multi_cs ioctl in
2497  * parallel
2498  */
2499 #define MULTI_CS_MAX_USER_CTX	2
2500 
2501 /**
2502  * struct multi_cs_completion - multi CS wait completion.
2503  * @completion: completion of any of the CS in the list
2504  * @lock: spinlock for the completion structure
2505  * @timestamp: timestamp for the multi-CS completion
2506  * @stream_master_qid_map: bitmap of all stream masters on which the multi-CS
2507  *                        is waiting
2508  * @used: 1 if in use, otherwise 0
2509  */
2510 struct multi_cs_completion {
2511 	struct completion	completion;
2512 	spinlock_t		lock;
2513 	s64			timestamp;
2514 	u32			stream_master_qid_map;
2515 	u8			used;
2516 };
2517 
2518 /**
2519  * struct multi_cs_data - internal data for multi CS call
2520  * @ctx: pointer to the context structure
2521  * @fence_arr: array of fences of all CSs
2522  * @seq_arr: array of CS sequence numbers
2523  * @timeout_jiffies: timeout in jiffies for waiting for CS to complete
2524  * @timestamp: timestamp of first completed CS
2525  * @wait_status: wait for CS status
2526  * @completion_bitmap: bitmap of completed CSs (1- completed, otherwise 0)
2527  * @arr_len: fence_arr and seq_arr array length
2528  * @gone_cs: indication of gone CS (1- there was gone CS, otherwise 0)
2529  * @update_ts: update timestamp. 1- update the timestamp, otherwise 0.
2530  */
2531 struct multi_cs_data {
2532 	struct hl_ctx	*ctx;
2533 	struct hl_fence	**fence_arr;
2534 	u64		*seq_arr;
2535 	s64		timeout_jiffies;
2536 	s64		timestamp;
2537 	long		wait_status;
2538 	u32		completion_bitmap;
2539 	u8		arr_len;
2540 	u8		gone_cs;
2541 	u8		update_ts;
2542 };
2543 
2544 /**
2545  * struct hl_clk_throttle_timestamp - current/last clock throttling timestamp
2546  * @start: timestamp taken when 'start' event is received in driver
2547  * @end: timestamp taken when 'end' event is received in driver
2548  */
2549 struct hl_clk_throttle_timestamp {
2550 	ktime_t		start;
2551 	ktime_t		end;
2552 };
2553 
2554 /**
2555  * struct hl_clk_throttle - keeps current/last clock throttling timestamps
2556  * @timestamp: timestamp taken by driver and firmware, index 0 refers to POWER,
2557  *             index 1 refers to THERMAL
2558  * @lock: protects this structure as it can be accessed from both event queue
2559  *        context and info_ioctl context
2560  * @current_reason: bitmask represents the current clk throttling reasons
2561  * @aggregated_reason: bitmask represents aggregated clk throttling reasons since driver load
2562  */
2563 struct hl_clk_throttle {
2564 	struct hl_clk_throttle_timestamp timestamp[HL_CLK_THROTTLE_TYPE_MAX];
2565 	struct mutex	lock;
2566 	u32		current_reason;
2567 	u32		aggregated_reason;
2568 };
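
/*
 * Illustrative sketch: a throttling "start" event updates both the current
 * and the aggregated reason bitmasks under the lock. This assumes the
 * hl_device member is named clk_throttling as documented further below; the
 * reason bit is hypothetical:
 *
 *	mutex_lock(&hdev->clk_throttling.lock);
 *	hdev->clk_throttling.current_reason |= reason_bit;
 *	hdev->clk_throttling.aggregated_reason |= reason_bit;
 *	mutex_unlock(&hdev->clk_throttling.lock);
 */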
2569 
2570 /**
2571  * struct cs_timeout_info - info of last CS timeout occurred.
2572  * @timestamp: CS timeout timestamp.
2573  * @write_disable: if set, writing to CS parameters in the structure is disabled so
2574  *                 the first (root cause) CS timeout will not be overwritten.
2575  * @seq: CS timeout sequence number.
2576  */
2577 struct cs_timeout_info {
2578 	ktime_t		timestamp;
2579 	atomic_t	write_disable;
2580 	u64		seq;
2581 };
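
/*
 * Illustrative sketch: only the first (root cause) timeout is recorded, by
 * flipping write_disable atomically; later timeouts find it already set and
 * leave the stored data untouched (info and cs_seq are hypothetical locals):
 *
 *	if (!atomic_cmpxchg(&info->write_disable, 0, 1)) {
 *		info->timestamp = ktime_get();
 *		info->seq = cs_seq;
 *	}
 */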
2582 
2583 /**
2584  * struct razwi_info - info about last razwi error occurred.
2585  * @timestamp: razwi timestamp.
2586  * @write_disable: if set, writing to razwi parameters in the structure is disabled so the
2587  *                 first (root cause) razwi will not be overwritten.
2588  * @addr: address that caused razwi.
2589  * @engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does
2590  *               not have engine id it will be set to U16_MAX.
2591  * @engine_id_2: second engine id of razwi initiator. It might happen that the razwi has 2
2592  *               possible engines, one of which caused it. In that case, it will contain the
2593  *               second possible engine id, otherwise it will be set to U16_MAX.
2594  * @non_engine_initiator: true in case the initiator of the razwi does not have an engine id.
2595  * @type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
2596  */
2597 struct razwi_info {
2598 	ktime_t		timestamp;
2599 	atomic_t	write_disable;
2600 	u64		addr;
2601 	u16		engine_id_1;
2602 	u16		engine_id_2;
2603 	u8		non_engine_initiator;
2604 	u8		type;
2605 };
2606 
2607 /**
2608  * struct last_error_session_info - info about last session errors occurred.
2609  * @cs_timeout: CS timeout error last information.
2610  * @razwi: razwi last information.
2611  */
2612 struct last_error_session_info {
2613 	struct	cs_timeout_info	cs_timeout;
2614 	struct	razwi_info	razwi;
2615 };
2616 
2617 /**
2618  * struct hl_reset_info - holds current device reset information.
2619  * @lock: lock to protect critical reset flows.
2620  * @soft_reset_cnt: number of soft reset since the driver was loaded.
2621  * @hard_reset_cnt: number of hard reset since the driver was loaded.
2622  * @hard_reset_schedule_flags: hard reset is scheduled to after current soft reset,
2623  *                             here we hold the hard reset flags.
2624  * @in_reset: is device in reset flow.
2625  * @is_in_soft_reset: Device is currently in soft reset process.
2626  * @needs_reset: true if reset_on_lockup is false and device should be reset
2627  *               due to lockup.
2628  * @hard_reset_pending: is there a hard reset work pending.
2629  * @curr_reset_cause: saves an enumerated reset cause when a hard reset is
2630  *                    triggered, and cleared after it is shared with preboot.
2631  * @prev_reset_trigger: saves the previous trigger which caused a reset, overridden
2632  *                      with a new value on next reset
2633  * @reset_trigger_repeated: set if device reset is triggered more than once with
2634  *                          same cause.
2635  * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
2636  *                         complete instead.
2637  */
2638 struct hl_reset_info {
2639 	spinlock_t	lock;
2640 	u32		soft_reset_cnt;
2641 	u32		hard_reset_cnt;
2642 	u32		hard_reset_schedule_flags;
2643 	u8		in_reset;
2644 	u8		is_in_soft_reset;
2645 	u8		needs_reset;
2646 	u8		hard_reset_pending;
2647 
2648 	u8		curr_reset_cause;
2649 	u8		prev_reset_trigger;
2650 	u8		reset_trigger_repeated;
2651 
2652 	u8		skip_reset_on_timeout;
2653 };
2654 
2655 /**
2656  * struct hl_device - habanalabs device structure.
2657  * @pdev: pointer to PCI device, can be NULL in case of simulator device.
2658  * @pcie_bar_phys: array of available PCIe bars physical addresses.
2659  *		   (required only for PCI address match mode)
2660  * @pcie_bar: array of available PCIe bars virtual addresses.
2661  * @rmmio: configuration area address on SRAM.
2662  * @cdev: related char device.
2663  * @cdev_ctrl: char device for control operations only (INFO IOCTL)
2664  * @dev: related kernel basic device structure.
2665  * @dev_ctrl: related kernel device structure for the control device
2666  * @work_heartbeat: delayed work for CPU-CP is-alive check.
2667  * @device_reset_work: delayed work which performs hard reset
2668  * @asic_name: ASIC specific name.
2669  * @asic_type: ASIC specific type.
2670  * @completion_queue: array of hl_cq.
2671  * @user_interrupt: array of hl_user_interrupt. upon the corresponding user
2672  *                  interrupt, driver will monitor the list of fences
2673  *                  registered to this interrupt.
2674  * @common_user_interrupt: common user interrupt for all user interrupts.
2675  *                         upon any user interrupt, driver will monitor the
2676  *                         list of fences registered to this common structure.
2677  * @cq_wq: work queues of completion queues for executing work in process
2678  *         context.
2679  * @eq_wq: work queue of event queue for executing work in process context.
2680  * @ts_free_obj_wq: work queue for timestamp registration objects release.
2681  * @pf_wq: work queue for MMU pre-fetch operations.
2682  * @kernel_ctx: Kernel driver context structure.
2683  * @kernel_queues: array of hl_hw_queue.
2684  * @cs_mirror_list: CS mirror list for TDR.
2685  * @cs_mirror_lock: protects cs_mirror_list.
2686  * @kernel_mem_mgr: memory manager for memory buffers with lifespan of driver.
2687  * @event_queue: event queue for IRQ from CPU-CP.
2688  * @dma_pool: DMA pool for small allocations.
2689  * @cpu_accessible_dma_mem: Host <-> CPU-CP shared memory CPU address.
2690  * @cpu_accessible_dma_address: Host <-> CPU-CP shared memory DMA address.
2691  * @cpu_accessible_dma_pool: Host <-> CPU-CP shared memory pool.
2692  * @asid_bitmap: holds used/available ASIDs.
2693  * @asid_mutex: protects asid_bitmap.
2694  * @send_cpu_message_lock: enforces only one message in Host <-> CPU-CP queue.
2695  * @debug_lock: protects critical section of setting debug mode for device
2696  * @asic_prop: ASIC specific immutable properties.
2697  * @asic_funcs: ASIC specific functions.
2698  * @asic_specific: ASIC specific information to use only from ASIC files.
2699  * @vm: virtual memory manager for MMU.
2700  * @hwmon_dev: H/W monitor device.
2701  * @hl_chip_info: ASIC's sensors information.
2702  * @device_status_description: device status description.
2703  * @hl_debugfs: device's debugfs manager.
2704  * @cb_pool: list of preallocated CBs.
2705  * @cb_pool_lock: protects the CB pool.
2706  * @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
2707  * @internal_cb_pool_dma_addr: internal command buffer pool dma address.
2708  * @internal_cb_pool: internal command buffer memory pool.
2709  * @internal_cb_va_base: internal cb pool mmu virtual address base
2710  * @fpriv_list: list of file private data structures. Each structure is created
2711  *              when a user opens the device
2712  * @fpriv_ctrl_list: list of file private data structures. Each structure is created
2713  *              when a user opens the control device
2714  * @fpriv_list_lock: protects the fpriv_list
2715  * @fpriv_ctrl_list_lock: protects the fpriv_ctrl_list
2716  * @aggregated_cs_counters: aggregated cs counters among all contexts
2717  * @mmu_priv: device-specific MMU data.
2718  * @mmu_func: device-related MMU functions.
2719  * @fw_loader: FW loader manager.
2720  * @pci_mem_region: array of memory regions in the PCI
2721  * @state_dump_specs: constants and dictionaries needed to dump system state.
2722  * @multi_cs_completion: array of multi-CS completion.
2723  * @clk_throttling: holds information about current/previous clock throttling events
2724  * @last_error: holds information about last session in which CS timeout or razwi error occurred.
2725  * @reset_info: holds current device reset information.
2726  * @stream_master_qid_arr: pointer to array with QIDs of master streams.
2727  * @fw_major_version: major version of current loaded preboot
2728  * @dram_used_mem: current DRAM memory consumption.
2729  * @timeout_jiffies: device CS timeout value.
2730  * @max_power: the max power of the device, as configured by the sysadmin. This
2731  *             value is saved so in case of hard-reset, the driver will restore
2732  *             this value and update the F/W after the re-initialization
2733  * @boot_error_status_mask: contains a mask of the device boot error status.
2734  *                          Each bit represents a different error, according to
2735  *                          the defines in hl_boot_if.h. If the bit is cleared,
2736  *                          the error will be ignored by the driver during
2737  *                          device initialization. Mainly used to debug and
2738  *                          workaround firmware bugs
2739  * @dram_pci_bar_start: start bus address of PCIe bar towards DRAM.
2740  * @last_successful_open_ktime: timestamp (ktime) of the last successful device open.
2741  * @last_successful_open_jif: timestamp (jiffies) of the last successful
2742  *                            device open.
2743  * @last_open_session_duration_jif: duration (jiffies) of the last device open
2744  *                                  session.
2745  * @open_counter: number of successful device open operations.
2746  * @fw_poll_interval_usec: FW status poll interval in usec.
2747  *                         used for CPU boot status
2748  * @fw_comms_poll_interval_usec: FW comms/protocol poll interval in usec.
2749  *                                  used for COMMS protocol commands (COMMS_STS_*)
2750  * @card_type: Various ASICs have several card types. This indicates the card
2751  *             type of the current device.
2752  * @major: habanalabs kernel driver major.
2753  * @high_pll: high PLL profile frequency.
2754  * @id: device minor.
2755  * @id_control: minor of the control device
2756  * @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
2757  *                    addresses.
2758  * @is_in_dram_scrub: true if a dram scrub operation is ongoing.
2759  * @disabled: is device disabled.
2760  * @late_init_done: true if the late init stage was done during initialization.
2761  * @hwmon_initialized: true if the H/W monitor sensors were initialized.
2762  * @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
2763  * @reset_on_lockup: true if a reset should be done in case of stuck CS, false
2764  *                   otherwise.
2765  * @dram_default_page_mapping: is DRAM default page mapping enabled.
2766  * @memory_scrub: true to perform device memory scrub in various locations,
2767  *                such as context-switch, context close, page free, etc.
2768  * @pmmu_huge_range: is a different virtual addresses range used for PMMU with
2769  *                   huge pages.
2770  * @init_done: is the initialization of the device done.
2771  * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
2772  * @in_debug: whether the device is in a state where the profiling/tracing infrastructure
2773  *            can be used. This indication is needed because in some ASICs we need to do
2774  *            specific operations to enable that infrastructure.
2775  * @cdev_sysfs_created: were char devices and sysfs nodes created.
2776  * @stop_on_err: true if engines should stop on error.
2777  * @supports_sync_stream: is sync stream supported.
2778  * @sync_stream_queue_idx: helper index for sync stream queues initialization.
2779  * @collective_mon_idx: helper index for collective initialization
2780  * @supports_coresight: is CoreSight supported.
2781  * @supports_cb_mapping: is mapping a CB to the device's MMU supported.
2782  * @process_kill_trial_cnt: number of times the reset thread tried to kill
2783  *                          user processes
2784  * @device_fini_pending: true if device_fini was called and might be
2785  *                       waiting for the reset thread to finish
2786  * @supports_staged_submission: true if staged submissions are supported
2787  * @device_cpu_is_halted: Flag to indicate whether the device CPU was already
2788  *                        halted. We can't halt it again because the COMMS
2789  *                        protocol will throw an error. Relevant only for
2790  *                        cases where Linux was not loaded to device CPU
2791  * @supports_wait_for_multi_cs: true if wait for multi CS is supported
2792  * @is_compute_ctx_active: Whether there is an active compute context executing.
2793  * @compute_ctx_in_release: true if the current compute context is being released.
2794  * @supports_mmu_prefetch: true if prefetch is supported, otherwise false.
2795  */
2796 struct hl_device {
2797 	struct pci_dev			*pdev;
2798 	u64				pcie_bar_phys[HL_PCI_NUM_BARS];
2799 	void __iomem			*pcie_bar[HL_PCI_NUM_BARS];
2800 	void __iomem			*rmmio;
2801 	struct cdev			cdev;
2802 	struct cdev			cdev_ctrl;
2803 	struct device			*dev;
2804 	struct device			*dev_ctrl;
2805 	struct delayed_work		work_heartbeat;
2806 	struct hl_device_reset_work	device_reset_work;
2807 	char				asic_name[HL_STR_MAX];
2808 	char				status[HL_DEV_STS_MAX][HL_STR_MAX];
2809 	enum hl_asic_type		asic_type;
2810 	struct hl_cq			*completion_queue;
2811 	struct hl_user_interrupt	*user_interrupt;
2812 	struct hl_user_interrupt	common_user_interrupt;
2813 	struct workqueue_struct		**cq_wq;
2814 	struct workqueue_struct		*eq_wq;
2815 	struct workqueue_struct		*ts_free_obj_wq;
2816 	struct workqueue_struct		*pf_wq;
2817 	struct hl_ctx			*kernel_ctx;
2818 	struct hl_hw_queue		*kernel_queues;
2819 	struct list_head		cs_mirror_list;
2820 	spinlock_t			cs_mirror_lock;
2821 	struct hl_mem_mgr		kernel_mem_mgr;
2822 	struct hl_eq			event_queue;
2823 	struct dma_pool			*dma_pool;
2824 	void				*cpu_accessible_dma_mem;
2825 	dma_addr_t			cpu_accessible_dma_address;
2826 	struct gen_pool			*cpu_accessible_dma_pool;
2827 	unsigned long			*asid_bitmap;
2828 	struct mutex			asid_mutex;
2829 	struct mutex			send_cpu_message_lock;
2830 	struct mutex			debug_lock;
2831 	struct asic_fixed_properties	asic_prop;
2832 	const struct hl_asic_funcs	*asic_funcs;
2833 	void				*asic_specific;
2834 	struct hl_vm			vm;
2835 	struct device			*hwmon_dev;
2836 	struct hwmon_chip_info		*hl_chip_info;
2837 
2838 	struct hl_dbg_device_entry	hl_debugfs;
2839 
2840 	struct list_head		cb_pool;
2841 	spinlock_t			cb_pool_lock;
2842 
2843 	void				*internal_cb_pool_virt_addr;
2844 	dma_addr_t			internal_cb_pool_dma_addr;
2845 	struct gen_pool			*internal_cb_pool;
2846 	u64				internal_cb_va_base;
2847 
2848 	struct list_head		fpriv_list;
2849 	struct list_head		fpriv_ctrl_list;
2850 	struct mutex			fpriv_list_lock;
2851 	struct mutex			fpriv_ctrl_list_lock;
2852 
2853 	struct hl_cs_counters_atomic	aggregated_cs_counters;
2854 
2855 	struct hl_mmu_priv		mmu_priv;
2856 	struct hl_mmu_funcs		mmu_func[MMU_NUM_PGT_LOCATIONS];
2857 
2858 	struct fw_load_mgr		fw_loader;
2859 
2860 	struct pci_mem_region		pci_mem_region[PCI_REGION_NUMBER];
2861 
2862 	struct hl_state_dump_specs	state_dump_specs;
2863 
2864 	struct multi_cs_completion	multi_cs_completion[
2865 							MULTI_CS_MAX_USER_CTX];
2866 	struct hl_clk_throttle		clk_throttling;
2867 	struct last_error_session_info	last_error;
2868 
2869 	struct hl_reset_info		reset_info;
2870 
2871 	u32				*stream_master_qid_arr;
2872 	u32				fw_major_version;
2873 	atomic64_t			dram_used_mem;
2874 	u64				timeout_jiffies;
2875 	u64				max_power;
2876 	u64				boot_error_status_mask;
2877 	u64				dram_pci_bar_start;
2878 	u64				last_successful_open_jif;
2879 	u64				last_open_session_duration_jif;
2880 	u64				open_counter;
2881 	u64				fw_poll_interval_usec;
2882 	ktime_t				last_successful_open_ktime;
2883 	u64				fw_comms_poll_interval_usec;
2884 
2885 	enum cpucp_card_types		card_type;
2886 	u32				major;
2887 	u32				high_pll;
2888 	u16				id;
2889 	u16				id_control;
2890 	u16				cpu_pci_msb_addr;
2891 	u8				is_in_dram_scrub;
2892 	u8				disabled;
2893 	u8				late_init_done;
2894 	u8				hwmon_initialized;
2895 	u8				heartbeat;
2896 	u8				reset_on_lockup;
2897 	u8				dram_default_page_mapping;
2898 	u8				memory_scrub;
2899 	u8				pmmu_huge_range;
2900 	u8				init_done;
2901 	u8				device_cpu_disabled;
2902 	u8				in_debug;
2903 	u8				cdev_sysfs_created;
2904 	u8				stop_on_err;
2905 	u8				supports_sync_stream;
2906 	u8				sync_stream_queue_idx;
2907 	u8				collective_mon_idx;
2908 	u8				supports_coresight;
2909 	u8				supports_cb_mapping;
2910 	u8				process_kill_trial_cnt;
2911 	u8				device_fini_pending;
2912 	u8				supports_staged_submission;
2913 	u8				device_cpu_is_halted;
2914 	u8				supports_wait_for_multi_cs;
2915 	u8				stream_master_qid_arr_size;
2916 	u8				is_compute_ctx_active;
2917 	u8				compute_ctx_in_release;
2918 	u8				supports_mmu_prefetch;
2919 
2920 	/* Parameters for bring-up */
2921 	u64				nic_ports_mask;
2922 	u64				fw_components;
2923 	u8				mmu_enable;
2924 	u8				mmu_huge_page_opt;
2925 	u8				reset_pcilink;
2926 	u8				cpu_queues_enable;
2927 	u8				pldm;
2928 	u8				axi_drain;
2929 	u8				sram_scrambler_enable;
2930 	u8				dram_scrambler_enable;
2931 	u8				hard_reset_on_fw_events;
2932 	u8				bmc_enable;
2933 	u8				rl_enable;
2934 	u8				reset_on_preboot_fail;
2935 	u8				reset_upon_device_release;
2936 	u8				reset_if_device_not_idle;
2937 };
2938 
2939 
2940 /**
2941  * struct hl_cs_encaps_sig_handle - encapsulated signals handle structure
2942  * @refcount: refcount used to protect removing this id when several
2943  *            wait cs are used to wait on the reserved encaps signals.
2944  * @hdev: pointer to habanalabs device structure.
2945  * @hw_sob: pointer to the H/W SOB used in the reservation.
2946  * @ctx: pointer to the user's context data structure
2947  * @cs_seq: staged cs sequence which contains encapsulated signals
2948  * @id: idr handler id to be used to fetch the handler info
2949  * @q_idx: stream queue index
2950  * @pre_sob_val: current SOB value before reservation
2951  * @count: signals number
2952  */
2953 struct hl_cs_encaps_sig_handle {
2954 	struct kref refcount;
2955 	struct hl_device *hdev;
2956 	struct hl_hw_sob *hw_sob;
2957 	struct hl_ctx *ctx;
2958 	u64  cs_seq;
2959 	u32  id;
2960 	u32  q_idx;
2961 	u32  pre_sob_val;
2962 	u32  count;
2963 };
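
/*
 * Illustrative sketch, not part of the driver: the handle above is
 * reference-counted, so a typical lifecycle takes an extra reference for
 * every wait CS that uses the reserved signals and drops it through
 * hl_encaps_handle_do_release() when that wait CS completes. The helper
 * name below is hypothetical.
 *
 *	static void example_encaps_handle_use(struct hl_cs_encaps_sig_handle *handle)
 *	{
 *		// take a reference while a wait CS uses the handle
 *		kref_get(&handle->refcount);
 *
 *		// ... the wait CS consumes the reserved signals ...
 *
 *		// last put frees the handle via the release callback
 *		kref_put(&handle->refcount, hl_encaps_handle_do_release);
 *	}
 */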
2964 
2965 /*
2966  * IOCTLs
2967  */
2968 
2969 /**
2970  * typedef hl_ioctl_t - typedef for ioctl function in the driver
2971  * @hpriv: pointer to the FD's private data, which contains the state of
2972  *		the user process
2973  * @data: pointer to the input/output arguments structure of the IOCTL
2974  *
2975  * Return: 0 for success, negative value for error
2976  */
2977 typedef int hl_ioctl_t(struct hl_fpriv *hpriv, void *data);
2978 
2979 /**
2980  * struct hl_ioctl_desc - describes an IOCTL entry of the driver.
2981  * @cmd: the IOCTL code as created by the kernel macros.
2982  * @func: pointer to the driver's function that should be called for this IOCTL.
2983  */
2984 struct hl_ioctl_desc {
2985 	unsigned int cmd;
2986 	hl_ioctl_t *func;
2987 };
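
/*
 * Illustrative sketch, assuming names that are not defined in this file:
 * the ioctl entry points declared at the end of this header can dispatch
 * through a table of struct hl_ioctl_desc entries indexed by the ioctl
 * number. The HL_IOCTL_DEF() macro and table name below are hypothetical.
 *
 *	#define HL_IOCTL_DEF(ioctl, _func) \
 *		[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
 *
 *	static const struct hl_ioctl_desc example_hl_ioctls[] = {
 *		HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
 *		HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
 *		HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_wait_ioctl),
 *		HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl)
 *	};
 */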
2988 
2989 
2990 /*
2991  * Kernel module functions that can be accessed by entire module
2992  */
2993 
2994 /**
2995  * hl_get_sg_info() - get number of pages and the DMA address from SG list.
2996  * @sg: the SG list.
2997  * @dma_addr: pointer to DMA address to return.
2998  *
2999  * Calculate the number of consecutive pages described by the SG list. Take the
3000  * offset of the address in the first page, add to it the length and round it up
3001  * to the number of needed pages.
3002  */
3003 static inline u32 hl_get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
3004 {
3005 	*dma_addr = sg_dma_address(sg);
3006 
3007 	return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
3008 			(PAGE_SIZE - 1)) >> PAGE_SHIFT;
3009 }
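
/*
 * Illustrative sketch, not part of the driver: summing hl_get_sg_info()
 * over a DMA-mapped SG table gives the total number of pages the table
 * spans. The helper name below is hypothetical.
 *
 *	static u32 example_total_sg_pages(struct sg_table *sgt)
 *	{
 *		struct scatterlist *sg;
 *		dma_addr_t dma_addr;
 *		u32 npages = 0;
 *		int i;
 *
 *		// iterate only the DMA-mapped entries of the table
 *		for_each_sgtable_dma_sg(sgt, sg, i)
 *			npages += hl_get_sg_info(sg, &dma_addr);
 *
 *		return npages;
 *	}
 */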
3010 
3011 /**
3012  * hl_mem_area_inside_range() - Checks whether address+size are inside a range.
3013  * @address: The start address of the area we want to validate.
3014  * @size: The size in bytes of the area we want to validate.
3015  * @range_start_address: The start address of the valid range.
3016  * @range_end_address: The end address of the valid range.
3017  *
3018  * Return: true if the area is inside the valid range, false otherwise.
3019  */
3020 static inline bool hl_mem_area_inside_range(u64 address, u64 size,
3021 				u64 range_start_address, u64 range_end_address)
3022 {
3023 	u64 end_address = address + size;
3024 
3025 	if ((address >= range_start_address) &&
3026 			(end_address <= range_end_address) &&
3027 			(end_address > address))
3028 		return true;
3029 
3030 	return false;
3031 }
3032 
3033 /**
3034  * hl_mem_area_crosses_range() - Checks whether address+size crosses a range.
3035  * @address: The start address of the area we want to validate.
3036  * @size: The size in bytes of the area we want to validate.
3037  * @range_start_address: The start address of the valid range.
3038  * @range_end_address: The end address of the valid range.
3039  *
3040  * Return: true if the area overlaps part or all of the valid range,
3041  *		false otherwise.
3042  */
3043 static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
3044 				u64 range_start_address, u64 range_end_address)
3045 {
3046 	u64 end_address = address + size - 1;
3047 
3048 	return ((address <= range_end_address) && (range_start_address <= end_address));
3049 }
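
/*
 * Illustrative sketch, not part of the driver: the two range helpers above
 * are complementary. A caller can accept an area only when it lies fully
 * inside an allowed range and does not even partially overlap a reserved
 * range. The function and parameter names below are hypothetical.
 *
 *	static bool example_area_is_valid(u64 addr, u32 size,
 *					  u64 allowed_start, u64 allowed_end,
 *					  u64 rsvd_start, u64 rsvd_end)
 *	{
 *		// must be fully contained in the allowed range
 *		if (!hl_mem_area_inside_range(addr, size, allowed_start, allowed_end))
 *			return false;
 *
 *		// must not overlap the reserved range, not even partially
 *		if (hl_mem_area_crosses_range(addr, size, rsvd_start, rsvd_end))
 *			return false;
 *
 *		return true;
 *	}
 */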
3050 
3051 uint64_t hl_set_dram_bar_default(struct hl_device *hdev, u64 addr);
3052 int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir);
3053 void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
3054 				enum dma_data_direction dir);
3055 int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
3056 	enum debugfs_access_type acc_type);
3057 int hl_access_dev_mem(struct hl_device *hdev, struct pci_mem_region *region,
3058 		enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type);
3059 int hl_device_open(struct inode *inode, struct file *filp);
3060 int hl_device_open_ctrl(struct inode *inode, struct file *filp);
3061 bool hl_device_operational(struct hl_device *hdev,
3062 		enum hl_device_status *status);
3063 enum hl_device_status hl_device_status(struct hl_device *hdev);
3064 int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable);
3065 int hl_hw_queues_create(struct hl_device *hdev);
3066 void hl_hw_queues_destroy(struct hl_device *hdev);
3067 int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
3068 		u32 cb_size, u64 cb_ptr);
3069 void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
3070 		u32 ctl, u32 len, u64 ptr);
3071 int hl_hw_queue_schedule_cs(struct hl_cs *cs);
3072 u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
3073 void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
3074 void hl_hw_queue_update_ci(struct hl_cs *cs);
3075 void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset);
3076 
3077 #define hl_queue_inc_ptr(p)		hl_hw_queue_add_ptr(p, 1)
3078 #define hl_pi_2_offset(pi)		((pi) & (HL_QUEUE_LENGTH - 1))
3079 
3080 int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id);
3081 void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q);
3082 int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
3083 void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
3084 void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
3085 void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
3086 irqreturn_t hl_irq_handler_cq(int irq, void *arg);
3087 irqreturn_t hl_irq_handler_eq(int irq, void *arg);
3088 irqreturn_t hl_irq_handler_user_cq(int irq, void *arg);
3089 irqreturn_t hl_irq_handler_default(int irq, void *arg);
3090 u32 hl_cq_inc_ptr(u32 ptr);
3091 
3092 int hl_asid_init(struct hl_device *hdev);
3093 void hl_asid_fini(struct hl_device *hdev);
3094 unsigned long hl_asid_alloc(struct hl_device *hdev);
3095 void hl_asid_free(struct hl_device *hdev, unsigned long asid);
3096 
3097 int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
3098 void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
3099 int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
3100 void hl_ctx_do_release(struct kref *ref);
3101 void hl_ctx_get(struct hl_ctx *ctx);
3102 int hl_ctx_put(struct hl_ctx *ctx);
3103 struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev);
3104 struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
3105 int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
3106 				struct hl_fence **fence, u32 arr_len);
3107 void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
3108 void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
3109 
3110 int hl_device_init(struct hl_device *hdev, struct class *hclass);
3111 void hl_device_fini(struct hl_device *hdev);
3112 int hl_device_suspend(struct hl_device *hdev);
3113 int hl_device_resume(struct hl_device *hdev);
3114 int hl_device_reset(struct hl_device *hdev, u32 flags);
3115 void hl_hpriv_get(struct hl_fpriv *hpriv);
3116 int hl_hpriv_put(struct hl_fpriv *hpriv);
3117 int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
3118 
3119 int hl_build_hwmon_channel_info(struct hl_device *hdev,
3120 		struct cpucp_sensor *sensors_arr);
3121 
3122 void hl_notifier_event_send_all(struct hl_device *hdev, u64 event);
3123 
3124 int hl_sysfs_init(struct hl_device *hdev);
3125 void hl_sysfs_fini(struct hl_device *hdev);
3126 
3127 int hl_hwmon_init(struct hl_device *hdev);
3128 void hl_hwmon_fini(struct hl_device *hdev);
3129 
3130 int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
3131 			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
3132 			bool map_cb, u64 *handle);
3133 int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle);
3134 int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
3135 struct hl_cb *hl_cb_get(struct hl_mem_mgr *mmg, u64 handle);
3136 void hl_cb_put(struct hl_cb *cb);
3137 struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
3138 					bool internal_cb);
3139 int hl_cb_pool_init(struct hl_device *hdev);
3140 int hl_cb_pool_fini(struct hl_device *hdev);
3141 int hl_cb_va_pool_init(struct hl_ctx *ctx);
3142 void hl_cb_va_pool_fini(struct hl_ctx *ctx);
3143 
3144 void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush);
3145 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
3146 		enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
3147 void hl_sob_reset_error(struct kref *ref);
3148 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask);
3149 void hl_fence_put(struct hl_fence *fence);
3150 void hl_fences_put(struct hl_fence **fence, int len);
3151 void hl_fence_get(struct hl_fence *fence);
3152 void cs_get(struct hl_cs *cs);
3153 bool cs_needs_completion(struct hl_cs *cs);
3154 bool cs_needs_timeout(struct hl_cs *cs);
3155 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs);
3156 struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq);
3157 void hl_multi_cs_completion_init(struct hl_device *hdev);
3158 
3159 void goya_set_asic_funcs(struct hl_device *hdev);
3160 void gaudi_set_asic_funcs(struct hl_device *hdev);
3161 
3162 int hl_vm_ctx_init(struct hl_ctx *ctx);
3163 void hl_vm_ctx_fini(struct hl_ctx *ctx);
3164 
3165 int hl_vm_init(struct hl_device *hdev);
3166 void hl_vm_fini(struct hl_device *hdev);
3167 
3168 void hl_hw_block_mem_init(struct hl_ctx *ctx);
3169 void hl_hw_block_mem_fini(struct hl_ctx *ctx);
3170 
3171 u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
3172 		enum hl_va_range_type type, u32 size, u32 alignment);
3173 int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
3174 		u64 start_addr, u64 size);
3175 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
3176 			struct hl_userptr *userptr);
3177 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
3178 void hl_userptr_delete_list(struct hl_device *hdev,
3179 				struct list_head *userptr_list);
3180 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
3181 				struct list_head *userptr_list,
3182 				struct hl_userptr **userptr);
3183 
3184 int hl_mmu_init(struct hl_device *hdev);
3185 void hl_mmu_fini(struct hl_device *hdev);
3186 int hl_mmu_ctx_init(struct hl_ctx *ctx);
3187 void hl_mmu_ctx_fini(struct hl_ctx *ctx);
3188 int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
3189 		u32 page_size, bool flush_pte);
3190 int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
3191 				u32 page_size, u32 *real_page_size, bool is_dram_addr);
3192 int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
3193 		bool flush_pte);
3194 int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
3195 					u64 phys_addr, u32 size);
3196 int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size);
3197 int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags);
3198 int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
3199 					u32 flags, u32 asid, u64 va, u64 size);
3200 int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size);
3201 u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte);
3202 u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
3203 					u8 hop_idx, u64 hop_addr, u64 virt_addr);
3204 void hl_mmu_swap_out(struct hl_ctx *ctx);
3205 void hl_mmu_swap_in(struct hl_ctx *ctx);
3206 int hl_mmu_if_set_funcs(struct hl_device *hdev);
3207 void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
3208 int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
3209 int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
3210 			struct hl_mmu_hop_info *hops);
3211 u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr);
3212 u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr);
3213 bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
3214 
3215 int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
3216 				void __iomem *dst, u32 src_offset, u32 size);
3217 int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
3218 int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
3219 				u16 len, u32 timeout, u64 *result);
3220 int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type);
3221 int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
3222 		size_t irq_arr_size);
3223 int hl_fw_test_cpu_queue(struct hl_device *hdev);
3224 void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
3225 						dma_addr_t *dma_handle);
3226 void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
3227 					void *vaddr);
3228 int hl_fw_send_heartbeat(struct hl_device *hdev);
3229 int hl_fw_cpucp_info_get(struct hl_device *hdev,
3230 				u32 sts_boot_dev_sts0_reg,
3231 				u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
3232 				u32 boot_err1_reg);
3233 int hl_fw_cpucp_handshake(struct hl_device *hdev,
3234 				u32 sts_boot_dev_sts0_reg,
3235 				u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
3236 				u32 boot_err1_reg);
3237 int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
3238 int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data);
3239 int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
3240 		struct hl_info_pci_counters *counters);
3241 int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
3242 			u64 *total_energy);
3243 int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
3244 						enum pll_index *pll_index);
3245 int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
3246 		u16 *pll_freq_arr);
3247 int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
3248 void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev);
3249 void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev);
3250 int hl_fw_init_cpu(struct hl_device *hdev);
3251 int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
3252 				u32 sts_boot_dev_sts0_reg,
3253 				u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
3254 				u32 boot_err1_reg, u32 timeout);
3255 int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
3256 				struct fw_load_mgr *fw_loader,
3257 				enum comms_cmd cmd, unsigned int size,
3258 				bool wait_ok, u32 timeout);
3259 int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
3260 				struct cpucp_hbm_row_info *info);
3261 int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num);
3262 int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid);
3263 int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
3264 			bool is_wc[3]);
3265 int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data);
3266 int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
3267 int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
3268 		struct hl_inbound_pci_region *pci_region);
3269 int hl_pci_set_outbound_region(struct hl_device *hdev,
3270 		struct hl_outbound_pci_region *pci_region);
3271 enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr);
3272 int hl_pci_init(struct hl_device *hdev);
3273 void hl_pci_fini(struct hl_device *hdev);
3274 
3275 long hl_fw_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
3276 void hl_fw_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
3277 int hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3278 int hl_set_temperature(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3279 int hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3280 int hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3281 int hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3282 int hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3283 void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3284 long hl_fw_get_max_power(struct hl_device *hdev);
3285 void hl_fw_set_max_power(struct hl_device *hdev);
3286 int hl_set_voltage(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3287 int hl_set_current(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3288 int hl_set_power(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3289 int hl_get_power(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3290 int hl_fw_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
3291 void hl_fw_set_pll_profile(struct hl_device *hdev);
3292 void hl_sysfs_add_dev_clk_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp);
3293 void hl_sysfs_add_dev_vrm_attr(struct hl_device *hdev, struct attribute_group *dev_vrm_attr_grp);
3294 
3295 void hw_sob_get(struct hl_hw_sob *hw_sob);
3296 void hw_sob_put(struct hl_hw_sob *hw_sob);
3297 void hl_encaps_handle_do_release(struct kref *ref);
3298 void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
3299 			struct hl_cs *cs, struct hl_cs_job *job,
3300 			struct hl_cs_compl *cs_cmpl);
3301 void hl_release_pending_user_interrupts(struct hl_device *hdev);
3302 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
3303 			struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig);
3304 
3305 int hl_state_dump(struct hl_device *hdev);
3306 const char *hl_state_dump_get_sync_name(struct hl_device *hdev, u32 sync_id);
3307 const char *hl_state_dump_get_monitor_name(struct hl_device *hdev,
3308 					struct hl_mon_state_dump *mon);
3309 void hl_state_dump_free_sync_to_engine_map(struct hl_sync_to_engine_map *map);
3310 __printf(4, 5) int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
3311 					const char *format, ...);
3312 char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
3313 const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
3314 
3315 void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg);
3316 void hl_mem_mgr_fini(struct hl_mem_mgr *mmg);
3317 int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
3318 		    void *args);
3319 struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg,
3320 						   u64 handle);
3321 int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle);
3322 int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf);
3323 struct hl_mmap_mem_buf *
3324 hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
3325 		      struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
3326 		      void *args);
3327 
3328 #ifdef CONFIG_DEBUG_FS
3329 
3330 void hl_debugfs_init(void);
3331 void hl_debugfs_fini(void);
3332 void hl_debugfs_add_device(struct hl_device *hdev);
3333 void hl_debugfs_remove_device(struct hl_device *hdev);
3334 void hl_debugfs_add_file(struct hl_fpriv *hpriv);
3335 void hl_debugfs_remove_file(struct hl_fpriv *hpriv);
3336 void hl_debugfs_add_cb(struct hl_cb *cb);
3337 void hl_debugfs_remove_cb(struct hl_cb *cb);
3338 void hl_debugfs_add_cs(struct hl_cs *cs);
3339 void hl_debugfs_remove_cs(struct hl_cs *cs);
3340 void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job);
3341 void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job);
3342 void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr);
3343 void hl_debugfs_remove_userptr(struct hl_device *hdev,
3344 				struct hl_userptr *userptr);
3345 void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
3346 void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
3347 void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
3348 					unsigned long length);
3349 
3350 #else
3351 
3352 static inline void __init hl_debugfs_init(void)
3353 {
3354 }
3355 
3356 static inline void hl_debugfs_fini(void)
3357 {
3358 }
3359 
3360 static inline void hl_debugfs_add_device(struct hl_device *hdev)
3361 {
3362 }
3363 
3364 static inline void hl_debugfs_remove_device(struct hl_device *hdev)
3365 {
3366 }
3367 
3368 static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv)
3369 {
3370 }
3371 
3372 static inline void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
3373 {
3374 }
3375 
3376 static inline void hl_debugfs_add_cb(struct hl_cb *cb)
3377 {
3378 }
3379 
3380 static inline void hl_debugfs_remove_cb(struct hl_cb *cb)
3381 {
3382 }
3383 
3384 static inline void hl_debugfs_add_cs(struct hl_cs *cs)
3385 {
3386 }
3387 
3388 static inline void hl_debugfs_remove_cs(struct hl_cs *cs)
3389 {
3390 }
3391 
3392 static inline void hl_debugfs_add_job(struct hl_device *hdev,
3393 					struct hl_cs_job *job)
3394 {
3395 }
3396 
3397 static inline void hl_debugfs_remove_job(struct hl_device *hdev,
3398 					struct hl_cs_job *job)
3399 {
3400 }
3401 
3402 static inline void hl_debugfs_add_userptr(struct hl_device *hdev,
3403 					struct hl_userptr *userptr)
3404 {
3405 }
3406 
3407 static inline void hl_debugfs_remove_userptr(struct hl_device *hdev,
3408 					struct hl_userptr *userptr)
3409 {
3410 }
3411 
3412 static inline void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev,
3413 					struct hl_ctx *ctx)
3414 {
3415 }
3416 
3417 static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
3418 					struct hl_ctx *ctx)
3419 {
3420 }
3421 
3422 static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
3423 					char *data, unsigned long length)
3424 {
3425 }
3426 
3427 #endif
3428 
3429 /* IOCTLs */
3430 long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
3431 long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
3432 int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
3433 int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
3434 int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data);
3435 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);
3436 
3437 #endif /* HABANALABSP_H_ */
3438