// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

/**
 * hl_get_pb_block - return the relevant block within the block array
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address in the desired block
 * @pb_blocks: blocks array
 * @array_size: blocks array size
 *
 */
static int hl_get_pb_block(struct hl_device *hdev, u32 mm_reg_addr,
		const u32 pb_blocks[], int array_size)
{
	int i;
	u32 start_addr, end_addr;

	for (i = 0 ; i < array_size ; i++) {
		start_addr = pb_blocks[i];
		end_addr = start_addr + HL_BLOCK_SIZE;

		if ((mm_reg_addr >= start_addr) && (mm_reg_addr < end_addr))
			return i;
	}

	dev_err(hdev->dev, "No protection domain was found for 0x%x\n",
			mm_reg_addr);
	return -EDOM;
}

/**
 * hl_unset_pb_in_block - clear a specific protection bit in a block
 *
 * @hdev: pointer to hl_device structure
 * @reg_offset: register offset to be converted to a bit offset in the pb block
 * @sgs_entry: pb array
 *
 */
static int hl_unset_pb_in_block(struct hl_device *hdev, u32 reg_offset,
				struct hl_block_glbl_sec *sgs_entry)
{
	if ((reg_offset >= HL_BLOCK_SIZE) || (reg_offset & 0x3)) {
		dev_err(hdev->dev,
			"Register offset(%d) is out of range(%d) or invalid\n",
			reg_offset, HL_BLOCK_SIZE);
		return -EINVAL;
	}

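	/*
	 * Each protection bit covers a single 32-bit register, so the bit
	 * index is the register's byte offset within the block divided by 4.
	 */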
	UNSET_GLBL_SEC_BIT(sgs_entry->sec_array,
			 (reg_offset & (HL_BLOCK_SIZE - 1)) >> 2);

	return 0;
}

/**
 * hl_unsecure_register - locate the relevant block for this register and
 *                        remove corresponding protection bit
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address to unsecure
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @array_size: blocks array size
 *
 */
int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
		const u32 pb_blocks[], struct hl_block_glbl_sec sgs_array[],
		int array_size)
{
	u32 reg_offset;
	int block_num;

	block_num = hl_get_pb_block(hdev, mm_reg_addr + offset, pb_blocks,
			array_size);
	if (block_num < 0)
		return block_num;

	reg_offset = (mm_reg_addr + offset) - pb_blocks[block_num];

	return hl_unset_pb_in_block(hdev, reg_offset, &sgs_array[block_num]);
}

/**
 * hl_unsecure_register_range - locate the relevant block for this register
 *                              range and remove corresponding protection bit
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_range: register address range to unsecure
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @array_size: blocks array size
 *
 */
static int hl_unsecure_register_range(struct hl_device *hdev,
		struct range mm_reg_range, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[],
		int array_size)
{
	u32 reg_offset;
	int i, block_num, rc = 0;

	block_num = hl_get_pb_block(hdev,
			mm_reg_range.start + offset, pb_blocks,
			array_size);
	if (block_num < 0)
		return block_num;

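	/*
	 * Walk the range one 32-bit register at a time; OR the return codes
	 * together so a single failing register makes the whole range
	 * report an error.
	 */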
	for (i = mm_reg_range.start ; i <= mm_reg_range.end ; i += 4) {
		reg_offset = (i + offset) - pb_blocks[block_num];
		rc |= hl_unset_pb_in_block(hdev, reg_offset,
					&sgs_array[block_num]);
	}

	return rc;
}

/**
 * hl_unsecure_registers - locate the relevant block for all registers and
 *                         remove corresponding protection bit
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_array: register address array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 */
int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
		int mm_array_size, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int i, rc = 0;

	for (i = 0 ; i < mm_array_size ; i++) {
		rc = hl_unsecure_register(hdev, mm_reg_array[i], offset,
				pb_blocks, sgs_array, blocks_array_size);

		if (rc)
			return rc;
	}

	return rc;
}

/**
 * hl_unsecure_registers_range - locate the relevant block for all register
 *                               ranges and remove corresponding protection bit
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_range_array: register address range array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 */
static int hl_unsecure_registers_range(struct hl_device *hdev,
		const struct range mm_reg_range_array[], int mm_array_size,
		int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int i, rc = 0;

	for (i = 0 ; i < mm_array_size ; i++) {
		rc = hl_unsecure_register_range(hdev, mm_reg_range_array[i],
			offset, pb_blocks, sgs_array, blocks_array_size);

		if (rc)
			return rc;
	}

	return rc;
}

/**
 * hl_ack_pb_security_violations - Ack security violations
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 *
 */
static void hl_ack_pb_security_violations(struct hl_device *hdev,
		const u32 pb_blocks[], u32 block_offset, int array_size)
{
	int i;
	u32 cause, addr, block_base;

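	/*
	 * For every block: read the violation cause, and if one is pending,
	 * fetch the offending address, let the ASIC-specific handler print
	 * it, then write the cause value back to acknowledge the violation.
	 */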
	for (i = 0 ; i < array_size ; i++) {
		block_base = pb_blocks[i] + block_offset;
		cause = RREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE);
		if (cause) {
			addr = RREG32(block_base + HL_BLOCK_GLBL_ERR_ADDR);
			hdev->asic_funcs->pb_print_security_errors(hdev,
					block_base, cause, addr);
			WREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE, cause);
		}
	}
}

/**
 * hl_config_glbl_sec - set pb in HW according to given pb array
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 *
 */
void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], u32 block_offset,
		int array_size)
{
	int i, j;
	u32 sgs_base;

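	/* The pldm emulation platform is slow; allow it time to settle */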
	if (hdev->pldm)
		usleep_range(100, 1000);

	for (i = 0 ; i < array_size ; i++) {
		sgs_base = block_offset + pb_blocks[i] +
				HL_BLOCK_GLBL_SEC_OFFS;

		for (j = 0 ; j < HL_BLOCK_GLBL_SEC_LEN ; j++)
			WREG32(sgs_base + j * sizeof(u32),
				sgs_array[i].sec_array[j]);
	}
}

/**
 * hl_secure_block - locally memsets a block to 0
 *
 * @hdev: pointer to hl_device structure
 * @sgs_array: pb array to clear
 * @array_size: blocks array size
 *
 */
void hl_secure_block(struct hl_device *hdev,
		struct hl_block_glbl_sec sgs_array[], int array_size)
{
	int i;

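	/*
	 * An all-zero glbl_sec image leaves every register in the block
	 * protected; hl_unsecure_register() later flips the bits of the
	 * registers that should be user-accessible.
	 */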
	for (i = 0 ; i < array_size ; i++)
		memset((char *)(sgs_array[i].sec_array), 0,
			HL_BLOCK_GLBL_SEC_SIZE);
}

/**
 * hl_init_pb_with_mask - set selected pb instances with mask in HW according
 *                        to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_array: register array
 * @regs_array_size: register array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 */
int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *regs_array, u32 regs_array_size, u64 mask)
{
	int i, j;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	hl_unsecure_registers(hdev, regs_array, regs_array_size, 0, pb_blocks,
			glbl_sec, blocks_array_size);

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

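			/* seq is the flat instance index; skip masked-out instances */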
			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}

	kfree(glbl_sec);

	return 0;
}

/**
 * hl_init_pb - set pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_array: register array
 * @regs_array_size: register array size
 *
 */
int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *regs_array, u32 regs_array_size)
{
	return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, regs_array, regs_array_size,
			ULLONG_MAX);
}
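
/*
 * Example usage (a minimal sketch, not taken from an actual ASIC file): an
 * ASIC-specific init flow would typically secure a shared block array and
 * then punch holes for the registers user space may touch. The arrays below
 * (example_pb_blocks, example_unsecured_regs) are hypothetical:
 *
 *	static const u32 example_pb_blocks[] = { mmFOO_BASE, mmBAR_BASE };
 *	static const u32 example_unsecured_regs[] = { mmFOO_STATUS };
 *
 *	rc = hl_init_pb(hdev, HL_PB_SHARED, 0, HL_PB_SINGLE_INSTANCE, 0,
 *			example_pb_blocks, ARRAY_SIZE(example_pb_blocks),
 *			example_unsecured_regs,
 *			ARRAY_SIZE(example_unsecured_regs));
 */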

/**
 * hl_init_pb_ranges_with_mask - set pb instances using mask in HW according to
 *                               given configuration unsecuring register ranges
 *                               instead of specific registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_range_array: register range array
 * @regs_range_array_size: register range array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 */
int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *regs_range_array, u32 regs_range_array_size,
		u64 mask)
{
	int i, j, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers_range(hdev, regs_range_array,
			regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb_ranges - set pb in HW according to given configuration unsecuring
 *                     register ranges instead of specific registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_range_array: register range array
 * @regs_range_array_size: register range array size
 *
 */
int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *regs_range_array, u32 regs_range_array_size)
{
	return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, regs_range_array,
			regs_range_array_size, ULLONG_MAX);
}

/**
 * hl_init_pb_single_dcore - set pb for a single dcore in HW
 * according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from the dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_array: register array
 * @regs_array_size: register array size
 *
 */
int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *regs_array, u32 regs_array_size)
{
	int i, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers(hdev, regs_array, regs_array_size, 0,
			pb_blocks, glbl_sec, blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}

/**
 * hl_init_pb_ranges_single_dcore - set pb for a single dcore in HW according
 *                                  to given configuration unsecuring
 *                                  register ranges instead of specific
 *                                  registers
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from the dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_range_array: register range array
 * @regs_range_array_size: register range array size
 *
 */
int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *regs_range_array, u32 regs_range_array_size)
{
	int i;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	hl_unsecure_registers_range(hdev, regs_range_array,
			regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

	kfree(glbl_sec);

	return 0;
}

/**
 * hl_ack_pb_with_mask - ack pb with mask in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 *
 */
void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size, u64 mask)
{
	int i, j;

	/* ack all blocks */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_ack_pb_security_violations(hdev, pb_blocks,
					i * dcore_offset + j * instance_offset,
					blocks_array_size);
		}
	}
}

/**
 * hl_ack_pb - ack pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of dcores to apply configuration to
 *              set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 *
 */
void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	hl_ack_pb_with_mask(hdev, num_dcores, dcore_offset, num_instances,
			instance_offset, pb_blocks, blocks_array_size,
			ULLONG_MAX);
}

/**
 * hl_ack_pb_single_dcore - ack pb for single dcore in HW
 * according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 *
 */
void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	int i;

	/* ack all blocks */
	for (i = 0 ; i < num_instances ; i++)
		hl_ack_pb_security_violations(hdev, pb_blocks,
				dcore_offset + i * instance_offset,
				blocks_array_size);
}