1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * Copyright 2016-2022 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7 
8 #include "habanalabs.h"
9 #include "../include/common/hl_boot_if.h"
10 
11 #include <linux/firmware.h>
12 #include <linux/crc32.h>
13 #include <linux/slab.h>
14 #include <linux/ctype.h>
15 
16 #define FW_FILE_MAX_SIZE		0x1400000 /* maximum size of 20MB */
17 
extract_fw_ver_from_str(const char * fw_str)18 static char *extract_fw_ver_from_str(const char *fw_str)
19 {
20 	char *str, *fw_ver, *whitespace;
21 	u32 ver_offset;
22 
23 	fw_ver = kmalloc(VERSION_MAX_LEN, GFP_KERNEL);
24 	if (!fw_ver)
25 		return NULL;
26 
27 	str = strnstr(fw_str, "fw-", VERSION_MAX_LEN);
28 	if (!str)
29 		goto free_fw_ver;
30 
31 	/* Skip the fw- part */
32 	str += 3;
33 	ver_offset = str - fw_str;
34 
35 	/* Copy until the next whitespace */
36 	whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
37 	if (!whitespace)
38 		goto free_fw_ver;
39 
40 	strscpy(fw_ver, str, whitespace - str + 1);
41 
42 	return fw_ver;
43 
44 free_fw_ver:
45 	kfree(fw_ver);
46 	return NULL;
47 }
48 
extract_fw_sub_versions(struct hl_device * hdev,char * preboot_ver)49 static int extract_fw_sub_versions(struct hl_device *hdev, char *preboot_ver)
50 {
51 	char major[8], minor[8], *first_dot, *second_dot;
52 	int rc;
53 
54 	first_dot = strnstr(preboot_ver, ".", 10);
55 	if (first_dot) {
56 		strscpy(major, preboot_ver, first_dot - preboot_ver + 1);
57 		rc = kstrtou32(major, 10, &hdev->fw_major_version);
58 	} else {
59 		rc = -EINVAL;
60 	}
61 
62 	if (rc) {
63 		dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc);
64 		goto out;
65 	}
66 
67 	/* skip the first dot */
68 	first_dot++;
69 
70 	second_dot = strnstr(first_dot, ".", 10);
71 	if (second_dot) {
72 		strscpy(minor, first_dot, second_dot - first_dot + 1);
73 		rc = kstrtou32(minor, 10, &hdev->fw_minor_version);
74 	} else {
75 		rc = -EINVAL;
76 	}
77 
78 	if (rc)
79 		dev_err(hdev->dev, "Error %d parsing preboot minor version\n", rc);
80 
81 out:
82 	kfree(preboot_ver);
83 	return rc;
84 }
85 
hl_request_fw(struct hl_device * hdev,const struct firmware ** firmware_p,const char * fw_name)86 static int hl_request_fw(struct hl_device *hdev,
87 				const struct firmware **firmware_p,
88 				const char *fw_name)
89 {
90 	size_t fw_size;
91 	int rc;
92 
93 	rc = request_firmware(firmware_p, fw_name, hdev->dev);
94 	if (rc) {
95 		dev_err(hdev->dev, "Firmware file %s is not found! (error %d)\n",
96 				fw_name, rc);
97 		goto out;
98 	}
99 
100 	fw_size = (*firmware_p)->size;
101 	if ((fw_size % 4) != 0) {
102 		dev_err(hdev->dev, "Illegal %s firmware size %zu\n",
103 				fw_name, fw_size);
104 		rc = -EINVAL;
105 		goto release_fw;
106 	}
107 
108 	dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);
109 
110 	if (fw_size > FW_FILE_MAX_SIZE) {
111 		dev_err(hdev->dev,
112 			"FW file size %zu exceeds maximum of %u bytes\n",
113 			fw_size, FW_FILE_MAX_SIZE);
114 		rc = -EINVAL;
115 		goto release_fw;
116 	}
117 
118 	return 0;
119 
120 release_fw:
121 	release_firmware(*firmware_p);
122 out:
123 	return rc;
124 }
125 
126 /**
127  * hl_release_firmware() - release FW
128  *
129  * @fw: fw descriptor
130  *
131  * note: this inline function added to serve as a comprehensive mirror for the
132  *       hl_request_fw function.
133  */
hl_release_firmware(const struct firmware * fw)134 static inline void hl_release_firmware(const struct firmware *fw)
135 {
136 	release_firmware(fw);
137 }
138 
139 /**
140  * hl_fw_copy_fw_to_device() - copy FW to device
141  *
142  * @hdev: pointer to hl_device structure.
143  * @fw: fw descriptor
144  * @dst: IO memory mapped address space to copy firmware to
145  * @src_offset: offset in src FW to copy from
146  * @size: amount of bytes to copy (0 to copy the whole binary)
147  *
148  * actual copy of FW binary data to device, shared by static and dynamic loaders
149  */
hl_fw_copy_fw_to_device(struct hl_device * hdev,const struct firmware * fw,void __iomem * dst,u32 src_offset,u32 size)150 static int hl_fw_copy_fw_to_device(struct hl_device *hdev,
151 				const struct firmware *fw, void __iomem *dst,
152 				u32 src_offset, u32 size)
153 {
154 	const void *fw_data;
155 
156 	/* size 0 indicates to copy the whole file */
157 	if (!size)
158 		size = fw->size;
159 
160 	if (src_offset + size > fw->size) {
161 		dev_err(hdev->dev,
162 			"size to copy(%u) and offset(%u) are invalid\n",
163 			size, src_offset);
164 		return -EINVAL;
165 	}
166 
167 	fw_data = (const void *) fw->data;
168 
169 	memcpy_toio(dst, fw_data + src_offset, size);
170 	return 0;
171 }
172 
173 /**
174  * hl_fw_copy_msg_to_device() - copy message to device
175  *
176  * @hdev: pointer to hl_device structure.
177  * @msg: message
178  * @dst: IO memory mapped address space to copy firmware to
179  * @src_offset: offset in src message to copy from
180  * @size: amount of bytes to copy (0 to copy the whole binary)
181  *
182  * actual copy of message data to device.
183  */
hl_fw_copy_msg_to_device(struct hl_device * hdev,struct lkd_msg_comms * msg,void __iomem * dst,u32 src_offset,u32 size)184 static int hl_fw_copy_msg_to_device(struct hl_device *hdev,
185 		struct lkd_msg_comms *msg, void __iomem *dst,
186 		u32 src_offset, u32 size)
187 {
188 	void *msg_data;
189 
190 	/* size 0 indicates to copy the whole file */
191 	if (!size)
192 		size = sizeof(struct lkd_msg_comms);
193 
194 	if (src_offset + size > sizeof(struct lkd_msg_comms)) {
195 		dev_err(hdev->dev,
196 			"size to copy(%u) and offset(%u) are invalid\n",
197 			size, src_offset);
198 		return -EINVAL;
199 	}
200 
201 	msg_data = (void *) msg;
202 
203 	memcpy_toio(dst, msg_data + src_offset, size);
204 
205 	return 0;
206 }
207 
208 /**
209  * hl_fw_load_fw_to_device() - Load F/W code to device's memory.
210  *
211  * @hdev: pointer to hl_device structure.
212  * @fw_name: the firmware image name
213  * @dst: IO memory mapped address space to copy firmware to
214  * @src_offset: offset in src FW to copy from
215  * @size: amount of bytes to copy (0 to copy the whole binary)
216  *
217  * Copy fw code from firmware file to device memory.
218  *
219  * Return: 0 on success, non-zero for failure.
220  */
hl_fw_load_fw_to_device(struct hl_device * hdev,const char * fw_name,void __iomem * dst,u32 src_offset,u32 size)221 int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
222 				void __iomem *dst, u32 src_offset, u32 size)
223 {
224 	const struct firmware *fw;
225 	int rc;
226 
227 	rc = hl_request_fw(hdev, &fw, fw_name);
228 	if (rc)
229 		return rc;
230 
231 	rc = hl_fw_copy_fw_to_device(hdev, fw, dst, src_offset, size);
232 
233 	hl_release_firmware(fw);
234 	return rc;
235 }
236 
hl_fw_send_pci_access_msg(struct hl_device * hdev,u32 opcode,u64 value)237 int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value)
238 {
239 	struct cpucp_packet pkt = {};
240 
241 	pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
242 	pkt.value = cpu_to_le64(value);
243 
244 	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
245 }
246 
hl_fw_send_cpu_message(struct hl_device * hdev,u32 hw_queue_id,u32 * msg,u16 len,u32 timeout,u64 * result)247 int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
248 				u16 len, u32 timeout, u64 *result)
249 {
250 	struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id];
251 	struct asic_fixed_properties *prop = &hdev->asic_prop;
252 	struct cpucp_packet *pkt;
253 	dma_addr_t pkt_dma_addr;
254 	struct hl_bd *sent_bd;
255 	u32 tmp, expected_ack_val, pi, opcode;
256 	int rc;
257 
258 	pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
259 	if (!pkt) {
260 		dev_err(hdev->dev,
261 			"Failed to allocate DMA memory for packet to CPU\n");
262 		return -ENOMEM;
263 	}
264 
265 	memcpy(pkt, msg, len);
266 
267 	mutex_lock(&hdev->send_cpu_message_lock);
268 
269 	/* CPU-CP messages can be sent during soft-reset */
270 	if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
271 		rc = 0;
272 		goto out;
273 	}
274 
275 	if (hdev->device_cpu_disabled) {
276 		rc = -EIO;
277 		goto out;
278 	}
279 
280 	/* set fence to a non valid value */
281 	pkt->fence = cpu_to_le32(UINT_MAX);
282 	pi = queue->pi;
283 
284 	/*
285 	 * The CPU queue is a synchronous queue with an effective depth of
286 	 * a single entry (although it is allocated with room for multiple
287 	 * entries). We lock on it using 'send_cpu_message_lock' which
288 	 * serializes accesses to the CPU queue.
289 	 * Which means that we don't need to lock the access to the entire H/W
290 	 * queues module when submitting a JOB to the CPU queue.
291 	 */
292 	hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr);
293 
294 	if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
295 		expected_ack_val = queue->pi;
296 	else
297 		expected_ack_val = CPUCP_PACKET_FENCE_VAL;
298 
299 	rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
300 				(tmp == expected_ack_val), 1000,
301 				timeout, true);
302 
303 	hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
304 
305 	if (rc == -ETIMEDOUT) {
306 		/* If FW performed reset just before sending it a packet, we will get a timeout.
307 		 * This is expected behavior, hence no need for error message.
308 		 */
309 		if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset)
310 			dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
311 					tmp);
312 		else
313 			dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp);
314 		hdev->device_cpu_disabled = true;
315 		goto out;
316 	}
317 
318 	tmp = le32_to_cpu(pkt->ctl);
319 
320 	rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
321 	if (rc) {
322 		opcode = (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT;
323 
324 		if (!prop->supports_advanced_cpucp_rc) {
325 			dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode);
326 			goto scrub_descriptor;
327 		}
328 
329 		switch (rc) {
330 		case cpucp_packet_invalid:
331 			dev_err(hdev->dev,
332 				"CPU packet %d is not supported by F/W\n", opcode);
333 			break;
334 		case cpucp_packet_fault:
335 			dev_err(hdev->dev,
336 				"F/W failed processing CPU packet %d\n", opcode);
337 			break;
338 		case cpucp_packet_invalid_pkt:
339 			dev_dbg(hdev->dev,
340 				"CPU packet %d is not supported by F/W\n", opcode);
341 			break;
342 		case cpucp_packet_invalid_params:
343 			dev_err(hdev->dev,
344 				"F/W reports invalid parameters for CPU packet %d\n", opcode);
345 			break;
346 
347 		default:
348 			dev_err(hdev->dev,
349 				"Unknown F/W ERROR %d for CPU packet %d\n", rc, opcode);
350 		}
351 
352 		/* propagate the return code from the f/w to the callers who want to check it */
353 		if (result)
354 			*result = rc;
355 
356 		rc = -EIO;
357 
358 	} else if (result) {
359 		*result = le64_to_cpu(pkt->result);
360 	}
361 
362 scrub_descriptor:
363 	/* Scrub previous buffer descriptor 'ctl' field which contains the
364 	 * previous PI value written during packet submission.
365 	 * We must do this or else F/W can read an old value upon queue wraparound.
366 	 */
367 	sent_bd = queue->kernel_address;
368 	sent_bd += hl_pi_2_offset(pi);
369 	sent_bd->ctl = cpu_to_le32(UINT_MAX);
370 
371 out:
372 	mutex_unlock(&hdev->send_cpu_message_lock);
373 
374 	hl_cpu_accessible_dma_pool_free(hdev, len, pkt);
375 
376 	return rc;
377 }
378 
hl_fw_unmask_irq(struct hl_device * hdev,u16 event_type)379 int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
380 {
381 	struct cpucp_packet pkt;
382 	u64 result;
383 	int rc;
384 
385 	memset(&pkt, 0, sizeof(pkt));
386 
387 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
388 				CPUCP_PKT_CTL_OPCODE_SHIFT);
389 	pkt.value = cpu_to_le64(event_type);
390 
391 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
392 						0, &result);
393 
394 	if (rc)
395 		dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
396 
397 	return rc;
398 }
399 
hl_fw_unmask_irq_arr(struct hl_device * hdev,const u32 * irq_arr,size_t irq_arr_size)400 int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
401 		size_t irq_arr_size)
402 {
403 	struct cpucp_unmask_irq_arr_packet *pkt;
404 	size_t total_pkt_size;
405 	u64 result;
406 	int rc;
407 
408 	total_pkt_size = sizeof(struct cpucp_unmask_irq_arr_packet) +
409 			irq_arr_size;
410 
411 	/* data should be aligned to 8 bytes in order to CPU-CP to copy it */
412 	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
413 
414 	/* total_pkt_size is casted to u16 later on */
415 	if (total_pkt_size > USHRT_MAX) {
416 		dev_err(hdev->dev, "too many elements in IRQ array\n");
417 		return -EINVAL;
418 	}
419 
420 	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
421 	if (!pkt)
422 		return -ENOMEM;
423 
424 	pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
425 	memcpy(&pkt->irqs, irq_arr, irq_arr_size);
426 
427 	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
428 						CPUCP_PKT_CTL_OPCODE_SHIFT);
429 
430 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
431 						total_pkt_size, 0, &result);
432 
433 	if (rc)
434 		dev_err(hdev->dev, "failed to unmask IRQ array\n");
435 
436 	kfree(pkt);
437 
438 	return rc;
439 }
440 
hl_fw_test_cpu_queue(struct hl_device * hdev)441 int hl_fw_test_cpu_queue(struct hl_device *hdev)
442 {
443 	struct cpucp_packet test_pkt = {};
444 	u64 result;
445 	int rc;
446 
447 	test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
448 					CPUCP_PKT_CTL_OPCODE_SHIFT);
449 	test_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
450 
451 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
452 						sizeof(test_pkt), 0, &result);
453 
454 	if (!rc) {
455 		if (result != CPUCP_PACKET_FENCE_VAL)
456 			dev_err(hdev->dev,
457 				"CPU queue test failed (%#08llx)\n", result);
458 	} else {
459 		dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
460 	}
461 
462 	return rc;
463 }
464 
hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device * hdev,size_t size,dma_addr_t * dma_handle)465 void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
466 						dma_addr_t *dma_handle)
467 {
468 	u64 kernel_addr;
469 
470 	kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);
471 
472 	*dma_handle = hdev->cpu_accessible_dma_address +
473 		(kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);
474 
475 	return (void *) (uintptr_t) kernel_addr;
476 }
477 
hl_fw_cpu_accessible_dma_pool_free(struct hl_device * hdev,size_t size,void * vaddr)478 void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
479 					void *vaddr)
480 {
481 	gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr,
482 			size);
483 }
484 
hl_fw_send_device_activity(struct hl_device * hdev,bool open)485 int hl_fw_send_device_activity(struct hl_device *hdev, bool open)
486 {
487 	struct cpucp_packet pkt;
488 	int rc;
489 
490 	memset(&pkt, 0, sizeof(pkt));
491 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_ACTIVE_STATUS_SET <<	CPUCP_PKT_CTL_OPCODE_SHIFT);
492 	pkt.value = cpu_to_le64(open);
493 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
494 	if (rc)
495 		dev_err(hdev->dev, "failed to send device activity msg(%u)\n", open);
496 
497 	return rc;
498 }
499 
hl_fw_send_heartbeat(struct hl_device * hdev)500 int hl_fw_send_heartbeat(struct hl_device *hdev)
501 {
502 	struct cpucp_packet hb_pkt;
503 	u64 result;
504 	int rc;
505 
506 	memset(&hb_pkt, 0, sizeof(hb_pkt));
507 	hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
508 					CPUCP_PKT_CTL_OPCODE_SHIFT);
509 	hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
510 
511 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
512 						sizeof(hb_pkt), 0, &result);
513 
514 	if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
515 		return -EIO;
516 
517 	if (le32_to_cpu(hb_pkt.status_mask) &
518 					CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK) {
519 		dev_warn(hdev->dev, "FW reported EQ fault during heartbeat\n");
520 		rc = -EIO;
521 	}
522 
523 	return rc;
524 }
525 
fw_report_boot_dev0(struct hl_device * hdev,u32 err_val,u32 sts_val)526 static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
527 								u32 sts_val)
528 {
529 	bool err_exists = false;
530 
531 	if (!(err_val & CPU_BOOT_ERR0_ENABLED))
532 		return false;
533 
534 	if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL) {
535 		dev_err(hdev->dev,
536 			"Device boot error - DRAM initialization failed\n");
537 		err_exists = true;
538 	}
539 
540 	if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED) {
541 		dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
542 		err_exists = true;
543 	}
544 
545 	if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL) {
546 		dev_err(hdev->dev,
547 			"Device boot error - Thermal Sensor initialization failed\n");
548 		err_exists = true;
549 	}
550 
551 	if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
552 		if (hdev->bmc_enable) {
553 			dev_err(hdev->dev,
554 				"Device boot error - Skipped waiting for BMC\n");
555 			err_exists = true;
556 		} else {
557 			dev_info(hdev->dev,
558 				"Device boot message - Skipped waiting for BMC\n");
559 			/* This is an info so we don't want it to disable the
560 			 * device
561 			 */
562 			err_val &= ~CPU_BOOT_ERR0_BMC_WAIT_SKIPPED;
563 		}
564 	}
565 
566 	if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY) {
567 		dev_err(hdev->dev,
568 			"Device boot error - Serdes data from BMC not available\n");
569 		err_exists = true;
570 	}
571 
572 	if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL) {
573 		dev_err(hdev->dev,
574 			"Device boot error - NIC F/W initialization failed\n");
575 		err_exists = true;
576 	}
577 
578 	if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) {
579 		dev_err(hdev->dev,
580 			"Device boot warning - security not ready\n");
581 		err_exists = true;
582 	}
583 
584 	if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) {
585 		dev_err(hdev->dev, "Device boot error - security failure\n");
586 		err_exists = true;
587 	}
588 
589 	if (err_val & CPU_BOOT_ERR0_EFUSE_FAIL) {
590 		dev_err(hdev->dev, "Device boot error - eFuse failure\n");
591 		err_exists = true;
592 	}
593 
594 	if (err_val & CPU_BOOT_ERR0_SEC_IMG_VER_FAIL) {
595 		dev_err(hdev->dev, "Device boot error - Failed to load preboot secondary image\n");
596 		err_exists = true;
597 	}
598 
599 	if (err_val & CPU_BOOT_ERR0_PLL_FAIL) {
600 		dev_err(hdev->dev, "Device boot error - PLL failure\n");
601 		err_exists = true;
602 	}
603 
604 	if (err_val & CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL) {
605 		/* Ignore this bit, don't prevent driver loading */
606 		dev_dbg(hdev->dev, "device unusable status is set\n");
607 		err_val &= ~CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL;
608 	}
609 
610 	if (err_val & CPU_BOOT_ERR0_BINNING_FAIL) {
611 		dev_err(hdev->dev, "Device boot error - binning failure\n");
612 		err_exists = true;
613 	}
614 
615 	if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
616 		dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
617 
618 	/* All warnings should go here in order not to reach the unknown error validation */
619 	if (err_val & CPU_BOOT_ERR0_EEPROM_FAIL) {
620 		dev_warn(hdev->dev,
621 			"Device boot warning - EEPROM failure detected, default settings applied\n");
622 		/* This is a warning so we don't want it to disable the
623 		 * device
624 		 */
625 		err_val &= ~CPU_BOOT_ERR0_EEPROM_FAIL;
626 	}
627 
628 	if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
629 		dev_warn(hdev->dev,
630 			"Device boot warning - Skipped DRAM initialization\n");
631 		/* This is a warning so we don't want it to disable the
632 		 * device
633 		 */
634 		err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED;
635 	}
636 
637 	if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) {
638 		dev_warn(hdev->dev,
639 			"Device boot warning - Failed to load preboot primary image\n");
640 		/* This is a warning so we don't want it to disable the
641 		 * device as we have a secondary preboot image
642 		 */
643 		err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL;
644 	}
645 
646 	if (err_val & CPU_BOOT_ERR0_TPM_FAIL) {
647 		dev_warn(hdev->dev,
648 			"Device boot warning - TPM failure\n");
649 		/* This is a warning so we don't want it to disable the
650 		 * device
651 		 */
652 		err_val &= ~CPU_BOOT_ERR0_TPM_FAIL;
653 	}
654 
655 	if (!err_exists && (err_val & ~CPU_BOOT_ERR0_ENABLED)) {
656 		dev_err(hdev->dev,
657 			"Device boot error - unknown ERR0 error 0x%08x\n", err_val);
658 		err_exists = true;
659 	}
660 
661 	/* return error only if it's in the predefined mask */
662 	if (err_exists && ((err_val & ~CPU_BOOT_ERR0_ENABLED) &
663 				lower_32_bits(hdev->boot_error_status_mask)))
664 		return true;
665 
666 	return false;
667 }
668 
669 /* placeholder for ERR1 as no errors defined there yet */
fw_report_boot_dev1(struct hl_device * hdev,u32 err_val,u32 sts_val)670 static bool fw_report_boot_dev1(struct hl_device *hdev, u32 err_val,
671 								u32 sts_val)
672 {
673 	/*
674 	 * keep this variable to preserve the logic of the function.
675 	 * this way it would require less modifications when error will be
676 	 * added to DEV_ERR1
677 	 */
678 	bool err_exists = false;
679 
680 	if (!(err_val & CPU_BOOT_ERR1_ENABLED))
681 		return false;
682 
683 	if (sts_val & CPU_BOOT_DEV_STS1_ENABLED)
684 		dev_dbg(hdev->dev, "Device status1 %#x\n", sts_val);
685 
686 	if (!err_exists && (err_val & ~CPU_BOOT_ERR1_ENABLED)) {
687 		dev_err(hdev->dev,
688 			"Device boot error - unknown ERR1 error 0x%08x\n",
689 								err_val);
690 		err_exists = true;
691 	}
692 
693 	/* return error only if it's in the predefined mask */
694 	if (err_exists && ((err_val & ~CPU_BOOT_ERR1_ENABLED) &
695 				upper_32_bits(hdev->boot_error_status_mask)))
696 		return true;
697 
698 	return false;
699 }
700 
fw_read_errors(struct hl_device * hdev,u32 boot_err0_reg,u32 boot_err1_reg,u32 cpu_boot_dev_status0_reg,u32 cpu_boot_dev_status1_reg)701 static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
702 				u32 boot_err1_reg, u32 cpu_boot_dev_status0_reg,
703 				u32 cpu_boot_dev_status1_reg)
704 {
705 	u32 err_val, status_val;
706 	bool err_exists = false;
707 
708 	/* Some of the firmware status codes are deprecated in newer f/w
709 	 * versions. In those versions, the errors are reported
710 	 * in different registers. Therefore, we need to check those
711 	 * registers and print the exact errors. Moreover, there
712 	 * may be multiple errors, so we need to report on each error
713 	 * separately. Some of the error codes might indicate a state
714 	 * that is not an error per-se, but it is an error in production
715 	 * environment
716 	 */
717 	err_val = RREG32(boot_err0_reg);
718 	status_val = RREG32(cpu_boot_dev_status0_reg);
719 	err_exists = fw_report_boot_dev0(hdev, err_val, status_val);
720 
721 	err_val = RREG32(boot_err1_reg);
722 	status_val = RREG32(cpu_boot_dev_status1_reg);
723 	err_exists |= fw_report_boot_dev1(hdev, err_val, status_val);
724 
725 	if (err_exists)
726 		return -EIO;
727 
728 	return 0;
729 }
730 
hl_fw_cpucp_info_get(struct hl_device * hdev,u32 sts_boot_dev_sts0_reg,u32 sts_boot_dev_sts1_reg,u32 boot_err0_reg,u32 boot_err1_reg)731 int hl_fw_cpucp_info_get(struct hl_device *hdev,
732 				u32 sts_boot_dev_sts0_reg,
733 				u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
734 				u32 boot_err1_reg)
735 {
736 	struct asic_fixed_properties *prop = &hdev->asic_prop;
737 	struct cpucp_packet pkt = {};
738 	dma_addr_t cpucp_info_dma_addr;
739 	void *cpucp_info_cpu_addr;
740 	char *kernel_ver;
741 	u64 result;
742 	int rc;
743 
744 	cpucp_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, sizeof(struct cpucp_info),
745 								&cpucp_info_dma_addr);
746 	if (!cpucp_info_cpu_addr) {
747 		dev_err(hdev->dev,
748 			"Failed to allocate DMA memory for CPU-CP info packet\n");
749 		return -ENOMEM;
750 	}
751 
752 	memset(cpucp_info_cpu_addr, 0, sizeof(struct cpucp_info));
753 
754 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_INFO_GET <<
755 				CPUCP_PKT_CTL_OPCODE_SHIFT);
756 	pkt.addr = cpu_to_le64(cpucp_info_dma_addr);
757 	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_info));
758 
759 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
760 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
761 	if (rc) {
762 		dev_err(hdev->dev,
763 			"Failed to handle CPU-CP info pkt, error %d\n", rc);
764 		goto out;
765 	}
766 
767 	rc = fw_read_errors(hdev, boot_err0_reg, boot_err1_reg,
768 				sts_boot_dev_sts0_reg, sts_boot_dev_sts1_reg);
769 	if (rc) {
770 		dev_err(hdev->dev, "Errors in device boot\n");
771 		goto out;
772 	}
773 
774 	memcpy(&prop->cpucp_info, cpucp_info_cpu_addr,
775 			sizeof(prop->cpucp_info));
776 
777 	rc = hl_build_hwmon_channel_info(hdev, prop->cpucp_info.sensors);
778 	if (rc) {
779 		dev_err(hdev->dev,
780 			"Failed to build hwmon channel info, error %d\n", rc);
781 		rc = -EFAULT;
782 		goto out;
783 	}
784 
785 	kernel_ver = extract_fw_ver_from_str(prop->cpucp_info.kernel_version);
786 	if (kernel_ver) {
787 		dev_info(hdev->dev, "Linux version %s", kernel_ver);
788 		kfree(kernel_ver);
789 	}
790 
791 	/* assume EQ code doesn't need to check eqe index */
792 	hdev->event_queue.check_eqe_index = false;
793 
794 	/* Read FW application security bits again */
795 	if (prop->fw_cpu_boot_dev_sts0_valid) {
796 		prop->fw_app_cpu_boot_dev_sts0 = RREG32(sts_boot_dev_sts0_reg);
797 		if (prop->fw_app_cpu_boot_dev_sts0 &
798 				CPU_BOOT_DEV_STS0_EQ_INDEX_EN)
799 			hdev->event_queue.check_eqe_index = true;
800 	}
801 
802 	if (prop->fw_cpu_boot_dev_sts1_valid)
803 		prop->fw_app_cpu_boot_dev_sts1 = RREG32(sts_boot_dev_sts1_reg);
804 
805 out:
806 	hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_info), cpucp_info_cpu_addr);
807 
808 	return rc;
809 }
810 
hl_fw_send_msi_info_msg(struct hl_device * hdev)811 static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
812 {
813 	struct cpucp_array_data_packet *pkt;
814 	size_t total_pkt_size, data_size;
815 	u64 result;
816 	int rc;
817 
818 	/* skip sending this info for unsupported ASICs */
819 	if (!hdev->asic_funcs->get_msi_info)
820 		return 0;
821 
822 	data_size = CPUCP_NUM_OF_MSI_TYPES * sizeof(u32);
823 	total_pkt_size = sizeof(struct cpucp_array_data_packet) + data_size;
824 
825 	/* data should be aligned to 8 bytes in order to CPU-CP to copy it */
826 	total_pkt_size = (total_pkt_size + 0x7) & ~0x7;
827 
828 	/* total_pkt_size is casted to u16 later on */
829 	if (total_pkt_size > USHRT_MAX) {
830 		dev_err(hdev->dev, "CPUCP array data is too big\n");
831 		return -EINVAL;
832 	}
833 
834 	pkt = kzalloc(total_pkt_size, GFP_KERNEL);
835 	if (!pkt)
836 		return -ENOMEM;
837 
838 	pkt->length = cpu_to_le32(CPUCP_NUM_OF_MSI_TYPES);
839 
840 	memset((void *) &pkt->data, 0xFF, data_size);
841 	hdev->asic_funcs->get_msi_info(pkt->data);
842 
843 	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_MSI_INFO_SET <<
844 						CPUCP_PKT_CTL_OPCODE_SHIFT);
845 
846 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)pkt,
847 						total_pkt_size, 0, &result);
848 
849 	/*
850 	 * in case packet result is invalid it means that FW does not support
851 	 * this feature and will use default/hard coded MSI values. no reason
852 	 * to stop the boot
853 	 */
854 	if (rc && result == cpucp_packet_invalid)
855 		rc = 0;
856 
857 	if (rc)
858 		dev_err(hdev->dev, "failed to send CPUCP array data\n");
859 
860 	kfree(pkt);
861 
862 	return rc;
863 }
864 
hl_fw_cpucp_handshake(struct hl_device * hdev,u32 sts_boot_dev_sts0_reg,u32 sts_boot_dev_sts1_reg,u32 boot_err0_reg,u32 boot_err1_reg)865 int hl_fw_cpucp_handshake(struct hl_device *hdev,
866 				u32 sts_boot_dev_sts0_reg,
867 				u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
868 				u32 boot_err1_reg)
869 {
870 	int rc;
871 
872 	rc = hl_fw_cpucp_info_get(hdev, sts_boot_dev_sts0_reg,
873 					sts_boot_dev_sts1_reg, boot_err0_reg,
874 					boot_err1_reg);
875 	if (rc)
876 		return rc;
877 
878 	return hl_fw_send_msi_info_msg(hdev);
879 }
880 
hl_fw_get_eeprom_data(struct hl_device * hdev,void * data,size_t max_size)881 int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
882 {
883 	struct cpucp_packet pkt = {};
884 	void *eeprom_info_cpu_addr;
885 	dma_addr_t eeprom_info_dma_addr;
886 	u64 result;
887 	int rc;
888 
889 	eeprom_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, max_size,
890 									&eeprom_info_dma_addr);
891 	if (!eeprom_info_cpu_addr) {
892 		dev_err(hdev->dev,
893 			"Failed to allocate DMA memory for CPU-CP EEPROM packet\n");
894 		return -ENOMEM;
895 	}
896 
897 	memset(eeprom_info_cpu_addr, 0, max_size);
898 
899 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_EEPROM_DATA_GET <<
900 				CPUCP_PKT_CTL_OPCODE_SHIFT);
901 	pkt.addr = cpu_to_le64(eeprom_info_dma_addr);
902 	pkt.data_max_size = cpu_to_le32(max_size);
903 
904 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
905 			HL_CPUCP_EEPROM_TIMEOUT_USEC, &result);
906 
907 	if (rc) {
908 		dev_err(hdev->dev,
909 			"Failed to handle CPU-CP EEPROM packet, error %d\n",
910 			rc);
911 		goto out;
912 	}
913 
914 	/* result contains the actual size */
915 	memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
916 
917 out:
918 	hl_cpu_accessible_dma_pool_free(hdev, max_size, eeprom_info_cpu_addr);
919 
920 	return rc;
921 }
922 
hl_fw_get_monitor_dump(struct hl_device * hdev,void * data)923 int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
924 {
925 	struct cpucp_monitor_dump *mon_dump_cpu_addr;
926 	dma_addr_t mon_dump_dma_addr;
927 	struct cpucp_packet pkt = {};
928 	size_t data_size;
929 	__le32 *src_ptr;
930 	u32 *dst_ptr;
931 	u64 result;
932 	int i, rc;
933 
934 	data_size = sizeof(struct cpucp_monitor_dump);
935 	mon_dump_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, data_size, &mon_dump_dma_addr);
936 	if (!mon_dump_cpu_addr) {
937 		dev_err(hdev->dev,
938 			"Failed to allocate DMA memory for CPU-CP monitor-dump packet\n");
939 		return -ENOMEM;
940 	}
941 
942 	memset(mon_dump_cpu_addr, 0, data_size);
943 
944 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MONITOR_DUMP_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
945 	pkt.addr = cpu_to_le64(mon_dump_dma_addr);
946 	pkt.data_max_size = cpu_to_le32(data_size);
947 
948 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
949 							HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result);
950 	if (rc) {
951 		dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
952 		goto out;
953 	}
954 
955 	/* result contains the actual size */
956 	src_ptr = (__le32 *) mon_dump_cpu_addr;
957 	dst_ptr = data;
958 	for (i = 0; i < (data_size / sizeof(u32)); i++) {
959 		*dst_ptr = le32_to_cpu(*src_ptr);
960 		src_ptr++;
961 		dst_ptr++;
962 	}
963 
964 out:
965 	hl_cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
966 
967 	return rc;
968 }
969 
hl_fw_cpucp_pci_counters_get(struct hl_device * hdev,struct hl_info_pci_counters * counters)970 int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
971 		struct hl_info_pci_counters *counters)
972 {
973 	struct cpucp_packet pkt = {};
974 	u64 result;
975 	int rc;
976 
977 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
978 			CPUCP_PKT_CTL_OPCODE_SHIFT);
979 
980 	/* Fetch PCI rx counter */
981 	pkt.index = cpu_to_le32(cpucp_pcie_throughput_rx);
982 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
983 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
984 	if (rc) {
985 		dev_err(hdev->dev,
986 			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
987 		return rc;
988 	}
989 	counters->rx_throughput = result;
990 
991 	memset(&pkt, 0, sizeof(pkt));
992 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
993 			CPUCP_PKT_CTL_OPCODE_SHIFT);
994 
995 	/* Fetch PCI tx counter */
996 	pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
997 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
998 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
999 	if (rc) {
1000 		dev_err(hdev->dev,
1001 			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1002 		return rc;
1003 	}
1004 	counters->tx_throughput = result;
1005 
1006 	/* Fetch PCI replay counter */
1007 	memset(&pkt, 0, sizeof(pkt));
1008 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
1009 			CPUCP_PKT_CTL_OPCODE_SHIFT);
1010 
1011 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1012 			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1013 	if (rc) {
1014 		dev_err(hdev->dev,
1015 			"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
1016 		return rc;
1017 	}
1018 	counters->replay_cnt = (u32) result;
1019 
1020 	return rc;
1021 }
1022 
hl_fw_cpucp_total_energy_get(struct hl_device * hdev,u64 * total_energy)1023 int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
1024 {
1025 	struct cpucp_packet pkt = {};
1026 	u64 result;
1027 	int rc;
1028 
1029 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET <<
1030 				CPUCP_PKT_CTL_OPCODE_SHIFT);
1031 
1032 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1033 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1034 	if (rc) {
1035 		dev_err(hdev->dev,
1036 			"Failed to handle CpuCP total energy pkt, error %d\n",
1037 				rc);
1038 		return rc;
1039 	}
1040 
1041 	*total_energy = result;
1042 
1043 	return rc;
1044 }
1045 
get_used_pll_index(struct hl_device * hdev,u32 input_pll_index,enum pll_index * pll_index)1046 int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
1047 						enum pll_index *pll_index)
1048 {
1049 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1050 	u8 pll_byte, pll_bit_off;
1051 	bool dynamic_pll;
1052 	int fw_pll_idx;
1053 
1054 	dynamic_pll = !!(prop->fw_app_cpu_boot_dev_sts0 &
1055 						CPU_BOOT_DEV_STS0_DYN_PLL_EN);
1056 
1057 	if (!dynamic_pll) {
1058 		/*
1059 		 * in case we are working with legacy FW (each asic has unique
1060 		 * PLL numbering) use the driver based index as they are
1061 		 * aligned with fw legacy numbering
1062 		 */
1063 		*pll_index = input_pll_index;
1064 		return 0;
1065 	}
1066 
1067 	/* retrieve a FW compatible PLL index based on
1068 	 * ASIC specific user request
1069 	 */
1070 	fw_pll_idx = hdev->asic_funcs->map_pll_idx_to_fw_idx(input_pll_index);
1071 	if (fw_pll_idx < 0) {
1072 		dev_err(hdev->dev, "Invalid PLL index (%u) error %d\n",
1073 			input_pll_index, fw_pll_idx);
1074 		return -EINVAL;
1075 	}
1076 
1077 	/* PLL map is a u8 array */
1078 	pll_byte = prop->cpucp_info.pll_map[fw_pll_idx >> 3];
1079 	pll_bit_off = fw_pll_idx & 0x7;
1080 
1081 	if (!(pll_byte & BIT(pll_bit_off))) {
1082 		dev_err(hdev->dev, "PLL index %d is not supported\n",
1083 			fw_pll_idx);
1084 		return -EINVAL;
1085 	}
1086 
1087 	*pll_index = fw_pll_idx;
1088 
1089 	return 0;
1090 }
1091 
hl_fw_cpucp_pll_info_get(struct hl_device * hdev,u32 pll_index,u16 * pll_freq_arr)1092 int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
1093 		u16 *pll_freq_arr)
1094 {
1095 	struct cpucp_packet pkt;
1096 	enum pll_index used_pll_idx;
1097 	u64 result;
1098 	int rc;
1099 
1100 	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
1101 	if (rc)
1102 		return rc;
1103 
1104 	memset(&pkt, 0, sizeof(pkt));
1105 
1106 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_PLL_INFO_GET <<
1107 				CPUCP_PKT_CTL_OPCODE_SHIFT);
1108 	pkt.pll_type = __cpu_to_le16((u16)used_pll_idx);
1109 
1110 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1111 			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1112 	if (rc) {
1113 		dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
1114 		return rc;
1115 	}
1116 
1117 	pll_freq_arr[0] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT0_MASK, result);
1118 	pll_freq_arr[1] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT1_MASK, result);
1119 	pll_freq_arr[2] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT2_MASK, result);
1120 	pll_freq_arr[3] = FIELD_GET(CPUCP_PKT_RES_PLL_OUT3_MASK, result);
1121 
1122 	return 0;
1123 }
1124 
hl_fw_cpucp_power_get(struct hl_device * hdev,u64 * power)1125 int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
1126 {
1127 	struct cpucp_packet pkt;
1128 	u64 result;
1129 	int rc;
1130 
1131 	memset(&pkt, 0, sizeof(pkt));
1132 
1133 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
1134 				CPUCP_PKT_CTL_OPCODE_SHIFT);
1135 	pkt.type = cpu_to_le16(CPUCP_POWER_INPUT);
1136 
1137 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1138 			HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1139 	if (rc) {
1140 		dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
1141 		return rc;
1142 	}
1143 
1144 	*power = result;
1145 
1146 	return rc;
1147 }
1148 
hl_fw_dram_replaced_row_get(struct hl_device * hdev,struct cpucp_hbm_row_info * info)1149 int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
1150 				struct cpucp_hbm_row_info *info)
1151 {
1152 	struct cpucp_hbm_row_info *cpucp_repl_rows_info_cpu_addr;
1153 	dma_addr_t cpucp_repl_rows_info_dma_addr;
1154 	struct cpucp_packet pkt = {};
1155 	u64 result;
1156 	int rc;
1157 
1158 	cpucp_repl_rows_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev,
1159 							sizeof(struct cpucp_hbm_row_info),
1160 							&cpucp_repl_rows_info_dma_addr);
1161 	if (!cpucp_repl_rows_info_cpu_addr) {
1162 		dev_err(hdev->dev,
1163 			"Failed to allocate DMA memory for CPU-CP replaced rows info packet\n");
1164 		return -ENOMEM;
1165 	}
1166 
1167 	memset(cpucp_repl_rows_info_cpu_addr, 0, sizeof(struct cpucp_hbm_row_info));
1168 
1169 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET <<
1170 					CPUCP_PKT_CTL_OPCODE_SHIFT);
1171 	pkt.addr = cpu_to_le64(cpucp_repl_rows_info_dma_addr);
1172 	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_hbm_row_info));
1173 
1174 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1175 					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
1176 	if (rc) {
1177 		dev_err(hdev->dev,
1178 			"Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
1179 		goto out;
1180 	}
1181 
1182 	memcpy(info, cpucp_repl_rows_info_cpu_addr, sizeof(*info));
1183 
1184 out:
1185 	hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_hbm_row_info),
1186 						cpucp_repl_rows_info_cpu_addr);
1187 
1188 	return rc;
1189 }
1190 
hl_fw_dram_pending_row_get(struct hl_device * hdev,u32 * pend_rows_num)1191 int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num)
1192 {
1193 	struct cpucp_packet pkt;
1194 	u64 result;
1195 	int rc;
1196 
1197 	memset(&pkt, 0, sizeof(pkt));
1198 
1199 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_PENDING_ROWS_STATUS << CPUCP_PKT_CTL_OPCODE_SHIFT);
1200 
1201 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
1202 	if (rc) {
1203 		dev_err(hdev->dev,
1204 				"Failed to handle CPU-CP pending rows info pkt, error %d\n", rc);
1205 		goto out;
1206 	}
1207 
1208 	*pend_rows_num = (u32) result;
1209 out:
1210 	return rc;
1211 }
1212 
hl_fw_cpucp_engine_core_asid_set(struct hl_device * hdev,u32 asid)1213 int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid)
1214 {
1215 	struct cpucp_packet pkt;
1216 	int rc;
1217 
1218 	memset(&pkt, 0, sizeof(pkt));
1219 
1220 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_ENGINE_CORE_ASID_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
1221 	pkt.value = cpu_to_le64(asid);
1222 
1223 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
1224 						HL_CPUCP_INFO_TIMEOUT_USEC, NULL);
1225 	if (rc)
1226 		dev_err(hdev->dev,
1227 			"Failed on ASID configuration request for engine core, error %d\n",
1228 			rc);
1229 
1230 	return rc;
1231 }
1232 
hl_fw_ask_hard_reset_without_linux(struct hl_device * hdev)1233 void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev)
1234 {
1235 	struct static_fw_load_mgr *static_loader =
1236 			&hdev->fw_loader.static_loader;
1237 	int rc;
1238 
1239 	if (hdev->asic_prop.dynamic_fw_load) {
1240 		rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
1241 				COMMS_RST_DEV, 0, false,
1242 				hdev->fw_loader.cpu_timeout);
1243 		if (rc)
1244 			dev_warn(hdev->dev, "Failed sending COMMS_RST_DEV\n");
1245 	} else {
1246 		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_RST_DEV);
1247 	}
1248 }
1249 
hl_fw_ask_halt_machine_without_linux(struct hl_device * hdev)1250 void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev)
1251 {
1252 	struct static_fw_load_mgr *static_loader =
1253 			&hdev->fw_loader.static_loader;
1254 	int rc;
1255 
1256 	if (hdev->device_cpu_is_halted)
1257 		return;
1258 
1259 	/* Stop device CPU to make sure nothing bad happens */
1260 	if (hdev->asic_prop.dynamic_fw_load) {
1261 		rc = hl_fw_dynamic_send_protocol_cmd(hdev, &hdev->fw_loader,
1262 				COMMS_GOTO_WFE, 0, true,
1263 				hdev->fw_loader.cpu_timeout);
1264 		if (rc)
1265 			dev_warn(hdev->dev, "Failed sending COMMS_GOTO_WFE\n");
1266 	} else {
1267 		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_GOTO_WFE);
1268 		msleep(static_loader->cpu_reset_wait_msec);
1269 
1270 		/* Must clear this register in order to prevent preboot
1271 		 * from reading WFE after reboot
1272 		 */
1273 		WREG32(static_loader->kmd_msg_to_cpu_reg, KMD_MSG_NA);
1274 	}
1275 
1276 	hdev->device_cpu_is_halted = true;
1277 }
1278 
detect_cpu_boot_status(struct hl_device * hdev,u32 status)1279 static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
1280 {
1281 	/* Some of the status codes below are deprecated in newer f/w
1282 	 * versions but we keep them here for backward compatibility
1283 	 */
1284 	switch (status) {
1285 	case CPU_BOOT_STATUS_NA:
1286 		dev_err(hdev->dev,
1287 			"Device boot progress - BTL/ROM did NOT run\n");
1288 		break;
1289 	case CPU_BOOT_STATUS_IN_WFE:
1290 		dev_err(hdev->dev,
1291 			"Device boot progress - Stuck inside WFE loop\n");
1292 		break;
1293 	case CPU_BOOT_STATUS_IN_BTL:
1294 		dev_err(hdev->dev,
1295 			"Device boot progress - Stuck in BTL\n");
1296 		break;
1297 	case CPU_BOOT_STATUS_IN_PREBOOT:
1298 		dev_err(hdev->dev,
1299 			"Device boot progress - Stuck in Preboot\n");
1300 		break;
1301 	case CPU_BOOT_STATUS_IN_SPL:
1302 		dev_err(hdev->dev,
1303 			"Device boot progress - Stuck in SPL\n");
1304 		break;
1305 	case CPU_BOOT_STATUS_IN_UBOOT:
1306 		dev_err(hdev->dev,
1307 			"Device boot progress - Stuck in u-boot\n");
1308 		break;
1309 	case CPU_BOOT_STATUS_DRAM_INIT_FAIL:
1310 		dev_err(hdev->dev,
1311 			"Device boot progress - DRAM initialization failed\n");
1312 		break;
1313 	case CPU_BOOT_STATUS_UBOOT_NOT_READY:
1314 		dev_err(hdev->dev,
1315 			"Device boot progress - Cannot boot\n");
1316 		break;
1317 	case CPU_BOOT_STATUS_TS_INIT_FAIL:
1318 		dev_err(hdev->dev,
1319 			"Device boot progress - Thermal Sensor initialization failed\n");
1320 		break;
1321 	case CPU_BOOT_STATUS_SECURITY_READY:
1322 		dev_err(hdev->dev,
1323 			"Device boot progress - Stuck in preboot after security initialization\n");
1324 		break;
1325 	default:
1326 		dev_err(hdev->dev,
1327 			"Device boot progress - Invalid status code %d\n",
1328 			status);
1329 		break;
1330 	}
1331 }
1332 
hl_fw_wait_preboot_ready(struct hl_device * hdev)1333 static int hl_fw_wait_preboot_ready(struct hl_device *hdev)
1334 {
1335 	struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
1336 	u32 status;
1337 	int rc;
1338 
1339 	/* Need to check two possible scenarios:
1340 	 *
1341 	 * CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT - for newer firmwares where
1342 	 * the preboot is waiting for the boot fit
1343 	 *
1344 	 * All other status values - for older firmwares where the uboot was
1345 	 * loaded from the FLASH
1346 	 */
1347 	rc = hl_poll_timeout(
1348 		hdev,
1349 		pre_fw_load->cpu_boot_status_reg,
1350 		status,
1351 		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
1352 		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
1353 		(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
1354 		hdev->fw_poll_interval_usec,
1355 		pre_fw_load->wait_for_preboot_timeout);
1356 
1357 	if (rc) {
1358 		dev_err(hdev->dev, "CPU boot ready status timeout\n");
1359 		detect_cpu_boot_status(hdev, status);
1360 
1361 		/* If we read all FF, then something is totally wrong, no point
1362 		 * of reading specific errors
1363 		 */
1364 		if (status != -1)
1365 			fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
1366 						pre_fw_load->boot_err1_reg,
1367 						pre_fw_load->sts_boot_dev_sts0_reg,
1368 						pre_fw_load->sts_boot_dev_sts1_reg);
1369 		return -EIO;
1370 	}
1371 
1372 	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_PREBOOT_CPU;
1373 
1374 	return 0;
1375 }
1376 
hl_fw_read_preboot_caps(struct hl_device * hdev)1377 static int hl_fw_read_preboot_caps(struct hl_device *hdev)
1378 {
1379 	struct pre_fw_load_props *pre_fw_load;
1380 	struct asic_fixed_properties *prop;
1381 	u32 reg_val;
1382 	int rc;
1383 
1384 	prop = &hdev->asic_prop;
1385 	pre_fw_load = &hdev->fw_loader.pre_fw_load;
1386 
1387 	rc = hl_fw_wait_preboot_ready(hdev);
1388 	if (rc)
1389 		return rc;
1390 
1391 	/*
1392 	 * the registers DEV_STS* contain FW capabilities/features.
1393 	 * We can rely on this registers only if bit CPU_BOOT_DEV_STS*_ENABLED
1394 	 * is set.
1395 	 * In the first read of this register we store the value of this
1396 	 * register ONLY if the register is enabled (which will be propagated
1397 	 * to next stages) and also mark the register as valid.
1398 	 * In case it is not enabled the stored value will be left 0- all
1399 	 * caps/features are off
1400 	 */
1401 	reg_val = RREG32(pre_fw_load->sts_boot_dev_sts0_reg);
1402 	if (reg_val & CPU_BOOT_DEV_STS0_ENABLED) {
1403 		prop->fw_cpu_boot_dev_sts0_valid = true;
1404 		prop->fw_preboot_cpu_boot_dev_sts0 = reg_val;
1405 	}
1406 
1407 	reg_val = RREG32(pre_fw_load->sts_boot_dev_sts1_reg);
1408 	if (reg_val & CPU_BOOT_DEV_STS1_ENABLED) {
1409 		prop->fw_cpu_boot_dev_sts1_valid = true;
1410 		prop->fw_preboot_cpu_boot_dev_sts1 = reg_val;
1411 	}
1412 
1413 	prop->dynamic_fw_load = !!(prop->fw_preboot_cpu_boot_dev_sts0 &
1414 						CPU_BOOT_DEV_STS0_FW_LD_COM_EN);
1415 
1416 	/* initialize FW loader once we know what load protocol is used */
1417 	hdev->asic_funcs->init_firmware_loader(hdev);
1418 
1419 	dev_dbg(hdev->dev, "Attempting %s FW load\n",
1420 			prop->dynamic_fw_load ? "dynamic" : "legacy");
1421 	return 0;
1422 }
1423 
hl_fw_static_read_device_fw_version(struct hl_device * hdev,enum hl_fw_component fwc)1424 static int hl_fw_static_read_device_fw_version(struct hl_device *hdev,
1425 					enum hl_fw_component fwc)
1426 {
1427 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1428 	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
1429 	struct static_fw_load_mgr *static_loader;
1430 	char *dest, *boot_ver, *preboot_ver;
1431 	u32 ver_off, limit;
1432 	const char *name;
1433 	char btl_ver[32];
1434 
1435 	static_loader = &hdev->fw_loader.static_loader;
1436 
1437 	switch (fwc) {
1438 	case FW_COMP_BOOT_FIT:
1439 		ver_off = RREG32(static_loader->boot_fit_version_offset_reg);
1440 		dest = prop->uboot_ver;
1441 		name = "Boot-fit";
1442 		limit = static_loader->boot_fit_version_max_off;
1443 		break;
1444 	case FW_COMP_PREBOOT:
1445 		ver_off = RREG32(static_loader->preboot_version_offset_reg);
1446 		dest = prop->preboot_ver;
1447 		name = "Preboot";
1448 		limit = static_loader->preboot_version_max_off;
1449 		break;
1450 	default:
1451 		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
1452 		return -EIO;
1453 	}
1454 
1455 	ver_off &= static_loader->sram_offset_mask;
1456 
1457 	if (ver_off < limit) {
1458 		memcpy_fromio(dest,
1459 			hdev->pcie_bar[fw_loader->sram_bar_id] + ver_off,
1460 			VERSION_MAX_LEN);
1461 	} else {
1462 		dev_err(hdev->dev, "%s version offset (0x%x) is above SRAM\n",
1463 								name, ver_off);
1464 		strscpy(dest, "unavailable", VERSION_MAX_LEN);
1465 		return -EIO;
1466 	}
1467 
1468 	if (fwc == FW_COMP_BOOT_FIT) {
1469 		boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
1470 		if (boot_ver) {
1471 			dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
1472 			kfree(boot_ver);
1473 		}
1474 	} else if (fwc == FW_COMP_PREBOOT) {
1475 		preboot_ver = strnstr(prop->preboot_ver, "Preboot",
1476 						VERSION_MAX_LEN);
1477 		if (preboot_ver && preboot_ver != prop->preboot_ver) {
1478 			strscpy(btl_ver, prop->preboot_ver,
1479 				min((int) (preboot_ver - prop->preboot_ver),
1480 									31));
1481 			dev_info(hdev->dev, "%s\n", btl_ver);
1482 		}
1483 
1484 		preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
1485 		if (preboot_ver) {
1486 			dev_info(hdev->dev, "preboot version %s\n",
1487 								preboot_ver);
1488 			kfree(preboot_ver);
1489 		}
1490 	}
1491 
1492 	return 0;
1493 }
1494 
1495 /**
1496  * hl_fw_preboot_update_state - update internal data structures during
1497  *                              handshake with preboot
1498  *
1499  *
1500  * @hdev: pointer to the habanalabs device structure
1501  *
1502  * @return 0 on success, otherwise non-zero error code
1503  */
hl_fw_preboot_update_state(struct hl_device * hdev)1504 static void hl_fw_preboot_update_state(struct hl_device *hdev)
1505 {
1506 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1507 	u32 cpu_boot_dev_sts0, cpu_boot_dev_sts1;
1508 
1509 	cpu_boot_dev_sts0 = prop->fw_preboot_cpu_boot_dev_sts0;
1510 	cpu_boot_dev_sts1 = prop->fw_preboot_cpu_boot_dev_sts1;
1511 
1512 	/* We read boot_dev_sts registers multiple times during boot:
1513 	 * 1. preboot - a. Check whether the security status bits are valid
1514 	 *              b. Check whether fw security is enabled
1515 	 *              c. Check whether hard reset is done by preboot
1516 	 * 2. boot cpu - a. Fetch boot cpu security status
1517 	 *               b. Check whether hard reset is done by boot cpu
1518 	 * 3. FW application - a. Fetch fw application security status
1519 	 *                     b. Check whether hard reset is done by fw app
1520 	 */
1521 	prop->hard_reset_done_by_fw = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
1522 
1523 	prop->fw_security_enabled = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_SECURITY_EN);
1524 
1525 	dev_dbg(hdev->dev, "Firmware preboot boot device status0 %#x\n",
1526 							cpu_boot_dev_sts0);
1527 
1528 	dev_dbg(hdev->dev, "Firmware preboot boot device status1 %#x\n",
1529 							cpu_boot_dev_sts1);
1530 
1531 	dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
1532 			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
1533 
1534 	dev_dbg(hdev->dev, "firmware-level security is %s\n",
1535 			prop->fw_security_enabled ? "enabled" : "disabled");
1536 
1537 	dev_dbg(hdev->dev, "GIC controller is %s\n",
1538 			prop->gic_interrupts_enable ? "enabled" : "disabled");
1539 }
1540 
hl_fw_static_read_preboot_status(struct hl_device * hdev)1541 static int hl_fw_static_read_preboot_status(struct hl_device *hdev)
1542 {
1543 	int rc;
1544 
1545 	rc = hl_fw_static_read_device_fw_version(hdev, FW_COMP_PREBOOT);
1546 	if (rc)
1547 		return rc;
1548 
1549 	return 0;
1550 }
1551 
hl_fw_read_preboot_status(struct hl_device * hdev)1552 int hl_fw_read_preboot_status(struct hl_device *hdev)
1553 {
1554 	int rc;
1555 
1556 	if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
1557 		return 0;
1558 
1559 	/* get FW pre-load parameters  */
1560 	hdev->asic_funcs->init_firmware_preload_params(hdev);
1561 
1562 	/*
1563 	 * In order to determine boot method (static VS dynamic) we need to
1564 	 * read the boot caps register
1565 	 */
1566 	rc = hl_fw_read_preboot_caps(hdev);
1567 	if (rc)
1568 		return rc;
1569 
1570 	hl_fw_preboot_update_state(hdev);
1571 
1572 	/* no need to read preboot status in dynamic load */
1573 	if (hdev->asic_prop.dynamic_fw_load)
1574 		return 0;
1575 
1576 	return hl_fw_static_read_preboot_status(hdev);
1577 }
1578 
1579 /* associate string with COMM status */
1580 static char *hl_dynamic_fw_status_str[COMMS_STS_INVLD_LAST] = {
1581 	[COMMS_STS_NOOP] = "NOOP",
1582 	[COMMS_STS_ACK] = "ACK",
1583 	[COMMS_STS_OK] = "OK",
1584 	[COMMS_STS_ERR] = "ERR",
1585 	[COMMS_STS_VALID_ERR] = "VALID_ERR",
1586 	[COMMS_STS_TIMEOUT_ERR] = "TIMEOUT_ERR",
1587 };
1588 
1589 /**
1590  * hl_fw_dynamic_report_error_status - report error status
1591  *
1592  * @hdev: pointer to the habanalabs device structure
1593  * @status: value of FW status register
1594  * @expected_status: the expected status
1595  */
hl_fw_dynamic_report_error_status(struct hl_device * hdev,u32 status,enum comms_sts expected_status)1596 static void hl_fw_dynamic_report_error_status(struct hl_device *hdev,
1597 						u32 status,
1598 						enum comms_sts expected_status)
1599 {
1600 	enum comms_sts comm_status =
1601 				FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
1602 
1603 	if (comm_status < COMMS_STS_INVLD_LAST)
1604 		dev_err(hdev->dev, "Device status %s, expected status: %s\n",
1605 				hl_dynamic_fw_status_str[comm_status],
1606 				hl_dynamic_fw_status_str[expected_status]);
1607 	else
1608 		dev_err(hdev->dev, "Device status unknown %d, expected status: %s\n",
1609 				comm_status,
1610 				hl_dynamic_fw_status_str[expected_status]);
1611 }
1612 
1613 /**
1614  * hl_fw_dynamic_send_cmd - send LKD to FW cmd
1615  *
1616  * @hdev: pointer to the habanalabs device structure
1617  * @fw_loader: managing structure for loading device's FW
1618  * @cmd: LKD to FW cmd code
1619  * @size: size of next FW component to be loaded (0 if not necessary)
1620  *
1621  * LDK to FW exact command layout is defined at struct comms_command.
1622  * note: the size argument is used only when the next FW component should be
1623  *       loaded, otherwise it shall be 0. the size is used by the FW in later
1624  *       protocol stages and when sending only indicating the amount of memory
1625  *       to be allocated by the FW to receive the next boot component.
1626  */
hl_fw_dynamic_send_cmd(struct hl_device * hdev,struct fw_load_mgr * fw_loader,enum comms_cmd cmd,unsigned int size)1627 static void hl_fw_dynamic_send_cmd(struct hl_device *hdev,
1628 				struct fw_load_mgr *fw_loader,
1629 				enum comms_cmd cmd, unsigned int size)
1630 {
1631 	struct cpu_dyn_regs *dyn_regs;
1632 	u32 val;
1633 
1634 	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
1635 
1636 	val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd);
1637 	val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size);
1638 
1639 	WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val);
1640 }
1641 
1642 /**
1643  * hl_fw_dynamic_extract_fw_response - update the FW response
1644  *
1645  * @hdev: pointer to the habanalabs device structure
1646  * @fw_loader: managing structure for loading device's FW
1647  * @response: FW response
1648  * @status: the status read from CPU status register
1649  *
1650  * @return 0 on success, otherwise non-zero error code
1651  */
hl_fw_dynamic_extract_fw_response(struct hl_device * hdev,struct fw_load_mgr * fw_loader,struct fw_response * response,u32 status)1652 static int hl_fw_dynamic_extract_fw_response(struct hl_device *hdev,
1653 						struct fw_load_mgr *fw_loader,
1654 						struct fw_response *response,
1655 						u32 status)
1656 {
1657 	response->status = FIELD_GET(COMMS_STATUS_STATUS_MASK, status);
1658 	response->ram_offset = FIELD_GET(COMMS_STATUS_OFFSET_MASK, status) <<
1659 						COMMS_STATUS_OFFSET_ALIGN_SHIFT;
1660 	response->ram_type = FIELD_GET(COMMS_STATUS_RAM_TYPE_MASK, status);
1661 
1662 	if ((response->ram_type != COMMS_SRAM) &&
1663 					(response->ram_type != COMMS_DRAM)) {
1664 		dev_err(hdev->dev, "FW status: invalid RAM type %u\n",
1665 							response->ram_type);
1666 		return -EIO;
1667 	}
1668 
1669 	return 0;
1670 }
1671 
1672 /**
1673  * hl_fw_dynamic_wait_for_status - wait for status in dynamic FW load
1674  *
1675  * @hdev: pointer to the habanalabs device structure
1676  * @fw_loader: managing structure for loading device's FW
1677  * @expected_status: expected status to wait for
1678  * @timeout: timeout for status wait
1679  *
1680  * @return 0 on success, otherwise non-zero error code
1681  *
1682  * waiting for status from FW include polling the FW status register until
1683  * expected status is received or timeout occurs (whatever occurs first).
1684  */
hl_fw_dynamic_wait_for_status(struct hl_device * hdev,struct fw_load_mgr * fw_loader,enum comms_sts expected_status,u32 timeout)1685 static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
1686 						struct fw_load_mgr *fw_loader,
1687 						enum comms_sts expected_status,
1688 						u32 timeout)
1689 {
1690 	struct cpu_dyn_regs *dyn_regs;
1691 	u32 status;
1692 	int rc;
1693 
1694 	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
1695 
1696 	/* Wait for expected status */
1697 	rc = hl_poll_timeout(
1698 		hdev,
1699 		le32_to_cpu(dyn_regs->cpu_cmd_status_to_host),
1700 		status,
1701 		FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status,
1702 		hdev->fw_comms_poll_interval_usec,
1703 		timeout);
1704 
1705 	if (rc) {
1706 		hl_fw_dynamic_report_error_status(hdev, status,
1707 							expected_status);
1708 		return -EIO;
1709 	}
1710 
1711 	/*
1712 	 * skip storing FW response for NOOP to preserve the actual desired
1713 	 * FW status
1714 	 */
1715 	if (expected_status == COMMS_STS_NOOP)
1716 		return 0;
1717 
1718 	rc = hl_fw_dynamic_extract_fw_response(hdev, fw_loader,
1719 					&fw_loader->dynamic_loader.response,
1720 					status);
1721 	return rc;
1722 }
1723 
1724 /**
1725  * hl_fw_dynamic_send_clear_cmd - send clear command to FW
1726  *
1727  * @hdev: pointer to the habanalabs device structure
1728  * @fw_loader: managing structure for loading device's FW
1729  *
1730  * @return 0 on success, otherwise non-zero error code
1731  *
1732  * after command cycle between LKD to FW CPU (i.e. LKD got an expected status
1733  * from FW) we need to clear the CPU status register in order to avoid garbage
1734  * between command cycles.
1735  * This is done by sending clear command and polling the CPU to LKD status
1736  * register to hold the status NOOP
1737  */
hl_fw_dynamic_send_clear_cmd(struct hl_device * hdev,struct fw_load_mgr * fw_loader)1738 static int hl_fw_dynamic_send_clear_cmd(struct hl_device *hdev,
1739 						struct fw_load_mgr *fw_loader)
1740 {
1741 	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_CLR_STS, 0);
1742 
1743 	return hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_NOOP,
1744 							fw_loader->cpu_timeout);
1745 }
1746 
1747 /**
1748  * hl_fw_dynamic_send_protocol_cmd - send LKD to FW cmd and wait for ACK
1749  *
1750  * @hdev: pointer to the habanalabs device structure
1751  * @fw_loader: managing structure for loading device's FW
1752  * @cmd: LKD to FW cmd code
1753  * @size: size of next FW component to be loaded (0 if not necessary)
1754  * @wait_ok: if true also wait for OK response from FW
1755  * @timeout: timeout for status wait
1756  *
1757  * @return 0 on success, otherwise non-zero error code
1758  *
1759  * brief:
1760  * when sending protocol command we have the following steps:
1761  * - send clear (clear command and verify clear status register)
1762  * - send the actual protocol command
1763  * - wait for ACK on the protocol command
1764  * - send clear
1765  * - send NOOP
1766  * if, in addition, the specific protocol command should wait for OK then:
1767  * - wait for OK
1768  * - send clear
1769  * - send NOOP
1770  *
1771  * NOTES:
1772  * send clear: this is necessary in order to clear the status register to avoid
1773  *             leftovers between command
1774  * NOOP command: necessary to avoid loop on the clear command by the FW
1775  */
hl_fw_dynamic_send_protocol_cmd(struct hl_device * hdev,struct fw_load_mgr * fw_loader,enum comms_cmd cmd,unsigned int size,bool wait_ok,u32 timeout)1776 int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
1777 				struct fw_load_mgr *fw_loader,
1778 				enum comms_cmd cmd, unsigned int size,
1779 				bool wait_ok, u32 timeout)
1780 {
1781 	int rc;
1782 
1783 	/* first send clear command to clean former commands */
1784 	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
1785 
1786 	/* send the actual command */
1787 	hl_fw_dynamic_send_cmd(hdev, fw_loader, cmd, size);
1788 
1789 	/* wait for ACK for the command */
1790 	rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_ACK,
1791 								timeout);
1792 	if (rc)
1793 		return rc;
1794 
1795 	/* clear command to prepare for NOOP command */
1796 	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
1797 	if (rc)
1798 		return rc;
1799 
1800 	/* send the actual NOOP command */
1801 	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
1802 
1803 	if (!wait_ok)
1804 		return 0;
1805 
1806 	rc = hl_fw_dynamic_wait_for_status(hdev, fw_loader, COMMS_STS_OK,
1807 								timeout);
1808 	if (rc)
1809 		return rc;
1810 
1811 	/* clear command to prepare for NOOP command */
1812 	rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
1813 	if (rc)
1814 		return rc;
1815 
1816 	/* send the actual NOOP command */
1817 	hl_fw_dynamic_send_cmd(hdev, fw_loader, COMMS_NOOP, 0);
1818 
1819 	return 0;
1820 }
1821 
1822 /**
1823  * hl_fw_compat_crc32 - CRC compatible with FW
1824  *
1825  * @data: pointer to the data
1826  * @size: size of the data
1827  *
1828  * @return the CRC32 result
1829  *
1830  * NOTE: kernel's CRC32 differs from standard CRC32 calculation.
1831  *       in order to be aligned we need to flip the bits of both the input
1832  *       initial CRC and kernel's CRC32 result.
1833  *       in addition both sides use initial CRC of 0,
1834  */
hl_fw_compat_crc32(u8 * data,size_t size)1835 static u32 hl_fw_compat_crc32(u8 *data, size_t size)
1836 {
1837 	return ~crc32_le(~((u32)0), data, size);
1838 }
1839 
1840 /**
1841  * hl_fw_dynamic_validate_memory_bound - validate memory bounds for memory
1842  *                                        transfer (image or descriptor) between
1843  *                                        host and FW
1844  *
1845  * @hdev: pointer to the habanalabs device structure
1846  * @addr: device address of memory transfer
1847  * @size: memory transfer size
1848  * @region: PCI memory region
1849  *
1850  * @return 0 on success, otherwise non-zero error code
1851  */
hl_fw_dynamic_validate_memory_bound(struct hl_device * hdev,u64 addr,size_t size,struct pci_mem_region * region)1852 static int hl_fw_dynamic_validate_memory_bound(struct hl_device *hdev,
1853 						u64 addr, size_t size,
1854 						struct pci_mem_region *region)
1855 {
1856 	u64 end_addr;
1857 
1858 	/* now make sure that the memory transfer is within region's bounds */
1859 	end_addr = addr + size;
1860 	if (end_addr >= region->region_base + region->region_size) {
1861 		dev_err(hdev->dev,
1862 			"dynamic FW load: memory transfer end address out of memory region bounds. addr: %llx\n",
1863 							end_addr);
1864 		return -EIO;
1865 	}
1866 
1867 	/*
1868 	 * now make sure memory transfer is within predefined BAR bounds.
1869 	 * this is to make sure we do not need to set the bar (e.g. for DRAM
1870 	 * memory transfers)
1871 	 */
1872 	if (end_addr >= region->region_base - region->offset_in_bar +
1873 							region->bar_size) {
1874 		dev_err(hdev->dev,
1875 			"FW image beyond PCI BAR bounds\n");
1876 		return -EIO;
1877 	}
1878 
1879 	return 0;
1880 }
1881 
1882 /**
1883  * hl_fw_dynamic_validate_descriptor - validate FW descriptor
1884  *
1885  * @hdev: pointer to the habanalabs device structure
1886  * @fw_loader: managing structure for loading device's FW
1887  * @fw_desc: the descriptor form FW
1888  *
1889  * @return 0 on success, otherwise non-zero error code
1890  */
hl_fw_dynamic_validate_descriptor(struct hl_device * hdev,struct fw_load_mgr * fw_loader,struct lkd_fw_comms_desc * fw_desc)1891 static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
1892 					struct fw_load_mgr *fw_loader,
1893 					struct lkd_fw_comms_desc *fw_desc)
1894 {
1895 	struct pci_mem_region *region;
1896 	enum pci_region region_id;
1897 	size_t data_size;
1898 	u32 data_crc32;
1899 	u8 *data_ptr;
1900 	u64 addr;
1901 	int rc;
1902 
1903 	if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC)
1904 		dev_warn(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
1905 				fw_desc->header.magic);
1906 
1907 	if (fw_desc->header.version != HL_COMMS_DESC_VER)
1908 		dev_warn(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
1909 				fw_desc->header.version);
1910 
1911 	/*
1912 	 * Calc CRC32 of data without header. use the size of the descriptor
1913 	 * reported by firmware, without calculating it ourself, to allow adding
1914 	 * more fields to the lkd_fw_comms_desc structure.
1915 	 * note that no alignment/stride address issues here as all structures
1916 	 * are 64 bit padded.
1917 	 */
1918 	data_ptr = (u8 *)fw_desc + sizeof(struct comms_desc_header);
1919 	data_size = le16_to_cpu(fw_desc->header.size);
1920 
1921 	data_crc32 = hl_fw_compat_crc32(data_ptr, data_size);
1922 	if (data_crc32 != le32_to_cpu(fw_desc->header.crc32)) {
1923 		dev_err(hdev->dev, "CRC32 mismatch for dynamic FW descriptor (%x:%x)\n",
1924 			data_crc32, fw_desc->header.crc32);
1925 		return -EIO;
1926 	}
1927 
1928 	/* find memory region to which to copy the image */
1929 	addr = le64_to_cpu(fw_desc->img_addr);
1930 	region_id = hl_get_pci_memory_region(hdev, addr);
1931 	if ((region_id != PCI_REGION_SRAM) && ((region_id != PCI_REGION_DRAM))) {
1932 		dev_err(hdev->dev, "Invalid region to copy FW image address=%llx\n", addr);
1933 		return -EIO;
1934 	}
1935 
1936 	region = &hdev->pci_mem_region[region_id];
1937 
1938 	/* store the region for the copy stage */
1939 	fw_loader->dynamic_loader.image_region = region;
1940 
1941 	/*
1942 	 * here we know that the start address is valid, now make sure that the
1943 	 * image is within region's bounds
1944 	 */
1945 	rc = hl_fw_dynamic_validate_memory_bound(hdev, addr,
1946 					fw_loader->dynamic_loader.fw_image_size,
1947 					region);
1948 	if (rc) {
1949 		dev_err(hdev->dev, "invalid mem transfer request for FW image\n");
1950 		return rc;
1951 	}
1952 
1953 	/* here we can mark the descriptor as valid as the content has been validated */
1954 	fw_loader->dynamic_loader.fw_desc_valid = true;
1955 
1956 	return 0;
1957 }
1958 
hl_fw_dynamic_validate_response(struct hl_device * hdev,struct fw_response * response,struct pci_mem_region * region)1959 static int hl_fw_dynamic_validate_response(struct hl_device *hdev,
1960 						struct fw_response *response,
1961 						struct pci_mem_region *region)
1962 {
1963 	u64 device_addr;
1964 	int rc;
1965 
1966 	device_addr = region->region_base + response->ram_offset;
1967 
1968 	/*
1969 	 * validate that the descriptor is within region's bounds
1970 	 * Note that as the start address was supplied according to the RAM
1971 	 * type- testing only the end address is enough
1972 	 */
1973 	rc = hl_fw_dynamic_validate_memory_bound(hdev, device_addr,
1974 					sizeof(struct lkd_fw_comms_desc),
1975 					region);
1976 	return rc;
1977 }
1978 
1979 /**
1980  * hl_fw_dynamic_read_and_validate_descriptor - read and validate FW descriptor
1981  *
1982  * @hdev: pointer to the habanalabs device structure
1983  * @fw_loader: managing structure for loading device's FW
1984  *
1985  * @return 0 on success, otherwise non-zero error code
1986  */
hl_fw_dynamic_read_and_validate_descriptor(struct hl_device * hdev,struct fw_load_mgr * fw_loader)1987 static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
1988 						struct fw_load_mgr *fw_loader)
1989 {
1990 	struct lkd_fw_comms_desc *fw_desc;
1991 	struct pci_mem_region *region;
1992 	struct fw_response *response;
1993 	enum pci_region region_id;
1994 	void __iomem *src;
1995 	int rc;
1996 
1997 	fw_desc = &fw_loader->dynamic_loader.comm_desc;
1998 	response = &fw_loader->dynamic_loader.response;
1999 
2000 	region_id = (response->ram_type == COMMS_SRAM) ?
2001 					PCI_REGION_SRAM : PCI_REGION_DRAM;
2002 
2003 	region = &hdev->pci_mem_region[region_id];
2004 
2005 	rc = hl_fw_dynamic_validate_response(hdev, response, region);
2006 	if (rc) {
2007 		dev_err(hdev->dev,
2008 			"invalid mem transfer request for FW descriptor\n");
2009 		return rc;
2010 	}
2011 
2012 	/*
2013 	 * extract address to copy the descriptor from
2014 	 * in addition, as the descriptor value is going to be over-ridden by new data- we mark it
2015 	 * as invalid.
2016 	 * it will be marked again as valid once validated
2017 	 */
2018 	fw_loader->dynamic_loader.fw_desc_valid = false;
2019 	src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2020 							response->ram_offset;
2021 	memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
2022 
2023 	return hl_fw_dynamic_validate_descriptor(hdev, fw_loader, fw_desc);
2024 }
2025 
2026 /**
2027  * hl_fw_dynamic_request_descriptor - handshake with CPU to get FW descriptor
2028  *
2029  * @hdev: pointer to the habanalabs device structure
2030  * @fw_loader: managing structure for loading device's FW
2031  * @next_image_size: size to allocate for next FW component
2032  *
2033  * @return 0 on success, otherwise non-zero error code
2034  */
hl_fw_dynamic_request_descriptor(struct hl_device * hdev,struct fw_load_mgr * fw_loader,size_t next_image_size)2035 static int hl_fw_dynamic_request_descriptor(struct hl_device *hdev,
2036 						struct fw_load_mgr *fw_loader,
2037 						size_t next_image_size)
2038 {
2039 	int rc;
2040 
2041 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_PREP_DESC,
2042 						next_image_size, true,
2043 						fw_loader->cpu_timeout);
2044 	if (rc)
2045 		return rc;
2046 
2047 	return hl_fw_dynamic_read_and_validate_descriptor(hdev, fw_loader);
2048 }
2049 
2050 /**
2051  * hl_fw_dynamic_read_device_fw_version - read FW version to exposed properties
2052  *
2053  * @hdev: pointer to the habanalabs device structure
2054  * @fwc: the firmware component
2055  * @fw_version: fw component's version string
2056  */
hl_fw_dynamic_read_device_fw_version(struct hl_device * hdev,enum hl_fw_component fwc,const char * fw_version)2057 static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
2058 					enum hl_fw_component fwc,
2059 					const char *fw_version)
2060 {
2061 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2062 	char *preboot_ver, *boot_ver;
2063 	char btl_ver[32];
2064 
2065 	switch (fwc) {
2066 	case FW_COMP_BOOT_FIT:
2067 		strscpy(prop->uboot_ver, fw_version, VERSION_MAX_LEN);
2068 		boot_ver = extract_fw_ver_from_str(prop->uboot_ver);
2069 		if (boot_ver) {
2070 			dev_info(hdev->dev, "boot-fit version %s\n", boot_ver);
2071 			kfree(boot_ver);
2072 		}
2073 
2074 		break;
2075 	case FW_COMP_PREBOOT:
2076 		strscpy(prop->preboot_ver, fw_version, VERSION_MAX_LEN);
2077 		preboot_ver = strnstr(prop->preboot_ver, "Preboot",
2078 						VERSION_MAX_LEN);
2079 		if (preboot_ver && preboot_ver != prop->preboot_ver) {
2080 			strscpy(btl_ver, prop->preboot_ver,
2081 				min((int) (preboot_ver - prop->preboot_ver), 31));
2082 			dev_info(hdev->dev, "%s\n", btl_ver);
2083 		}
2084 
2085 		preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
2086 		if (preboot_ver) {
2087 			int rc;
2088 
2089 			dev_info(hdev->dev, "preboot version %s\n", preboot_ver);
2090 
2091 			/* This function takes care of freeing preboot_ver */
2092 			rc = extract_fw_sub_versions(hdev, preboot_ver);
2093 			if (rc)
2094 				return rc;
2095 		}
2096 
2097 		break;
2098 	default:
2099 		dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc);
2100 		return -EINVAL;
2101 	}
2102 
2103 	return 0;
2104 }
2105 
2106 /**
2107  * hl_fw_dynamic_copy_image - copy image to memory allocated by the FW
2108  *
2109  * @hdev: pointer to the habanalabs device structure
2110  * @fw: fw descriptor
2111  * @fw_loader: managing structure for loading device's FW
2112  */
hl_fw_dynamic_copy_image(struct hl_device * hdev,const struct firmware * fw,struct fw_load_mgr * fw_loader)2113 static int hl_fw_dynamic_copy_image(struct hl_device *hdev,
2114 						const struct firmware *fw,
2115 						struct fw_load_mgr *fw_loader)
2116 {
2117 	struct lkd_fw_comms_desc *fw_desc;
2118 	struct pci_mem_region *region;
2119 	void __iomem *dest;
2120 	u64 addr;
2121 	int rc;
2122 
2123 	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2124 	addr = le64_to_cpu(fw_desc->img_addr);
2125 
2126 	/* find memory region to which to copy the image */
2127 	region = fw_loader->dynamic_loader.image_region;
2128 
2129 	dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2130 					(addr - region->region_base);
2131 
2132 	rc = hl_fw_copy_fw_to_device(hdev, fw, dest,
2133 					fw_loader->boot_fit_img.src_off,
2134 					fw_loader->boot_fit_img.copy_size);
2135 
2136 	return rc;
2137 }
2138 
2139 /**
2140  * hl_fw_dynamic_copy_msg - copy msg to memory allocated by the FW
2141  *
2142  * @hdev: pointer to the habanalabs device structure
2143  * @msg: message
2144  * @fw_loader: managing structure for loading device's FW
2145  */
hl_fw_dynamic_copy_msg(struct hl_device * hdev,struct lkd_msg_comms * msg,struct fw_load_mgr * fw_loader)2146 static int hl_fw_dynamic_copy_msg(struct hl_device *hdev,
2147 		struct lkd_msg_comms *msg, struct fw_load_mgr *fw_loader)
2148 {
2149 	struct lkd_fw_comms_desc *fw_desc;
2150 	struct pci_mem_region *region;
2151 	void __iomem *dest;
2152 	u64 addr;
2153 	int rc;
2154 
2155 	fw_desc = &fw_loader->dynamic_loader.comm_desc;
2156 	addr = le64_to_cpu(fw_desc->img_addr);
2157 
2158 	/* find memory region to which to copy the image */
2159 	region = fw_loader->dynamic_loader.image_region;
2160 
2161 	dest = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
2162 					(addr - region->region_base);
2163 
2164 	rc = hl_fw_copy_msg_to_device(hdev, msg, dest, 0, 0);
2165 
2166 	return rc;
2167 }
2168 
2169 /**
2170  * hl_fw_boot_fit_update_state - update internal data structures after boot-fit
2171  *                               is loaded
2172  *
2173  * @hdev: pointer to the habanalabs device structure
2174  * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
2175  * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
2176  *
2177  * @return 0 on success, otherwise non-zero error code
2178  */
hl_fw_boot_fit_update_state(struct hl_device * hdev,u32 cpu_boot_dev_sts0_reg,u32 cpu_boot_dev_sts1_reg)2179 static void hl_fw_boot_fit_update_state(struct hl_device *hdev,
2180 						u32 cpu_boot_dev_sts0_reg,
2181 						u32 cpu_boot_dev_sts1_reg)
2182 {
2183 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2184 
2185 	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_BOOT_CPU;
2186 
2187 	/* Read boot_cpu status bits */
2188 	if (prop->fw_preboot_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_ENABLED) {
2189 		prop->fw_bootfit_cpu_boot_dev_sts0 =
2190 				RREG32(cpu_boot_dev_sts0_reg);
2191 
2192 		prop->hard_reset_done_by_fw = !!(prop->fw_bootfit_cpu_boot_dev_sts0 &
2193 							CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
2194 
2195 		dev_dbg(hdev->dev, "Firmware boot CPU status0 %#x\n",
2196 					prop->fw_bootfit_cpu_boot_dev_sts0);
2197 	}
2198 
2199 	if (prop->fw_cpu_boot_dev_sts1_valid) {
2200 		prop->fw_bootfit_cpu_boot_dev_sts1 =
2201 				RREG32(cpu_boot_dev_sts1_reg);
2202 
2203 		dev_dbg(hdev->dev, "Firmware boot CPU status1 %#x\n",
2204 					prop->fw_bootfit_cpu_boot_dev_sts1);
2205 	}
2206 
2207 	dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
2208 			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
2209 }
2210 
hl_fw_dynamic_update_linux_interrupt_if(struct hl_device * hdev)2211 static void hl_fw_dynamic_update_linux_interrupt_if(struct hl_device *hdev)
2212 {
2213 	struct cpu_dyn_regs *dyn_regs =
2214 			&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
2215 
2216 	/* Check whether all 3 interrupt interfaces are set, if not use a
2217 	 * single interface
2218 	 */
2219 	if (!hdev->asic_prop.gic_interrupts_enable &&
2220 			!(hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
2221 				CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN)) {
2222 		dyn_regs->gic_host_halt_irq = dyn_regs->gic_host_pi_upd_irq;
2223 		dyn_regs->gic_host_ints_irq = dyn_regs->gic_host_pi_upd_irq;
2224 
2225 		dev_warn(hdev->dev,
2226 			"Using a single interrupt interface towards cpucp");
2227 	}
2228 }
2229 /**
2230  * hl_fw_dynamic_load_image - load FW image using dynamic protocol
2231  *
2232  * @hdev: pointer to the habanalabs device structure
2233  * @fw_loader: managing structure for loading device's FW
2234  * @load_fwc: the FW component to be loaded
2235  * @img_ld_timeout: image load timeout
2236  *
2237  * @return 0 on success, otherwise non-zero error code
2238  */
hl_fw_dynamic_load_image(struct hl_device * hdev,struct fw_load_mgr * fw_loader,enum hl_fw_component load_fwc,u32 img_ld_timeout)2239 static int hl_fw_dynamic_load_image(struct hl_device *hdev,
2240 						struct fw_load_mgr *fw_loader,
2241 						enum hl_fw_component load_fwc,
2242 						u32 img_ld_timeout)
2243 {
2244 	enum hl_fw_component cur_fwc;
2245 	const struct firmware *fw;
2246 	char *fw_name;
2247 	int rc = 0;
2248 
2249 	/*
2250 	 * when loading image we have one of 2 scenarios:
2251 	 * 1. current FW component is preboot and we want to load boot-fit
2252 	 * 2. current FW component is boot-fit and we want to load linux
2253 	 */
2254 	if (load_fwc == FW_COMP_BOOT_FIT) {
2255 		cur_fwc = FW_COMP_PREBOOT;
2256 		fw_name = fw_loader->boot_fit_img.image_name;
2257 	} else {
2258 		cur_fwc = FW_COMP_BOOT_FIT;
2259 		fw_name = fw_loader->linux_img.image_name;
2260 	}
2261 
2262 	/* request FW in order to communicate to FW the size to be allocated */
2263 	rc = hl_request_fw(hdev, &fw, fw_name);
2264 	if (rc)
2265 		return rc;
2266 
2267 	/* store the image size for future validation */
2268 	fw_loader->dynamic_loader.fw_image_size = fw->size;
2269 
2270 	rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, fw->size);
2271 	if (rc)
2272 		goto release_fw;
2273 
2274 	/* read preboot version */
2275 	rc = hl_fw_dynamic_read_device_fw_version(hdev, cur_fwc,
2276 				fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
2277 	if (rc)
2278 		goto release_fw;
2279 
2280 	/* update state according to boot stage */
2281 	if (cur_fwc == FW_COMP_BOOT_FIT) {
2282 		struct cpu_dyn_regs *dyn_regs;
2283 
2284 		dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
2285 		hl_fw_boot_fit_update_state(hdev,
2286 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2287 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2288 	}
2289 
2290 	/* copy boot fit to space allocated by FW */
2291 	rc = hl_fw_dynamic_copy_image(hdev, fw, fw_loader);
2292 	if (rc)
2293 		goto release_fw;
2294 
2295 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
2296 						0, true,
2297 						fw_loader->cpu_timeout);
2298 	if (rc)
2299 		goto release_fw;
2300 
2301 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
2302 						0, false,
2303 						img_ld_timeout);
2304 
2305 release_fw:
2306 	hl_release_firmware(fw);
2307 	return rc;
2308 }
2309 
hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device * hdev,struct fw_load_mgr * fw_loader)2310 static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev,
2311 					struct fw_load_mgr *fw_loader)
2312 {
2313 	struct dynamic_fw_load_mgr *dyn_loader;
2314 	u32 status;
2315 	int rc;
2316 
2317 	dyn_loader = &fw_loader->dynamic_loader;
2318 
2319 	/*
2320 	 * Make sure CPU boot-loader is running
2321 	 * Note that the CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux
2322 	 * yet there is a debug scenario in which we loading uboot (without Linux)
2323 	 * which at later stage is relocated to DRAM. In this case we expect
2324 	 * uboot to set the CPU_BOOT_STATUS_SRAM_AVAIL and so we add it to the
2325 	 * poll flags
2326 	 */
2327 	rc = hl_poll_timeout(
2328 		hdev,
2329 		le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
2330 		status,
2331 		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
2332 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2333 		hdev->fw_poll_interval_usec,
2334 		dyn_loader->wait_for_bl_timeout);
2335 	if (rc) {
2336 		dev_err(hdev->dev, "failed to wait for boot\n");
2337 		return rc;
2338 	}
2339 
2340 	dev_dbg(hdev->dev, "uboot status = %d\n", status);
2341 	return 0;
2342 }
2343 
hl_fw_dynamic_wait_for_linux_active(struct hl_device * hdev,struct fw_load_mgr * fw_loader)2344 static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev,
2345 						struct fw_load_mgr *fw_loader)
2346 {
2347 	struct dynamic_fw_load_mgr *dyn_loader;
2348 	u32 status;
2349 	int rc;
2350 
2351 	dyn_loader = &fw_loader->dynamic_loader;
2352 
2353 	/* Make sure CPU linux is running */
2354 
2355 	rc = hl_poll_timeout(
2356 		hdev,
2357 		le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
2358 		status,
2359 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2360 		hdev->fw_poll_interval_usec,
2361 		fw_loader->cpu_timeout);
2362 	if (rc) {
2363 		dev_err(hdev->dev, "failed to wait for Linux\n");
2364 		return rc;
2365 	}
2366 
2367 	dev_dbg(hdev->dev, "Boot status = %d\n", status);
2368 	return 0;
2369 }
2370 
2371 /**
2372  * hl_fw_linux_update_state -	update internal data structures after Linux
2373  *				is loaded.
2374  *				Note: Linux initialization is comprised mainly
2375  *				of two stages - loading kernel (SRAM_AVAIL)
2376  *				& loading ARMCP.
2377  *				Therefore reading boot device status in any of
2378  *				these stages might result in different values.
2379  *
2380  * @hdev: pointer to the habanalabs device structure
2381  * @cpu_boot_dev_sts0_reg: register holding CPU boot dev status 0
2382  * @cpu_boot_dev_sts1_reg: register holding CPU boot dev status 1
2383  *
2384  * @return 0 on success, otherwise non-zero error code
2385  */
hl_fw_linux_update_state(struct hl_device * hdev,u32 cpu_boot_dev_sts0_reg,u32 cpu_boot_dev_sts1_reg)2386 static void hl_fw_linux_update_state(struct hl_device *hdev,
2387 						u32 cpu_boot_dev_sts0_reg,
2388 						u32 cpu_boot_dev_sts1_reg)
2389 {
2390 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2391 
2392 	hdev->fw_loader.fw_comp_loaded |= FW_TYPE_LINUX;
2393 
2394 	/* Read FW application security bits */
2395 	if (prop->fw_cpu_boot_dev_sts0_valid) {
2396 		prop->fw_app_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg);
2397 
2398 		prop->hard_reset_done_by_fw = !!(prop->fw_app_cpu_boot_dev_sts0 &
2399 							CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
2400 
2401 		if (prop->fw_app_cpu_boot_dev_sts0 &
2402 				CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN)
2403 			prop->gic_interrupts_enable = false;
2404 
2405 		dev_dbg(hdev->dev,
2406 			"Firmware application CPU status0 %#x\n",
2407 			prop->fw_app_cpu_boot_dev_sts0);
2408 
2409 		dev_dbg(hdev->dev, "GIC controller is %s\n",
2410 				prop->gic_interrupts_enable ?
2411 						"enabled" : "disabled");
2412 	}
2413 
2414 	if (prop->fw_cpu_boot_dev_sts1_valid) {
2415 		prop->fw_app_cpu_boot_dev_sts1 = RREG32(cpu_boot_dev_sts1_reg);
2416 
2417 		dev_dbg(hdev->dev,
2418 			"Firmware application CPU status1 %#x\n",
2419 			prop->fw_app_cpu_boot_dev_sts1);
2420 	}
2421 
2422 	dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
2423 			prop->hard_reset_done_by_fw ? "enabled" : "disabled");
2424 
2425 	dev_info(hdev->dev, "Successfully loaded firmware to device\n");
2426 }
2427 
2428 /**
2429  * hl_fw_dynamic_send_msg - send a COMMS message with attached data
2430  *
2431  * @hdev: pointer to the habanalabs device structure
2432  * @fw_loader: managing structure for loading device's FW
2433  * @msg_type: message type
2434  * @data: data to be sent
2435  *
2436  * @return 0 on success, otherwise non-zero error code
2437  */
hl_fw_dynamic_send_msg(struct hl_device * hdev,struct fw_load_mgr * fw_loader,u8 msg_type,void * data)2438 static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
2439 		struct fw_load_mgr *fw_loader, u8 msg_type, void *data)
2440 {
2441 	struct lkd_msg_comms msg;
2442 	int rc;
2443 
2444 	memset(&msg, 0, sizeof(msg));
2445 
2446 	/* create message to be sent */
2447 	msg.header.type = msg_type;
2448 	msg.header.size = cpu_to_le16(sizeof(struct comms_msg_header));
2449 	msg.header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
2450 
2451 	switch (msg_type) {
2452 	case HL_COMMS_RESET_CAUSE_TYPE:
2453 		msg.reset_cause = *(__u8 *) data;
2454 		break;
2455 
2456 	default:
2457 		dev_err(hdev->dev,
2458 			"Send COMMS message - invalid message type %u\n",
2459 			msg_type);
2460 		return -EINVAL;
2461 	}
2462 
2463 	rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
2464 			sizeof(struct lkd_msg_comms));
2465 	if (rc)
2466 		return rc;
2467 
2468 	/* copy message to space allocated by FW */
2469 	rc = hl_fw_dynamic_copy_msg(hdev, &msg, fw_loader);
2470 	if (rc)
2471 		return rc;
2472 
2473 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
2474 						0, true,
2475 						fw_loader->cpu_timeout);
2476 	if (rc)
2477 		return rc;
2478 
2479 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
2480 						0, true,
2481 						fw_loader->cpu_timeout);
2482 	if (rc)
2483 		return rc;
2484 
2485 	return 0;
2486 }
2487 
2488 /**
2489  * hl_fw_dynamic_init_cpu - initialize the device CPU using dynamic protocol
2490  *
2491  * @hdev: pointer to the habanalabs device structure
2492  * @fw_loader: managing structure for loading device's FW
2493  *
2494  * @return 0 on success, otherwise non-zero error code
2495  *
2496  * brief: the dynamic protocol is master (LKD) slave (FW CPU) protocol.
2497  * the communication is done using registers:
2498  * - LKD command register
2499  * - FW status register
2500  * the protocol is race free. this goal is achieved by splitting the requests
2501  * and response to known synchronization points between the LKD and the FW.
2502  * each response to LKD request is known and bound to a predefined timeout.
2503  * in case of timeout expiration without the desired status from FW- the
2504  * protocol (and hence the boot) will fail.
2505  */
hl_fw_dynamic_init_cpu(struct hl_device * hdev,struct fw_load_mgr * fw_loader)2506 static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
2507 					struct fw_load_mgr *fw_loader)
2508 {
2509 	struct cpu_dyn_regs *dyn_regs;
2510 	int rc;
2511 
2512 	dev_info(hdev->dev,
2513 		"Loading %sfirmware to device, may take some time...\n",
2514 		hdev->asic_prop.fw_security_enabled ? "secured " : "");
2515 
2516 	/* initialize FW descriptor as invalid */
2517 	fw_loader->dynamic_loader.fw_desc_valid = false;
2518 
2519 	/*
2520 	 * In this stage, "cpu_dyn_regs" contains only LKD's hard coded values!
2521 	 * It will be updated from FW after hl_fw_dynamic_request_descriptor().
2522 	 */
2523 	dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
2524 
2525 	rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
2526 						0, true,
2527 						fw_loader->cpu_timeout);
2528 	if (rc)
2529 		goto protocol_err;
2530 
2531 	if (hdev->reset_info.curr_reset_cause) {
2532 		rc = hl_fw_dynamic_send_msg(hdev, fw_loader,
2533 				HL_COMMS_RESET_CAUSE_TYPE, &hdev->reset_info.curr_reset_cause);
2534 		if (rc)
2535 			goto protocol_err;
2536 
2537 		/* Clear current reset cause */
2538 		hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
2539 	}
2540 
2541 	if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
2542 		rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, 0);
2543 		if (rc)
2544 			goto protocol_err;
2545 
2546 		/* read preboot version */
2547 		return hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
2548 				fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
2549 	}
2550 
2551 	/* load boot fit to FW */
2552 	rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_BOOT_FIT,
2553 						fw_loader->boot_fit_timeout);
2554 	if (rc) {
2555 		dev_err(hdev->dev, "failed to load boot fit\n");
2556 		goto protocol_err;
2557 	}
2558 
2559 	/*
2560 	 * when testing FW load (without Linux) on PLDM we don't want to
2561 	 * wait until boot fit is active as it may take several hours.
2562 	 * instead, we load the bootfit and let it do all initialization in
2563 	 * the background.
2564 	 */
2565 	if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX))
2566 		return 0;
2567 
2568 	rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader);
2569 	if (rc)
2570 		goto protocol_err;
2571 
2572 	/* Enable DRAM scrambling before Linux boot and after successful
2573 	 *  UBoot
2574 	 */
2575 	hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
2576 
2577 	if (!(hdev->fw_components & FW_TYPE_LINUX)) {
2578 		dev_info(hdev->dev, "Skip loading Linux F/W\n");
2579 		return 0;
2580 	}
2581 
2582 	if (fw_loader->skip_bmc) {
2583 		rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader,
2584 							COMMS_SKIP_BMC, 0,
2585 							true,
2586 							fw_loader->cpu_timeout);
2587 		if (rc) {
2588 			dev_err(hdev->dev, "failed to load boot fit\n");
2589 			goto protocol_err;
2590 		}
2591 	}
2592 
2593 	/* load Linux image to FW */
2594 	rc = hl_fw_dynamic_load_image(hdev, fw_loader, FW_COMP_LINUX,
2595 							fw_loader->cpu_timeout);
2596 	if (rc) {
2597 		dev_err(hdev->dev, "failed to load Linux\n");
2598 		goto protocol_err;
2599 	}
2600 
2601 	rc = hl_fw_dynamic_wait_for_linux_active(hdev, fw_loader);
2602 	if (rc)
2603 		goto protocol_err;
2604 
2605 	hl_fw_linux_update_state(hdev, le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2606 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2607 
2608 	hl_fw_dynamic_update_linux_interrupt_if(hdev);
2609 
2610 	return 0;
2611 
2612 protocol_err:
2613 	if (fw_loader->dynamic_loader.fw_desc_valid)
2614 		fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
2615 				le32_to_cpu(dyn_regs->cpu_boot_err1),
2616 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
2617 				le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
2618 	return rc;
2619 }
2620 
2621 /**
2622  * hl_fw_static_init_cpu - initialize the device CPU using static protocol
2623  *
2624  * @hdev: pointer to the habanalabs device structure
2625  * @fw_loader: managing structure for loading device's FW
2626  *
2627  * @return 0 on success, otherwise non-zero error code
2628  */
hl_fw_static_init_cpu(struct hl_device * hdev,struct fw_load_mgr * fw_loader)2629 static int hl_fw_static_init_cpu(struct hl_device *hdev,
2630 					struct fw_load_mgr *fw_loader)
2631 {
2632 	u32 cpu_msg_status_reg, cpu_timeout, msg_to_cpu_reg, status;
2633 	u32 cpu_boot_dev_status0_reg, cpu_boot_dev_status1_reg;
2634 	struct static_fw_load_mgr *static_loader;
2635 	u32 cpu_boot_status_reg;
2636 	int rc;
2637 
2638 	if (!(hdev->fw_components & FW_TYPE_BOOT_CPU))
2639 		return 0;
2640 
2641 	/* init common loader parameters */
2642 	cpu_timeout = fw_loader->cpu_timeout;
2643 
2644 	/* init static loader parameters */
2645 	static_loader = &fw_loader->static_loader;
2646 	cpu_msg_status_reg = static_loader->cpu_cmd_status_to_host_reg;
2647 	msg_to_cpu_reg = static_loader->kmd_msg_to_cpu_reg;
2648 	cpu_boot_dev_status0_reg = static_loader->cpu_boot_dev_status0_reg;
2649 	cpu_boot_dev_status1_reg = static_loader->cpu_boot_dev_status1_reg;
2650 	cpu_boot_status_reg = static_loader->cpu_boot_status_reg;
2651 
2652 	dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
2653 		cpu_timeout / USEC_PER_SEC);
2654 
2655 	/* Wait for boot FIT request */
2656 	rc = hl_poll_timeout(
2657 		hdev,
2658 		cpu_boot_status_reg,
2659 		status,
2660 		status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
2661 		hdev->fw_poll_interval_usec,
2662 		fw_loader->boot_fit_timeout);
2663 
2664 	if (rc) {
2665 		dev_dbg(hdev->dev,
2666 			"No boot fit request received, resuming boot\n");
2667 	} else {
2668 		rc = hdev->asic_funcs->load_boot_fit_to_device(hdev);
2669 		if (rc)
2670 			goto out;
2671 
2672 		/* Clear device CPU message status */
2673 		WREG32(cpu_msg_status_reg, CPU_MSG_CLR);
2674 
2675 		/* Signal device CPU that boot loader is ready */
2676 		WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
2677 
2678 		/* Poll for CPU device ack */
2679 		rc = hl_poll_timeout(
2680 			hdev,
2681 			cpu_msg_status_reg,
2682 			status,
2683 			status == CPU_MSG_OK,
2684 			hdev->fw_poll_interval_usec,
2685 			fw_loader->boot_fit_timeout);
2686 
2687 		if (rc) {
2688 			dev_err(hdev->dev,
2689 				"Timeout waiting for boot fit load ack\n");
2690 			goto out;
2691 		}
2692 
2693 		/* Clear message */
2694 		WREG32(msg_to_cpu_reg, KMD_MSG_NA);
2695 	}
2696 
2697 	/*
2698 	 * Make sure CPU boot-loader is running
2699 	 * Note that the CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux
2700 	 * yet there is a debug scenario in which we loading uboot (without Linux)
2701 	 * which at later stage is relocated to DRAM. In this case we expect
2702 	 * uboot to set the CPU_BOOT_STATUS_SRAM_AVAIL and so we add it to the
2703 	 * poll flags
2704 	 */
2705 	rc = hl_poll_timeout(
2706 		hdev,
2707 		cpu_boot_status_reg,
2708 		status,
2709 		(status == CPU_BOOT_STATUS_DRAM_RDY) ||
2710 		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
2711 		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
2712 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2713 		hdev->fw_poll_interval_usec,
2714 		cpu_timeout);
2715 
2716 	dev_dbg(hdev->dev, "uboot status = %d\n", status);
2717 
2718 	/* Read U-Boot version now in case we will later fail */
2719 	hl_fw_static_read_device_fw_version(hdev, FW_COMP_BOOT_FIT);
2720 
2721 	/* update state according to boot stage */
2722 	hl_fw_boot_fit_update_state(hdev, cpu_boot_dev_status0_reg,
2723 						cpu_boot_dev_status1_reg);
2724 
2725 	if (rc) {
2726 		detect_cpu_boot_status(hdev, status);
2727 		rc = -EIO;
2728 		goto out;
2729 	}
2730 
2731 	/* Enable DRAM scrambling before Linux boot and after successful
2732 	 *  UBoot
2733 	 */
2734 	hdev->asic_funcs->init_cpu_scrambler_dram(hdev);
2735 
2736 	if (!(hdev->fw_components & FW_TYPE_LINUX)) {
2737 		dev_info(hdev->dev, "Skip loading Linux F/W\n");
2738 		rc = 0;
2739 		goto out;
2740 	}
2741 
2742 	if (status == CPU_BOOT_STATUS_SRAM_AVAIL) {
2743 		rc = 0;
2744 		goto out;
2745 	}
2746 
2747 	dev_info(hdev->dev,
2748 		"Loading firmware to device, may take some time...\n");
2749 
2750 	rc = hdev->asic_funcs->load_firmware_to_device(hdev);
2751 	if (rc)
2752 		goto out;
2753 
2754 	if (fw_loader->skip_bmc) {
2755 		WREG32(msg_to_cpu_reg, KMD_MSG_SKIP_BMC);
2756 
2757 		rc = hl_poll_timeout(
2758 			hdev,
2759 			cpu_boot_status_reg,
2760 			status,
2761 			(status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
2762 			hdev->fw_poll_interval_usec,
2763 			cpu_timeout);
2764 
2765 		if (rc) {
2766 			dev_err(hdev->dev,
2767 				"Failed to get ACK on skipping BMC, %d\n",
2768 				status);
2769 			WREG32(msg_to_cpu_reg, KMD_MSG_NA);
2770 			rc = -EIO;
2771 			goto out;
2772 		}
2773 	}
2774 
2775 	WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);
2776 
2777 	rc = hl_poll_timeout(
2778 		hdev,
2779 		cpu_boot_status_reg,
2780 		status,
2781 		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
2782 		hdev->fw_poll_interval_usec,
2783 		cpu_timeout);
2784 
2785 	/* Clear message */
2786 	WREG32(msg_to_cpu_reg, KMD_MSG_NA);
2787 
2788 	if (rc) {
2789 		if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
2790 			dev_err(hdev->dev,
2791 				"Device reports FIT image is corrupted\n");
2792 		else
2793 			dev_err(hdev->dev,
2794 				"Failed to load firmware to device, %d\n",
2795 				status);
2796 
2797 		rc = -EIO;
2798 		goto out;
2799 	}
2800 
2801 	rc = fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
2802 					fw_loader->static_loader.boot_err1_reg,
2803 					cpu_boot_dev_status0_reg,
2804 					cpu_boot_dev_status1_reg);
2805 	if (rc)
2806 		return rc;
2807 
2808 	hl_fw_linux_update_state(hdev, cpu_boot_dev_status0_reg,
2809 						cpu_boot_dev_status1_reg);
2810 
2811 	return 0;
2812 
2813 out:
2814 	fw_read_errors(hdev, fw_loader->static_loader.boot_err0_reg,
2815 					fw_loader->static_loader.boot_err1_reg,
2816 					cpu_boot_dev_status0_reg,
2817 					cpu_boot_dev_status1_reg);
2818 
2819 	return rc;
2820 }
2821 
2822 /**
2823  * hl_fw_init_cpu - initialize the device CPU
2824  *
2825  * @hdev: pointer to the habanalabs device structure
2826  *
2827  * @return 0 on success, otherwise non-zero error code
2828  *
2829  * perform necessary initializations for device's CPU. takes into account if
2830  * init protocol is static or dynamic.
2831  */
hl_fw_init_cpu(struct hl_device * hdev)2832 int hl_fw_init_cpu(struct hl_device *hdev)
2833 {
2834 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2835 	struct fw_load_mgr *fw_loader = &hdev->fw_loader;
2836 
2837 	return  prop->dynamic_fw_load ?
2838 			hl_fw_dynamic_init_cpu(hdev, fw_loader) :
2839 			hl_fw_static_init_cpu(hdev, fw_loader);
2840 }
2841 
hl_fw_set_pll_profile(struct hl_device * hdev)2842 void hl_fw_set_pll_profile(struct hl_device *hdev)
2843 {
2844 	hl_fw_set_frequency(hdev, hdev->asic_prop.clk_pll_index,
2845 				hdev->asic_prop.max_freq_value);
2846 }
2847 
hl_fw_get_clk_rate(struct hl_device * hdev,u32 * cur_clk,u32 * max_clk)2848 int hl_fw_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
2849 {
2850 	long value;
2851 
2852 	if (!hl_device_operational(hdev, NULL))
2853 		return -ENODEV;
2854 
2855 	if (!hdev->pdev) {
2856 		*cur_clk = 0;
2857 		*max_clk = 0;
2858 		return 0;
2859 	}
2860 
2861 	value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, false);
2862 
2863 	if (value < 0) {
2864 		dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n", value);
2865 		return value;
2866 	}
2867 
2868 	*max_clk = (value / 1000 / 1000);
2869 
2870 	value = hl_fw_get_frequency(hdev, hdev->asic_prop.clk_pll_index, true);
2871 
2872 	if (value < 0) {
2873 		dev_err(hdev->dev, "Failed to retrieve device current clock %ld\n", value);
2874 		return value;
2875 	}
2876 
2877 	*cur_clk = (value / 1000 / 1000);
2878 
2879 	return 0;
2880 }
2881 
hl_fw_get_frequency(struct hl_device * hdev,u32 pll_index,bool curr)2882 long hl_fw_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
2883 {
2884 	struct cpucp_packet pkt;
2885 	u32 used_pll_idx;
2886 	u64 result;
2887 	int rc;
2888 
2889 	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
2890 	if (rc)
2891 		return rc;
2892 
2893 	memset(&pkt, 0, sizeof(pkt));
2894 
2895 	if (curr)
2896 		pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_CURR_GET <<
2897 						CPUCP_PKT_CTL_OPCODE_SHIFT);
2898 	else
2899 		pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
2900 
2901 	pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
2902 
2903 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
2904 
2905 	if (rc) {
2906 		dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n",
2907 			used_pll_idx, rc);
2908 		return rc;
2909 	}
2910 
2911 	return (long) result;
2912 }
2913 
hl_fw_set_frequency(struct hl_device * hdev,u32 pll_index,u64 freq)2914 void hl_fw_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
2915 {
2916 	struct cpucp_packet pkt;
2917 	u32 used_pll_idx;
2918 	int rc;
2919 
2920 	rc = get_used_pll_index(hdev, pll_index, &used_pll_idx);
2921 	if (rc)
2922 		return;
2923 
2924 	memset(&pkt, 0, sizeof(pkt));
2925 
2926 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_FREQUENCY_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
2927 	pkt.pll_index = cpu_to_le32((u32)used_pll_idx);
2928 	pkt.value = cpu_to_le64(freq);
2929 
2930 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
2931 
2932 	if (rc)
2933 		dev_err(hdev->dev, "Failed to set frequency to PLL %d, error %d\n",
2934 			used_pll_idx, rc);
2935 }
2936 
hl_fw_get_max_power(struct hl_device * hdev)2937 long hl_fw_get_max_power(struct hl_device *hdev)
2938 {
2939 	struct cpucp_packet pkt;
2940 	u64 result;
2941 	int rc;
2942 
2943 	memset(&pkt, 0, sizeof(pkt));
2944 
2945 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);
2946 
2947 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
2948 
2949 	if (rc) {
2950 		dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
2951 		return rc;
2952 	}
2953 
2954 	return result;
2955 }
2956 
hl_fw_set_max_power(struct hl_device * hdev)2957 void hl_fw_set_max_power(struct hl_device *hdev)
2958 {
2959 	struct cpucp_packet pkt;
2960 	int rc;
2961 
2962 	/* TODO: remove this after simulator supports this packet */
2963 	if (!hdev->pdev)
2964 		return;
2965 
2966 	memset(&pkt, 0, sizeof(pkt));
2967 
2968 	pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
2969 	pkt.value = cpu_to_le64(hdev->max_power);
2970 
2971 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
2972 
2973 	if (rc)
2974 		dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
2975 }
2976 
hl_fw_get_sec_attest_data(struct hl_device * hdev,u32 packet_id,void * data,u32 size,u32 nonce,u32 timeout)2977 static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void *data, u32 size,
2978 					u32 nonce, u32 timeout)
2979 {
2980 	struct cpucp_packet pkt = {};
2981 	dma_addr_t req_dma_addr;
2982 	void *req_cpu_addr;
2983 	int rc;
2984 
2985 	req_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, size, &req_dma_addr);
2986 	if (!req_cpu_addr) {
2987 		dev_err(hdev->dev,
2988 			"Failed to allocate DMA memory for CPU-CP packet %u\n", packet_id);
2989 		return -ENOMEM;
2990 	}
2991 
2992 	memset(data, 0, size);
2993 
2994 	pkt.ctl = cpu_to_le32(packet_id << CPUCP_PKT_CTL_OPCODE_SHIFT);
2995 	pkt.addr = cpu_to_le64(req_dma_addr);
2996 	pkt.data_max_size = cpu_to_le32(size);
2997 	pkt.nonce = cpu_to_le32(nonce);
2998 
2999 	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
3000 					timeout, NULL);
3001 	if (rc) {
3002 		dev_err(hdev->dev,
3003 			"Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
3004 		goto out;
3005 	}
3006 
3007 	memcpy(data, req_cpu_addr, size);
3008 
3009 out:
3010 	hl_cpu_accessible_dma_pool_free(hdev, size, req_cpu_addr);
3011 
3012 	return rc;
3013 }
3014 
hl_fw_get_sec_attest_info(struct hl_device * hdev,struct cpucp_sec_attest_info * sec_attest_info,u32 nonce)3015 int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_info *sec_attest_info,
3016 				u32 nonce)
3017 {
3018 	return hl_fw_get_sec_attest_data(hdev, CPUCP_PACKET_SEC_ATTEST_GET, sec_attest_info,
3019 					sizeof(struct cpucp_sec_attest_info), nonce,
3020 					HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC);
3021 }
3022