/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/module.h>

#ifdef CONFIG_X86
#include <asm/hypervisor.h>
#endif

#include <drm/drm_drv.h>
#include <xen/xen.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"

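/* Fill one entry of the vf2pf ucode_info table with the firmware id and version */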
#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
	do { \
		vf2pf_info->ucode_info[ucode].id = ucode; \
		vf2pf_info->ucode_info[ucode].version = ver; \
	} while (0)

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except mailbox are blocked if blocking is
	 * enabled in hypervisor. Choose the SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

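/*
 * Basic SR-IOV guest setup: enable virtual display (except on the headless
 * Arcturus/Aldebaran ASICs), drop atomic modesetting and disable clock and
 * power gating.
 */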
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);

	/* enable virtual display */
	if (adev->asic_type != CHIP_ALDEBARAN &&
	    adev->asic_type != CHIP_ARCTURUS) {
		if (adev->mode_info.num_crtc == 0)
			adev->mode_info.num_crtc = 1;
		adev->enable_virtual_display = true;
	}
	ddev->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

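/*
 * Write reg0 and then poll reg1 against ref/mask through the KIQ (or MES when
 * its ring is ready), retrying with msleep() up to MAX_KIQ_REG_TRY times.
 */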
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	if (adev->mes.ring.sched.ready) {
		amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
					      ref, mask);
		return;
	}

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for IRQ context */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
	dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When starting driver init/fini, full gpu access needs to be requested first.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	is driver init time.
 * When finishing driver init/fini, full gpu access needs to be released.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

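/*
 * Ask the host for the GPU init data through the virt ops and report whether
 * the REQ_INIT_DATA handshake is supported.
 */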
void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (virt->ops && virt->ops->req_init_data)
		virt->ops->req_init_data(adev);

	if (adev->virt.req_init_data_ver > 0)
		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
	else
		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev:	amdgpu device.
 * Wait for the GPU reset to complete.
 * Return: Zero on success, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on allocation success, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev:	amdgpu device.
 * Free MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

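/*
 * Simple byte-wise checksum over @obj, seeded with @key; the bytes of the
 * checksum field itself are subtracted so the result can be verified in place.
 */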
unsigned int amd_sriov_msg_checksum(void *obj,
				unsigned long obj_size,
				unsigned int key,
				unsigned int checksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the checksum itself */
	pos = (unsigned char *)&checksum;
	for (i = 0; i < sizeof(checksum); ++i)
		ret -= *(pos + i);
	return ret;
}

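/* Allocate the per-VF bad page (RAS error handler) bookkeeping structures */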
static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
	/* GPU will be marked bad on host if bp count is more than 10,
	 * so alloc 512 is enough.
	 */
	unsigned int align_space = 512;
	void *bps = NULL;
	struct amdgpu_bo **bps_bo = NULL;

	*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
	if (!*data)
		goto data_failure;

	bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL);
	if (!bps)
		goto bps_failure;

	bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL);
	if (!bps_bo)
		goto bps_bo_failure;

	(*data)->bps = bps;
	(*data)->bps_bo = bps_bo;
	(*data)->count = 0;
	(*data)->last_reserved = 0;

	virt->ras_init_done = true;

	return 0;

bps_bo_failure:
	kfree(bps);
bps_failure:
	kfree(*data);
data_failure:
	return -ENOMEM;
}

static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps_bo[i];
		amdgpu_bo_free_kernel(&bo, NULL, NULL);
		data->bps_bo[i] = bo;
		data->last_reserved = i;
	}
}

void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	virt->ras_init_done = false;

	if (!data)
		return;

	amdgpu_virt_ras_release_bp(adev);

	kfree(data->bps);
	kfree(data->bps_bo);
	kfree(data);
	virt->virt_eh_data = NULL;
}

static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
		struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	if (!data)
		return;

	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
	data->count += pages;
}

static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo = NULL;
	uint64_t bp;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].retired_page;

		/* There are two cases of reserve error that should be ignored:
		 * 1) a ras bad page has been allocated (used by someone);
		 * 2) a ras bad page has been reserved (duplicate error injection
		 *    for one page);
		 */
		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
					       AMDGPU_GPU_PAGE_SIZE,
					       AMDGPU_GEM_DOMAIN_VRAM,
					       &bo, NULL))
			DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);

		data->bps_bo[i] = bo;
		data->last_reserved = i + 1;
		bo = NULL;
	}
}

static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
		uint64_t retired_page)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	int i;

	if (!data)
		return true;

	for (i = 0; i < data->count; i++)
		if (retired_page == data->bps[i].retired_page)
			return true;

	return false;
}

static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
		uint64_t bp_block_offset, uint32_t bp_block_size)
{
	struct eeprom_table_record bp;
	uint64_t retired_page;
	uint32_t bp_idx, bp_cnt;

	if (bp_block_size) {
		bp_cnt = bp_block_size / sizeof(uint64_t);
		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
			retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
					bp_block_offset + bp_idx * sizeof(uint64_t));
			bp.retired_page = retired_page;

			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
				continue;

			amdgpu_virt_ras_add_bps(adev, &bp, 1);

			amdgpu_virt_ras_reserve_bps(adev);
		}
	}
}

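/*
 * Parse and validate the pf2vf message in the reserved VRAM area (v1 or v2
 * layout), caching the feature flags, multimedia bandwidth limits and the
 * vf2pf update interval in adev->virt.
 */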
static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
	uint32_t checksum;
	uint32_t checkval;

	uint32_t i;
	uint32_t tmp;

	if (adev->virt.fw_reserve.p_pf2vf == NULL)
		return -EINVAL;

	if (pf2vf_info->size > 1024) {
		DRM_ERROR("invalid pf2vf message size\n");
		return -EINVAL;
	}

	switch (pf2vf_info->version) {
	case 1:
		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			adev->virt.fw_reserve.checksum_key, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.gim_feature =
			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
		break;
	case 2:
		/* TODO: missing key, need to add it later */
		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			0, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.vf2pf_update_interval_ms =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
		adev->virt.gim_feature =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
		adev->virt.reg_access =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;

		adev->virt.decode_max_dimension_pixels = 0;
		adev->virt.decode_max_frame_pixels = 0;
		adev->virt.encode_max_dimension_pixels = 0;
		adev->virt.encode_max_frame_pixels = 0;
		adev->virt.is_mm_bw_enabled = false;
		for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
			adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
			adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
			adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);

			tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
			adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
		}
		if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
			adev->virt.is_mm_bw_enabled = true;

		adev->unique_id =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
		break;
	default:
		DRM_ERROR("invalid pf2vf version\n");
		return -EINVAL;
	}

	/* correct too large or too small interval value */
	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
		adev->virt.vf2pf_update_interval_ms = 2000;

	return 0;
}

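/* Record the loaded firmware versions in the vf2pf message for the host */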
static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return;

	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE,      adev->vce.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD,      adev->uvd.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC,       adev->gmc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME,       adev->gfx.me_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP,      adev->gfx.pfp_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE,       adev->gfx.ce_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC,      adev->gfx.rlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_IMU,      adev->gfx.imu_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
			    adev->psp.asd_context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
			    adev->psp.ras_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
			    adev->psp.xgmi_context.context.bin_desc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN,      adev->vcn.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU,     adev->dm.dmcu_fw_version);
}

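/* Fill the vf2pf message: driver version, FB usage, ucode versions and checksum */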
static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return -EINVAL;

	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));

	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;

#ifdef MODULE
	if (THIS_MODULE->version != NULL)
		strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
	else
#endif
		strcpy(vf2pf_info->driver_version, "N/A");

	vf2pf_info->pf2vf_version_required = 0; /* no requirement, guest understands all */
	vf2pf_info->driver_cert = 0;
	vf2pf_info->os_info.all = 0;

	vf2pf_info->fb_usage =
		ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
	vf2pf_info->fb_vis_usage =
		amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;

	amdgpu_virt_populate_vf2pf_ucode_info(adev);

	/* TODO: read dynamic info */
	vf2pf_info->gfx_usage = 0;
	vf2pf_info->compute_usage = 0;
	vf2pf_info->encode_usage = 0;
	vf2pf_info->decode_usage = 0;

	vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
	vf2pf_info->checksum =
		amd_sriov_msg_checksum(
		vf2pf_info, vf2pf_info->header.size, 0, 0);

	return 0;
}

static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
	int ret;

	ret = amdgpu_virt_read_pf2vf_data(adev);
	if (ret)
		goto out;
	amdgpu_virt_write_vf2pf_data(adev);

out:
	/* the interval is kept in milliseconds, convert it to jiffies */
	schedule_delayed_work(&(adev->virt.vf2pf_work),
			      msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
}

void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
	if (adev->virt.vf2pf_update_interval_ms != 0) {
		DRM_INFO("clean up the vf2pf work item\n");
		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
		adev->virt.vf2pf_update_interval_ms = 0;
	}
}

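/*
 * Set up the pf2vf/vf2pf data exchange: when the reserved VRAM area is
 * mapped, exchange data and start the periodic vf2pf worker; otherwise fall
 * back to reading the pf2vf header from the VBIOS image during early init.
 */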
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;
	adev->virt.vf2pf_update_interval_ms = 0;

	if (adev->mman.fw_vram_usage_va != NULL) {
		/* go through this logic in ip_init and reset to init the workqueue */
		amdgpu_virt_exchange_data(adev);

		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
		schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
	} else if (adev->bios != NULL) {
		/* go through this logic in the early init stage to get necessary flags, e.g. rlcg_acc related */
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
	}
}

void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
	uint64_t bp_block_offset = 0;
	uint32_t bp_block_size = 0;
	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;

	if (adev->mman.fw_vram_usage_va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
		adev->virt.fw_reserve.p_vf2pf =
			(struct amd_sriov_msg_vf2pf_info_header *)
			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
		amdgpu_virt_write_vf2pf_data(adev);

		/* bad page handling for version 2 */
		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
			pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;

			bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
					((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
			bp_block_size = pf2vf_v2->bp_block_size;

			if (bp_block_size && !adev->virt.ras_init_done)
				amdgpu_virt_init_ras_err_handler_data(adev);

			if (adev->virt.ras_init_done)
				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
		}
	}
}

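/*
 * Detect SR-IOV/passthrough from the IOV FUNC_IDENTIFIER register and, for
 * VFs, install the ASIC specific virtualization ops.
 */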
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_FIJI:
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_ARCTURUS:
	case CHIP_ALDEBARAN:
	case CHIP_IP_DISCOVERY:
		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
		break;
	default: /* other chips don't support SRIOV */
		reg = 0;
		break;
	}

	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		/* passthrough mode excludes sriov mode */
		if (is_virtual_machine() && !xen_initial_domain())
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}

	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
		/* VF MMIO access (except mailbox range) from CPU
		 * will be blocked during sriov runtime
		 */
		adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;

	/* we have the ability to check now */
	if (amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_TONGA:
		case CHIP_FIJI:
			vi_set_virt_ops(adev);
			break;
		case CHIP_VEGA10:
			soc15_set_virt_ops(adev);
#ifdef CONFIG_X86
			/* don't send GPU_INIT_DATA with MS_HYPERV */
			if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
#endif
				/* send a dummy GPU_INIT_DATA request to host on vega10 */
				amdgpu_virt_request_init_data(adev);
			break;
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_ALDEBARAN:
			soc15_set_virt_ops(adev);
			break;
		case CHIP_NAVI10:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
		case CHIP_IP_DISCOVERY:
			nv_set_virt_ops(adev);
			/* try send GPU_INIT_DATA request to host */
			amdgpu_virt_request_init_data(adev);
			break;
		default: /* other chips don't support SRIOV */
			DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
			break;
		}
	}
}

static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_debug(adev) ? true : false;
}

static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_normal(adev) ? true : false;
}

int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) ||
	    amdgpu_virt_access_debugfs_is_kiq(adev))
		return 0;

	if (amdgpu_virt_access_debugfs_is_mmio(adev))
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	else
		return -EPERM;

	return 0;
}

void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}

enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;

	if (amdgpu_sriov_vf(adev)) {
		if (amdgpu_sriov_is_pp_one_vf(adev))
			mode = SRIOV_VF_MODE_ONE_VF;
		else
			mode = SRIOV_VF_MODE_MULTI_VF;
	} else {
		mode = SRIOV_VF_MODE_BARE_METAL;
	}

	return mode;
}

bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
{
	switch (adev->ip_versions[MP0_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
		/* no vf autoload, white list */
		if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
		    ucode_id == AMDGPU_UCODE_ID_VCN)
			return false;
		else
			return true;
	case IP_VERSION(13, 0, 10):
		/* white list */
		if (ucode_id == AMDGPU_UCODE_ID_CAP
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES1
		|| ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
		|| ucode_id == AMDGPU_UCODE_ID_VCN1
		|| ucode_id == AMDGPU_UCODE_ID_VCN)
			return false;
		else
			return true;
	default:
		/* legacy black list */
		if (ucode_id == AMDGPU_UCODE_ID_SDMA0
		    || ucode_id == AMDGPU_UCODE_ID_SDMA1
		    || ucode_id == AMDGPU_UCODE_ID_SDMA2
		    || ucode_id == AMDGPU_UCODE_ID_SDMA3
		    || ucode_id == AMDGPU_UCODE_ID_SDMA4
		    || ucode_id == AMDGPU_UCODE_ID_SDMA5
		    || ucode_id == AMDGPU_UCODE_ID_SDMA6
		    || ucode_id == AMDGPU_UCODE_ID_SDMA7
		    || ucode_id == AMDGPU_UCODE_ID_RLC_G
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
		    || ucode_id == AMDGPU_UCODE_ID_SMC)
			return true;
		else
			return false;
	}
}

void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
			struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
			struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
{
	uint32_t i;

	if (!adev->virt.is_mm_bw_enabled)
		return;

	if (encode) {
		for (i = 0; i < encode_array_size; i++) {
			encode[i].max_width = adev->virt.encode_max_dimension_pixels;
			encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
			if (encode[i].max_width > 0)
				encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
			else
				encode[i].max_height = 0;
		}
	}

	if (decode) {
		for (i = 0; i < decode_array_size; i++) {
			decode[i].max_width = adev->virt.decode_max_dimension_pixels;
			decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
			if (decode[i].max_width > 0)
				decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
			else
				decode[i].max_height = 0;
		}
	}
}

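/*
 * Decide whether a register access must be routed through the RLCG indirect
 * interface and, if so, return the matching rlcg_flag for the hw IP block.
 */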
static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
						 u32 acc_flags, u32 hwip,
						 bool write, u32 *rlcg_flag)
{
	bool ret = false;

	switch (hwip) {
	case GC_HWIP:
		if (amdgpu_sriov_reg_indirect_gc(adev)) {
			*rlcg_flag =
				write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
			ret = true;
		/* only in new version, AMDGPU_REGS_NO_KIQ and
		 * AMDGPU_REGS_RLC are enabled simultaneously */
		} else if ((acc_flags & AMDGPU_REGS_RLC) &&
				!(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
			*rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
			ret = true;
		}
		break;
	case MMHUB_HWIP:
		if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
		    (acc_flags & AMDGPU_REGS_RLC) && write) {
			*rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
			ret = true;
		}
		break;
	default:
		break;
	}
	return ret;
}

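/*
 * Program a register through the RLCG scratch register interface: the value
 * goes to SCRATCH_REG0, the offset and command flag to SCRATCH_REG1 (with
 * grbm_cntl/grbm_idx handled via SCRATCH_REG2/3), and the result is polled
 * back from SCRATCH_REG0; reads return that value.
 */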
static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
{
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
	uint32_t timeout = 50000;
	uint32_t i, tmp;
	uint32_t ret = 0;
	void *scratch_reg0;
	void *scratch_reg1;
	void *scratch_reg2;
	void *scratch_reg3;
	void *spare_int;

	if (!adev->gfx.rlc.rlcg_reg_access_supported) {
		dev_err(adev->dev,
			"indirect register access through rlcg is not available\n");
		return 0;
	}

	reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl;
	scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
	scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
	scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
	scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
	if (reg_access_ctrl->spare_int)
		spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;

	if (offset == reg_access_ctrl->grbm_cntl) {
		/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
		writel(v, scratch_reg2);
		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else if (offset == reg_access_ctrl->grbm_idx) {
		/* if the target reg offset is grbm_idx, write to scratch_reg3 */
		writel(v, scratch_reg3);
		writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
	} else {
		/*
		 * SCRATCH_REG0 	= read/write value
		 * SCRATCH_REG1[30:28]	= command
		 * SCRATCH_REG1[19:0]	= address in dword
		 * SCRATCH_REG1[26:24]	= Error reporting
		 */
		writel(v, scratch_reg0);
		writel((offset | flag), scratch_reg1);
		if (reg_access_ctrl->spare_int)
			writel(1, spare_int);

		for (i = 0; i < timeout; i++) {
			tmp = readl(scratch_reg1);
			if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
				break;
			udelay(10);
		}

		if (i >= timeout) {
			if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
				if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
					dev_err(adev->dev,
						"vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
					dev_err(adev->dev,
						"wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
				} else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
					dev_err(adev->dev,
						"register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
				} else {
					dev_err(adev->dev,
						"unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
				}
			} else {
				dev_err(adev->dev,
					"timeout: rlcg failed to program reg: 0x%05x\n", offset);
			}
		}
	}

	ret = readl(scratch_reg0);
	return ret;
}

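/*
 * SR-IOV register accessors: use the RLCG indirect path when required and not
 * in runtime mode, otherwise fall back to the regular (or NO_KIQ) MMIO path.
 */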
void amdgpu_sriov_wreg(struct amdgpu_device *adev,
		       u32 offset, u32 value,
		       u32 acc_flags, u32 hwip)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
		amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag);
		return;
	}

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		WREG32_NO_KIQ(offset, value);
	else
		WREG32(offset, value);
}

u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
		      u32 offset, u32 acc_flags, u32 hwip)
{
	u32 rlcg_flag;

	if (!amdgpu_sriov_runtime(adev) &&
		amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
		return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag);

	if (acc_flags & AMDGPU_REGS_NO_KIQ)
		return RREG32_NO_KIQ(offset);
	else
		return RREG32(offset);
}