1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28 #include <linux/power_supply.h>
29 #include <linux/kthread.h>
30 #include <linux/module.h>
31 #include <linux/console.h>
32 #include <linux/slab.h>
33 #include <linux/iommu.h>
34 #include <linux/pci.h>
35 #include <linux/devcoredump.h>
36 #include <generated/utsrelease.h>
37 #include <linux/pci-p2pdma.h>
38 #include <linux/apple-gmux.h>
39
40 #include <drm/drm_aperture.h>
41 #include <drm/drm_atomic_helper.h>
42 #include <drm/drm_crtc_helper.h>
43 #include <drm/drm_fb_helper.h>
44 #include <drm/drm_probe_helper.h>
45 #include <drm/amdgpu_drm.h>
46 #include <linux/device.h>
47 #include <linux/vgaarb.h>
48 #include <linux/vga_switcheroo.h>
49 #include <linux/efi.h>
50 #include "amdgpu.h"
51 #include "amdgpu_trace.h"
52 #include "amdgpu_i2c.h"
53 #include "atom.h"
54 #include "amdgpu_atombios.h"
55 #include "amdgpu_atomfirmware.h"
56 #include "amd_pcie.h"
57 #ifdef CONFIG_DRM_AMDGPU_SI
58 #include "si.h"
59 #endif
60 #ifdef CONFIG_DRM_AMDGPU_CIK
61 #include "cik.h"
62 #endif
63 #include "vi.h"
64 #include "soc15.h"
65 #include "nv.h"
66 #include "bif/bif_4_1_d.h"
67 #include <linux/firmware.h>
68 #include "amdgpu_vf_error.h"
69
70 #include "amdgpu_amdkfd.h"
71 #include "amdgpu_pm.h"
72
73 #include "amdgpu_xgmi.h"
74 #include "amdgpu_ras.h"
75 #include "amdgpu_pmu.h"
76 #include "amdgpu_fru_eeprom.h"
77 #include "amdgpu_reset.h"
78
79 #include <linux/suspend.h>
80 #include <drm/task_barrier.h>
81 #include <linux/pm_runtime.h>
82
83 #include <drm/drm_drv.h>
84
85 #if IS_ENABLED(CONFIG_X86)
86 #include <asm/intel-family.h>
87 #endif
88
89 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
90 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
91 MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
92 MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
93 MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
94 MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
95 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
96
97 #define AMDGPU_RESUME_MS 2000
98 #define AMDGPU_MAX_RETRY_LIMIT 2
99 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
100
101 static const struct drm_driver amdgpu_kms_driver;
102
103 const char *amdgpu_asic_name[] = {
104 "TAHITI",
105 "PITCAIRN",
106 "VERDE",
107 "OLAND",
108 "HAINAN",
109 "BONAIRE",
110 "KAVERI",
111 "KABINI",
112 "HAWAII",
113 "MULLINS",
114 "TOPAZ",
115 "TONGA",
116 "FIJI",
117 "CARRIZO",
118 "STONEY",
119 "POLARIS10",
120 "POLARIS11",
121 "POLARIS12",
122 "VEGAM",
123 "VEGA10",
124 "VEGA12",
125 "VEGA20",
126 "RAVEN",
127 "ARCTURUS",
128 "RENOIR",
129 "ALDEBARAN",
130 "NAVI10",
131 "CYAN_SKILLFISH",
132 "NAVI14",
133 "NAVI12",
134 "SIENNA_CICHLID",
135 "NAVY_FLOUNDER",
136 "VANGOGH",
137 "DIMGREY_CAVEFISH",
138 "BEIGE_GOBY",
139 "YELLOW_CARP",
140 "IP DISCOVERY",
141 "LAST",
142 };
143
144 /**
145 * DOC: pcie_replay_count
146 *
147 * The amdgpu driver provides a sysfs API for reporting the total number
148 * of PCIe replays (NAKs).
149 * The file pcie_replay_count is used for this and returns the total
150 * number of replays as a sum of the NAKs generated and NAKs received.
151 */
152
153 static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
154 struct device_attribute *attr, char *buf)
155 {
156 struct drm_device *ddev = dev_get_drvdata(dev);
157 struct amdgpu_device *adev = drm_to_adev(ddev);
158 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
159
160 return sysfs_emit(buf, "%llu\n", cnt);
161 }
162
163 static DEVICE_ATTR(pcie_replay_count, 0444,
164 amdgpu_device_get_pcie_replay_count, NULL);
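
/*
 * Illustrative usage (not part of the driver): the attribute appears in
 * the PCI device's sysfs directory, so on a hypothetical single-GPU
 * system the replay count could be read with:
 *
 *	cat /sys/class/drm/card0/device/pcie_replay_count
 */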
165
166 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
167
168
169 /**
170 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
171 *
172 * @dev: drm_device pointer
173 *
174 * Returns true if the device is a dGPU with ATPX power control,
175 * otherwise returns false.
176 */
177 bool amdgpu_device_supports_px(struct drm_device *dev)
178 {
179 struct amdgpu_device *adev = drm_to_adev(dev);
180
181 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
182 return true;
183 return false;
184 }
185
186 /**
187 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
188 *
189 * @dev: drm_device pointer
190 *
191 * Returns true if the device is a dGPU with ACPI power control,
192 * otherwise returns false.
193 */
194 bool amdgpu_device_supports_boco(struct drm_device *dev)
195 {
196 struct amdgpu_device *adev = drm_to_adev(dev);
197
198 if (adev->has_pr3 ||
199 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
200 return true;
201 return false;
202 }
203
204 /**
205 * amdgpu_device_supports_baco - Does the device support BACO
206 *
207 * @dev: drm_device pointer
208 *
209 * Returns true if the device supports BACO,
210 * otherwise returns false.
211 */
212 bool amdgpu_device_supports_baco(struct drm_device *dev)
213 {
214 struct amdgpu_device *adev = drm_to_adev(dev);
215
216 return amdgpu_asic_supports_baco(adev);
217 }
218
219 /**
220 * amdgpu_device_supports_smart_shift - Is the device dGPU with
221 * smart shift support
222 *
223 * @dev: drm_device pointer
224 *
225 * Returns true if the device is a dGPU with Smart Shift support,
226 * otherwise returns false.
227 */
228 bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
229 {
230 return (amdgpu_device_supports_boco(dev) &&
231 amdgpu_acpi_is_power_shift_control_supported());
232 }
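
/*
 * Sketch of how a caller might use the helpers above (illustrative only,
 * not the driver's actual runtime-PM policy code): the helpers are
 * typically probed in order to pick a power-down method.
 *
 *	if (amdgpu_device_supports_px(dev))
 *		;	// ATPX: platform powers the dGPU up/down
 *	else if (amdgpu_device_supports_boco(dev))
 *		;	// BOCO: ACPI power resources (_PR3)
 *	else if (amdgpu_device_supports_baco(dev))
 *		;	// BACO: bus stays active, chip powers off
 */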
233
234 /*
235 * VRAM access helper functions
236 */
237
238 /**
239 * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
240 *
241 * @adev: amdgpu_device pointer
242 * @pos: offset of the buffer in vram
243 * @buf: virtual address of the buffer in system memory
244 * @size: read/write size, @buf must be at least @size bytes
245 * @write: true - write to vram, otherwise - read from vram
246 */
247 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
248 void *buf, size_t size, bool write)
249 {
250 unsigned long flags;
251 uint32_t hi = ~0, tmp = 0;
252 uint32_t *data = buf;
253 uint64_t last;
254 int idx;
255
256 if (!drm_dev_enter(adev_to_drm(adev), &idx))
257 return;
258
259 BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
260
261 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
262 for (last = pos + size; pos < last; pos += 4) {
263 tmp = pos >> 31;
264
265 WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
266 if (tmp != hi) {
267 WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
268 hi = tmp;
269 }
270 if (write)
271 WREG32_NO_KIQ(mmMM_DATA, *data++);
272 else
273 *data++ = RREG32_NO_KIQ(mmMM_DATA);
274 }
275
276 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
277 drm_dev_exit(idx);
278 }
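
/*
 * Usage sketch (illustrative only; "val" is a hypothetical local): read
 * one dword at VRAM offset 0x1000 through the MM_INDEX/MM_DATA window.
 * Note @pos and @size must be dword aligned, per the BUG_ON above.
 *
 *	uint32_t val;
 *
 *	amdgpu_device_mm_access(adev, 0x1000, &val, sizeof(val), false);
 */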
279
280 /**
281 * amdgpu_device_aper_access - access vram via the vram aperture
282 *
283 * @adev: amdgpu_device pointer
284 * @pos: offset of the buffer in vram
285 * @buf: virtual address of the buffer in system memory
286 * @size: read/write size, @buf must be at least @size bytes
287 * @write: true - write to vram, otherwise - read from vram
288 *
289 * Returns the number of bytes that have been transferred.
290 */
291 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
292 void *buf, size_t size, bool write)
293 {
294 #ifdef CONFIG_64BIT
295 void __iomem *addr;
296 size_t count = 0;
297 uint64_t last;
298
299 if (!adev->mman.aper_base_kaddr)
300 return 0;
301
302 last = min(pos + size, adev->gmc.visible_vram_size);
303 if (last > pos) {
304 addr = adev->mman.aper_base_kaddr + pos;
305 count = last - pos;
306
307 if (write) {
308 memcpy_toio(addr, buf, count);
309 /* Make sure HDP write cache flush happens without any reordering
310 * after the system memory contents are sent over PCIe to the device
311 */
312 mb();
313 amdgpu_device_flush_hdp(adev, NULL);
314 } else {
315 amdgpu_device_invalidate_hdp(adev, NULL);
316 /* Make sure HDP read cache is invalidated before issuing a read
317 * to the PCIe device
318 */
319 mb();
320 memcpy_fromio(buf, addr, count);
321 }
322
323 }
324
325 return count;
326 #else
327 return 0;
328 #endif
329 }
330
331 /**
332 * amdgpu_device_vram_access - read/write a buffer in vram
333 *
334 * @adev: amdgpu_device pointer
335 * @pos: offset of the buffer in vram
336 * @buf: virtual address of the buffer in system memory
337 * @size: read/write size, @buf must be at least @size bytes
338 * @write: true - write to vram, otherwise - read from vram
339 */
340 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
341 void *buf, size_t size, bool write)
342 {
343 size_t count;
344
345 /* try using the vram aperture to access vram first */
346 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
347 size -= count;
348 if (size) {
349 /* use MM to access the rest of vram */
350 pos += count;
351 buf += count;
352 amdgpu_device_mm_access(adev, pos, buf, size, write);
353 }
354 }
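
/*
 * Usage sketch (illustrative only; "buf" and "vram_offset" are
 * hypothetical): copy 256 bytes out of VRAM. The CPU-visible aperture is
 * used where possible and the MM window covers the remainder, so offsets
 * and sizes still need to be dword aligned.
 *
 *	u8 buf[256];
 *
 *	amdgpu_device_vram_access(adev, vram_offset, buf, sizeof(buf), false);
 */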
355
356 /*
357 * register access helper functions.
358 */
359
360 /* Check if hw access should be skipped because of hotplug or device error */
361 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
362 {
363 if (adev->no_hw_access)
364 return true;
365
366 #ifdef CONFIG_LOCKDEP
367 /*
368 * This is a bit complicated to understand, so worth a comment. What we assert
369 * here is that the GPU reset is not running on another thread in parallel.
370 *
371 * For this we trylock the read side of the reset semaphore, if that succeeds
372 * we know that the reset is not running in parallel.
373 *
374 * If the trylock fails we assert that we are either already holding the read
375 * side of the lock or are the reset thread itself and hold the write side of
376 * the lock.
377 */
378 if (in_task()) {
379 if (down_read_trylock(&adev->reset_domain->sem))
380 up_read(&adev->reset_domain->sem);
381 else
382 lockdep_assert_held(&adev->reset_domain->sem);
383 }
384 #endif
385 return false;
386 }
387
388 /**
389 * amdgpu_device_rreg - read a memory mapped IO or indirect register
390 *
391 * @adev: amdgpu_device pointer
392 * @reg: dword aligned register offset
393 * @acc_flags: access flags which require special behavior
394 *
395 * Returns the 32 bit value from the offset specified.
396 */
397 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
398 uint32_t reg, uint32_t acc_flags)
399 {
400 uint32_t ret;
401
402 if (amdgpu_device_skip_hw_access(adev))
403 return 0;
404
405 if ((reg * 4) < adev->rmmio_size) {
406 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
407 amdgpu_sriov_runtime(adev) &&
408 down_read_trylock(&adev->reset_domain->sem)) {
409 ret = amdgpu_kiq_rreg(adev, reg);
410 up_read(&adev->reset_domain->sem);
411 } else {
412 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
413 }
414 } else {
415 ret = adev->pcie_rreg(adev, reg * 4);
416 }
417
418 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
419
420 return ret;
421 }
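
/*
 * Illustrative note: most code reads registers through the macros in
 * amdgpu.h rather than calling this directly, roughly:
 *
 *	val = RREG32(reg);		// default path
 *	val = RREG32_NO_KIQ(reg);	// skip the KIQ/SR-IOV path
 *
 * which expand to amdgpu_device_rreg() with the matching acc_flags.
 */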
422
423 /*
424 * MMIO register read with bytes helper functions
425 * @offset: byte offset from MMIO start
426 */
427
428 /**
429 * amdgpu_mm_rreg8 - read a memory mapped IO register
430 *
431 * @adev: amdgpu_device pointer
432 * @offset: byte aligned register offset
433 *
434 * Returns the 8 bit value from the offset specified.
435 */
436 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
437 {
438 if (amdgpu_device_skip_hw_access(adev))
439 return 0;
440
441 if (offset < adev->rmmio_size)
442 return (readb(adev->rmmio + offset));
443 BUG();
444 }
445
446 /*
447 * MMIO register write with bytes helper functions
448 * @offset: byte offset from MMIO start
449 * @value: the value to be written to the register
450 */
451
452 /**
453 * amdgpu_mm_wreg8 - write a memory mapped IO register
454 *
455 * @adev: amdgpu_device pointer
456 * @offset: byte aligned register offset
457 * @value: 8 bit value to write
458 *
459 * Writes the value specified to the offset specified.
460 */
461 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
462 {
463 if (amdgpu_device_skip_hw_access(adev))
464 return;
465
466 if (offset < adev->rmmio_size)
467 writeb(value, adev->rmmio + offset);
468 else
469 BUG();
470 }
471
472 /**
473 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
474 *
475 * @adev: amdgpu_device pointer
476 * @reg: dword aligned register offset
477 * @v: 32 bit value to write to the register
478 * @acc_flags: access flags which require special behavior
479 *
480 * Writes the value specified to the offset specified.
481 */
482 void amdgpu_device_wreg(struct amdgpu_device *adev,
483 uint32_t reg, uint32_t v,
484 uint32_t acc_flags)
485 {
486 if (amdgpu_device_skip_hw_access(adev))
487 return;
488
489 if ((reg * 4) < adev->rmmio_size) {
490 if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
491 amdgpu_sriov_runtime(adev) &&
492 down_read_trylock(&adev->reset_domain->sem)) {
493 amdgpu_kiq_wreg(adev, reg, v);
494 up_read(&adev->reset_domain->sem);
495 } else {
496 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
497 }
498 } else {
499 adev->pcie_wreg(adev, reg * 4, v);
500 }
501
502 trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
503 }
504
505 /**
506 * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
507 *
508 * @adev: amdgpu_device pointer
509 * @reg: mmio/rlc register
510 * @v: value to write
511 *
512 * this function is invoked only for the debugfs register access
513 */
514 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
515 uint32_t reg, uint32_t v,
516 uint32_t xcc_id)
517 {
518 if (amdgpu_device_skip_hw_access(adev))
519 return;
520
521 if (amdgpu_sriov_fullaccess(adev) &&
522 adev->gfx.rlc.funcs &&
523 adev->gfx.rlc.funcs->is_rlcg_access_range) {
524 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
525 return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
526 } else if ((reg * 4) >= adev->rmmio_size) {
527 adev->pcie_wreg(adev, reg * 4, v);
528 } else {
529 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
530 }
531 }
532
533 /**
534 * amdgpu_device_indirect_rreg - read an indirect register
535 *
536 * @adev: amdgpu_device pointer
537 * @reg_addr: indirect register address to read from
538 *
539 * Returns the value of indirect register @reg_addr
540 */
541 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
542 u32 reg_addr)
543 {
544 unsigned long flags, pcie_index, pcie_data;
545 void __iomem *pcie_index_offset;
546 void __iomem *pcie_data_offset;
547 u32 r;
548
549 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
550 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
551
552 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
553 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
554 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
555
556 writel(reg_addr, pcie_index_offset);
557 readl(pcie_index_offset);
558 r = readl(pcie_data_offset);
559 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
560
561 return r;
562 }
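
/*
 * The PCIE_INDEX/PCIE_DATA pair above is a classic index/data window:
 * writing the target offset to the index register selects it, after
 * which the data register aliases the selected register. The readl()
 * following each writel() flushes the posted write so the selection
 * lands before the data access. The minimal pattern (illustrative):
 *
 *	writel(reg_addr, pcie_index_offset);	// select register
 *	readl(pcie_index_offset);		// flush posted write
 *	val = readl(pcie_data_offset);		// access selected register
 */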
563
564 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
565 u64 reg_addr)
566 {
567 unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
568 u32 r;
569 void __iomem *pcie_index_offset;
570 void __iomem *pcie_index_hi_offset;
571 void __iomem *pcie_data_offset;
572
573 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
574 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
575 if (adev->nbio.funcs->get_pcie_index_hi_offset)
576 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
577 else
578 pcie_index_hi = 0;
579
580 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
581 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
582 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
583 if (pcie_index_hi != 0)
584 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
585 pcie_index_hi * 4;
586
587 writel(reg_addr, pcie_index_offset);
588 readl(pcie_index_offset);
589 if (pcie_index_hi != 0) {
590 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
591 readl(pcie_index_hi_offset);
592 }
593 r = readl(pcie_data_offset);
594
595 /* clear the high bits */
596 if (pcie_index_hi != 0) {
597 writel(0, pcie_index_hi_offset);
598 readl(pcie_index_hi_offset);
599 }
600
601 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
602
603 return r;
604 }
605
606 /**
607 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
608 *
609 * @adev: amdgpu_device pointer
610 * @reg_addr: indirect register address to read from
611 *
612 * Returns the value of indirect register @reg_addr
613 */
614 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
615 u32 reg_addr)
616 {
617 unsigned long flags, pcie_index, pcie_data;
618 void __iomem *pcie_index_offset;
619 void __iomem *pcie_data_offset;
620 u64 r;
621
622 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
623 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
624
625 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
626 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
627 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
628
629 /* read low 32 bits */
630 writel(reg_addr, pcie_index_offset);
631 readl(pcie_index_offset);
632 r = readl(pcie_data_offset);
633 /* read high 32 bits */
634 writel(reg_addr + 4, pcie_index_offset);
635 readl(pcie_index_offset);
636 r |= ((u64)readl(pcie_data_offset) << 32);
637 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
638
639 return r;
640 }
641
642 /**
643 * amdgpu_device_indirect_wreg - write an indirect register address
644 *
645 * @adev: amdgpu_device pointer
646 * @reg_addr: indirect register offset
647 * @reg_data: indirect register data
648 *
649 */
650 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
651 u32 reg_addr, u32 reg_data)
652 {
653 unsigned long flags, pcie_index, pcie_data;
654 void __iomem *pcie_index_offset;
655 void __iomem *pcie_data_offset;
656
657 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
658 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
659
660 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
661 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
662 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
663
664 writel(reg_addr, pcie_index_offset);
665 readl(pcie_index_offset);
666 writel(reg_data, pcie_data_offset);
667 readl(pcie_data_offset);
668 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
669 }
670
671 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
672 u64 reg_addr, u32 reg_data)
673 {
674 unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
675 void __iomem *pcie_index_offset;
676 void __iomem *pcie_index_hi_offset;
677 void __iomem *pcie_data_offset;
678
679 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
680 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
681 if (adev->nbio.funcs->get_pcie_index_hi_offset)
682 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
683 else
684 pcie_index_hi = 0;
685
686 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
687 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
688 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
689 if (pcie_index_hi != 0)
690 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
691 pcie_index_hi * 4;
692
693 writel(reg_addr, pcie_index_offset);
694 readl(pcie_index_offset);
695 if (pcie_index_hi != 0) {
696 writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
697 readl(pcie_index_hi_offset);
698 }
699 writel(reg_data, pcie_data_offset);
700 readl(pcie_data_offset);
701
702 /* clear the high bits */
703 if (pcie_index_hi != 0) {
704 writel(0, pcie_index_hi_offset);
705 readl(pcie_index_hi_offset);
706 }
707
708 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
709 }
710
711 /**
712 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
713 *
714 * @adev: amdgpu_device pointer
715 * @reg_addr: indirect register offset
716 * @reg_data: indirect register data
717 *
718 */
719 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
720 u32 reg_addr, u64 reg_data)
721 {
722 unsigned long flags, pcie_index, pcie_data;
723 void __iomem *pcie_index_offset;
724 void __iomem *pcie_data_offset;
725
726 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
727 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
728
729 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
730 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
731 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
732
733 /* write low 32 bits */
734 writel(reg_addr, pcie_index_offset);
735 readl(pcie_index_offset);
736 writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
737 readl(pcie_data_offset);
738 /* write high 32 bits */
739 writel(reg_addr + 4, pcie_index_offset);
740 readl(pcie_index_offset);
741 writel((u32)(reg_data >> 32), pcie_data_offset);
742 readl(pcie_data_offset);
743 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
744 }
745
746 /**
747 * amdgpu_device_get_rev_id - query device rev_id
748 *
749 * @adev: amdgpu_device pointer
750 *
751 * Return device rev_id
752 */
753 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
754 {
755 return adev->nbio.funcs->get_rev_id(adev);
756 }
757
758 /**
759 * amdgpu_invalid_rreg - dummy reg read function
760 *
761 * @adev: amdgpu_device pointer
762 * @reg: offset of register
763 *
764 * Dummy register read function. Used for register blocks
765 * that certain asics don't have (all asics).
766 * Returns the value in the register.
767 */
768 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
769 {
770 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
771 BUG();
772 return 0;
773 }
774
775 static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
776 {
777 DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
778 BUG();
779 return 0;
780 }
781
782 /**
783 * amdgpu_invalid_wreg - dummy reg write function
784 *
785 * @adev: amdgpu_device pointer
786 * @reg: offset of register
787 * @v: value to write to the register
788 *
789 * Dummy register write function. Used for register blocks
790 * that certain asics don't have (all asics).
791 */
792 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
793 {
794 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
795 reg, v);
796 BUG();
797 }
798
799 static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
800 {
801 DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
802 reg, v);
803 BUG();
804 }
805
806 /**
807 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
808 *
809 * @adev: amdgpu_device pointer
810 * @reg: offset of register
811 *
812 * Dummy register read function. Used for register blocks
813 * that certain asics don't have (all asics).
814 * Returns the value in the register.
815 */
816 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
817 {
818 DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
819 BUG();
820 return 0;
821 }
822
823 /**
824 * amdgpu_invalid_wreg64 - dummy reg write function
825 *
826 * @adev: amdgpu_device pointer
827 * @reg: offset of register
828 * @v: value to write to the register
829 *
830 * Dummy register write function. Used for register blocks
831 * that certain asics don't have (all asics).
832 */
833 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
834 {
835 DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
836 reg, v);
837 BUG();
838 }
839
840 /**
841 * amdgpu_block_invalid_rreg - dummy reg read function
842 *
843 * @adev: amdgpu_device pointer
844 * @block: offset of instance
845 * @reg: offset of register
846 *
847 * Dummy register read function. Used for register blocks
848 * that certain asics don't have (all asics).
849 * Returns the value in the register.
850 */
851 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
852 uint32_t block, uint32_t reg)
853 {
854 DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
855 reg, block);
856 BUG();
857 return 0;
858 }
859
860 /**
861 * amdgpu_block_invalid_wreg - dummy reg write function
862 *
863 * @adev: amdgpu_device pointer
864 * @block: offset of instance
865 * @reg: offset of register
866 * @v: value to write to the register
867 *
868 * Dummy register write function. Used for register blocks
869 * that certain asics don't have (all asics).
870 */
871 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
872 uint32_t block,
873 uint32_t reg, uint32_t v)
874 {
875 DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
876 reg, block, v);
877 BUG();
878 }
879
880 /**
881 * amdgpu_device_asic_init - Wrapper for atom asic_init
882 *
883 * @adev: amdgpu_device pointer
884 *
885 * Does any asic specific work and then calls atom asic init.
886 */
887 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
888 {
889 int ret;
890
891 amdgpu_asic_pre_asic_init(adev);
892
893 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) ||
894 adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
895 amdgpu_psp_wait_for_bootloader(adev);
896 ret = amdgpu_atomfirmware_asic_init(adev, true);
897 return ret;
898 } else {
899 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
900 }
903 }
904
905 /**
906 * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
907 *
908 * @adev: amdgpu_device pointer
909 *
910 * Allocates a scratch page of VRAM for use by various things in the
911 * driver.
912 */
913 static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
914 {
915 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
916 AMDGPU_GEM_DOMAIN_VRAM |
917 AMDGPU_GEM_DOMAIN_GTT,
918 &adev->mem_scratch.robj,
919 &adev->mem_scratch.gpu_addr,
920 (void **)&adev->mem_scratch.ptr);
921 }
922
923 /**
924 * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
925 *
926 * @adev: amdgpu_device pointer
927 *
928 * Frees the VRAM scratch page.
929 */
930 static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
931 {
932 amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
933 }
934
935 /**
936 * amdgpu_device_program_register_sequence - program an array of registers.
937 *
938 * @adev: amdgpu_device pointer
939 * @registers: pointer to the register array
940 * @array_size: size of the register array
941 *
942 * Programs an array of registers with AND and OR masks.
943 * This is a helper for setting golden registers.
944 */
945 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
946 const u32 *registers,
947 const u32 array_size)
948 {
949 u32 tmp, reg, and_mask, or_mask;
950 int i;
951
952 if (array_size % 3)
953 return;
954
955 for (i = 0; i < array_size; i += 3) {
956 reg = registers[i + 0];
957 and_mask = registers[i + 1];
958 or_mask = registers[i + 2];
959
960 if (and_mask == 0xffffffff) {
961 tmp = or_mask;
962 } else {
963 tmp = RREG32(reg);
964 tmp &= ~and_mask;
965 if (adev->family >= AMDGPU_FAMILY_AI)
966 tmp |= (or_mask & and_mask);
967 else
968 tmp |= or_mask;
969 }
970 WREG32(reg, tmp);
971 }
972 }
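
/*
 * Illustrative sketch (hypothetical register names and values, not a
 * real golden-settings table): the array is consumed as
 * {reg, and_mask, or_mask} triples, so a caller looks roughly like:
 *
 *	static const u32 golden_settings_example[] = {
 *		mmFOO_CNTL, 0x0000000f, 0x00000004,	// read-modify-write
 *		mmBAR_CNTL, 0xffffffff, 0x00010000,	// full overwrite
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev,
 *			golden_settings_example,
 *			ARRAY_SIZE(golden_settings_example));
 */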
973
974 /**
975 * amdgpu_device_pci_config_reset - reset the GPU
976 *
977 * @adev: amdgpu_device pointer
978 *
979 * Resets the GPU using the pci config reset sequence.
980 * Only applicable to asics prior to vega10.
981 */
982 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
983 {
984 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
985 }
986
987 /**
988 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
989 *
990 * @adev: amdgpu_device pointer
991 *
992 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
993 */
994 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
995 {
996 return pci_reset_function(adev->pdev);
997 }
998
999 /*
1000 * amdgpu_device_wb_*()
1001 * Writeback is the method by which the GPU updates special pages in memory
1002 * with the status of certain GPU events (fences, ring pointers, etc.).
1003 */
1004
1005 /**
1006 * amdgpu_device_wb_fini - Disable Writeback and free memory
1007 *
1008 * @adev: amdgpu_device pointer
1009 *
1010 * Disables Writeback and frees the Writeback memory (all asics).
1011 * Used at driver shutdown.
1012 */
1013 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1014 {
1015 if (adev->wb.wb_obj) {
1016 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1017 &adev->wb.gpu_addr,
1018 (void **)&adev->wb.wb);
1019 adev->wb.wb_obj = NULL;
1020 }
1021 }
1022
1023 /**
1024 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
1025 *
1026 * @adev: amdgpu_device pointer
1027 *
1028 * Initializes writeback and allocates writeback memory (all asics).
1029 * Used at driver startup.
1030 * Returns 0 on success or a negative error code on failure.
1031 */
1032 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1033 {
1034 int r;
1035
1036 if (adev->wb.wb_obj == NULL) {
1037 /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
1038 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1039 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
1040 &adev->wb.wb_obj, &adev->wb.gpu_addr,
1041 (void **)&adev->wb.wb);
1042 if (r) {
1043 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1044 return r;
1045 }
1046
1047 adev->wb.num_wb = AMDGPU_MAX_WB;
1048 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1049
1050 /* clear wb memory */
1051 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1052 }
1053
1054 return 0;
1055 }
1056
1057 /**
1058 * amdgpu_device_wb_get - Allocate a wb entry
1059 *
1060 * @adev: amdgpu_device pointer
1061 * @wb: wb index
1062 *
1063 * Allocate a wb slot for use by the driver (all asics).
1064 * Returns 0 on success or -EINVAL on failure.
1065 */
1066 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1067 {
1068 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1069
1070 if (offset < adev->wb.num_wb) {
1071 __set_bit(offset, adev->wb.used);
1072 *wb = offset << 3; /* convert to dw offset */
1073 return 0;
1074 } else {
1075 return -EINVAL;
1076 }
1077 }
1078
1079 /**
1080 * amdgpu_device_wb_free - Free a wb entry
1081 *
1082 * @adev: amdgpu_device pointer
1083 * @wb: wb index
1084 *
1085 * Free a wb slot allocated for use by the driver (all asics)
1086 */
1087 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1088 {
1089 wb >>= 3;
1090 if (wb < adev->wb.num_wb)
1091 __clear_bit(wb, adev->wb.used);
1092 }
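
/*
 * Usage sketch (illustrative only; error handling elided): a typical
 * user allocates a slot, derives the CPU and GPU addresses from the
 * returned dword offset, and frees the slot when done.
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		volatile u32 *cpu_addr = &adev->wb.wb[wb];
 *		u64 gpu_addr = adev->wb.gpu_addr + wb * 4;
 *
 *		// ... hand gpu_addr to an engine, poll *cpu_addr ...
 *
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */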
1093
1094 /**
1095 * amdgpu_device_resize_fb_bar - try to resize FB BAR
1096 *
1097 * @adev: amdgpu_device pointer
1098 *
1099 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
1100 * to fail, but if any of the BARs is not accessible after the resize we abort
1101 * driver loading by returning -ENODEV.
1102 */
1103 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1104 {
1105 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1106 struct pci_bus *root;
1107 struct resource *res;
1108 unsigned int i;
1109 u16 cmd;
1110 int r;
1111
1112 if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
1113 return 0;
1114
1115 /* Bypass for VF */
1116 if (amdgpu_sriov_vf(adev))
1117 return 0;
1118
1119 /* skip if the bios has already enabled large BAR */
1120 if (adev->gmc.real_vram_size &&
1121 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1122 return 0;
1123
1124 /* Check if the root BUS has 64bit memory resources */
1125 root = adev->pdev->bus;
1126 while (root->parent)
1127 root = root->parent;
1128
1129 pci_bus_for_each_resource(root, res, i) {
1130 if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
1131 res->start > 0x100000000ull)
1132 break;
1133 }
1134
1135 /* Trying to resize is pointless without a root hub window above 4GB */
1136 if (!res)
1137 return 0;
1138
1139 /* Limit the BAR size to what is available */
1140 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1141 rbar_size);
1142
1143 /* Disable memory decoding while we change the BAR addresses and size */
1144 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1145 pci_write_config_word(adev->pdev, PCI_COMMAND,
1146 cmd & ~PCI_COMMAND_MEMORY);
1147
1148 /* Free the VRAM and doorbell BAR, we most likely need to move both. */
1149 amdgpu_doorbell_fini(adev);
1150 if (adev->asic_type >= CHIP_BONAIRE)
1151 pci_release_resource(adev->pdev, 2);
1152
1153 pci_release_resource(adev->pdev, 0);
1154
1155 r = pci_resize_resource(adev->pdev, 0, rbar_size);
1156 if (r == -ENOSPC)
1157 DRM_INFO("Not enough PCI address space for a large BAR.");
1158 else if (r && r != -ENOTSUPP)
1159 DRM_ERROR("Problem resizing BAR0 (%d).", r);
1160
1161 pci_assign_unassigned_bus_resources(adev->pdev->bus);
1162
1163 /* When the doorbell or fb BAR isn't available we have no chance of
1164 * using the device.
1165 */
1166 r = amdgpu_doorbell_init(adev);
1167 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1168 return -ENODEV;
1169
1170 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1171
1172 return 0;
1173 }
1174
1175 static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
1176 {
1177 if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
1178 return false;
1179
1180 return true;
1181 }
1182
1183 /*
1184 * GPU helpers function.
1185 */
1186 /**
1187 * amdgpu_device_need_post - check if the hw need post or not
1188 *
1189 * @adev: amdgpu_device pointer
1190 *
1191 * Check if the asic needs to be posted, either because it has not yet been
1192 * initialized at driver startup or because a hw reset was performed.
1193 * Returns true if post is needed, false if not.
1194 */
1195 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1196 {
1197 uint32_t reg;
1198
1199 if (amdgpu_sriov_vf(adev))
1200 return false;
1201
1202 if (!amdgpu_device_read_bios(adev))
1203 return false;
1204
1205 if (amdgpu_passthrough(adev)) {
1206 /* for FIJI: In the whole-GPU pass-through virtualization case, after a VM
1207 * reboot some old SMC firmware still needs the driver to do a vPost, otherwise
1208 * the GPU hangs. SMC firmware versions above 22.15 don't have this flaw, so
1209 * we force a vPost for SMC versions below 22.15.
1210 */
1211 if (adev->asic_type == CHIP_FIJI) {
1212 int err;
1213 uint32_t fw_ver;
1214
1215 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1216 /* force vPost if an error occurred */
1217 if (err)
1218 return true;
1219
1220 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1221 release_firmware(adev->pm.fw);
1222 if (fw_ver < 0x00160e00)
1223 return true;
1224 }
1225 }
1226
1227 /* Don't post if we need to reset whole hive on init */
1228 if (adev->gmc.xgmi.pending_reset)
1229 return false;
1230
1231 if (adev->has_hw_reset) {
1232 adev->has_hw_reset = false;
1233 return true;
1234 }
1235
1236 /* bios scratch used on CIK+ */
1237 if (adev->asic_type >= CHIP_BONAIRE)
1238 return amdgpu_atombios_scratch_need_asic_init(adev);
1239
1240 /* check MEM_SIZE for older asics */
1241 reg = amdgpu_asic_get_config_memsize(adev);
1242
1243 if ((reg != 0) && (reg != 0xffffffff))
1244 return false;
1245
1246 return true;
1247 }
1248
1249 /*
1250 * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
1251 * speed switching. Until we have confirmation from Intel that a specific host
1252 * supports it, it's safer that we keep it disabled for all.
1253 *
1254 * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
1255 * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
1256 */
1257 bool amdgpu_device_pcie_dynamic_switching_supported(void)
1258 {
1259 #if IS_ENABLED(CONFIG_X86)
1260 struct cpuinfo_x86 *c = &cpu_data(0);
1261
1262 if (c->x86_vendor == X86_VENDOR_INTEL)
1263 return false;
1264 #endif
1265 return true;
1266 }
1267
1268 /**
1269 * amdgpu_device_should_use_aspm - check if the device should program ASPM
1270 *
1271 * @adev: amdgpu_device pointer
1272 *
1273 * Confirm whether the module parameter and pcie bridge agree that ASPM should
1274 * be set for this device.
1275 *
1276 * Returns true if it should be used or false if not.
1277 */
1278 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1279 {
1280 switch (amdgpu_aspm) {
1281 case -1:
1282 break;
1283 case 0:
1284 return false;
1285 case 1:
1286 return true;
1287 default:
1288 return false;
1289 }
1290 return pcie_aspm_enabled(adev->pdev);
1291 }
1292
1293 bool amdgpu_device_aspm_support_quirk(void)
1294 {
1295 #if IS_ENABLED(CONFIG_X86)
1296 struct cpuinfo_x86 *c = &cpu_data(0);
1297
1298 return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
1299 #else
1300 return true;
1301 #endif
1302 }
1303
1304 /* if we get transitioned to only one device, take VGA back */
1305 /**
1306 * amdgpu_device_vga_set_decode - enable/disable vga decode
1307 *
1308 * @pdev: PCI device pointer
1309 * @state: enable/disable vga decode
1310 *
1311 * Enable/disable vga decode (all asics).
1312 * Returns VGA resource flags.
1313 */
1314 static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
1315 bool state)
1316 {
1317 struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1318
1319 amdgpu_asic_set_vga_state(adev, state);
1320 if (state)
1321 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1322 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1323 else
1324 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1325 }
1326
1327 /**
1328 * amdgpu_device_check_block_size - validate the vm block size
1329 *
1330 * @adev: amdgpu_device pointer
1331 *
1332 * Validates the vm block size specified via module parameter.
1333 * The vm block size defines the number of bits in the page table versus the
1334 * page directory. A page is 4KB, so we have 12 bits of offset, a minimum of
1335 * 9 bits in the page table, and the remaining bits in the page directory.
1336 */
1337 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1338 {
1339 /* defines number of bits in page table versus page directory,
1340 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1341 * page table and the remaining bits are in the page directory
1342 */
1343 if (amdgpu_vm_block_size == -1)
1344 return;
1345
1346 if (amdgpu_vm_block_size < 9) {
1347 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1348 amdgpu_vm_block_size);
1349 amdgpu_vm_block_size = -1;
1350 }
1351 }
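
/*
 * Worked example: with the minimum block size of 9 bits, a page table
 * maps 2^9 entries of 4KB pages, i.e. 2^(9+12) = 2MB of address space
 * per page table; the remaining VA bits are resolved by the page
 * directory levels.
 */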
1352
1353 /**
1354 * amdgpu_device_check_vm_size - validate the vm size
1355 *
1356 * @adev: amdgpu_device pointer
1357 *
1358 * Validates the vm size in GB specified via module parameter.
1359 * The VM size is the size of the GPU virtual memory space in GB.
1360 */
1361 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1362 {
1363 /* no need to check the default value */
1364 if (amdgpu_vm_size == -1)
1365 return;
1366
1367 if (amdgpu_vm_size < 1) {
1368 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1369 amdgpu_vm_size);
1370 amdgpu_vm_size = -1;
1371 }
1372 }
1373
1374 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1375 {
1376 struct sysinfo si;
1377 bool is_os_64 = (sizeof(void *) == 8);
1378 uint64_t total_memory;
1379 uint64_t dram_size_seven_GB = 0x1B8000000;
1380 uint64_t dram_size_three_GB = 0xB8000000;
1381
1382 if (amdgpu_smu_memory_pool_size == 0)
1383 return;
1384
1385 if (!is_os_64) {
1386 DRM_WARN("Not 64-bit OS, feature not supported\n");
1387 goto def_value;
1388 }
1389 si_meminfo(&si);
1390 total_memory = (uint64_t)si.totalram * si.mem_unit;
1391
1392 if ((amdgpu_smu_memory_pool_size == 1) ||
1393 (amdgpu_smu_memory_pool_size == 2)) {
1394 if (total_memory < dram_size_three_GB)
1395 goto def_value1;
1396 } else if ((amdgpu_smu_memory_pool_size == 4) ||
1397 (amdgpu_smu_memory_pool_size == 8)) {
1398 if (total_memory < dram_size_seven_GB)
1399 goto def_value1;
1400 } else {
1401 DRM_WARN("Smu memory pool size not supported\n");
1402 goto def_value;
1403 }
1404 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1405
1406 return;
1407
1408 def_value1:
1409 DRM_WARN("No enough system memory\n");
1410 def_value:
1411 adev->pm.smu_prv_buffer_size = 0;
1412 }
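
/*
 * Note on the "<< 28" above: the module parameter is given in 256MB
 * units, so e.g. amdgpu_smu_memory_pool_size == 2 reserves
 * 2 * 2^28 bytes = 512MB for the SMU private buffer.
 */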
1413
1414 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1415 {
1416 if (!(adev->flags & AMD_IS_APU) ||
1417 adev->asic_type < CHIP_RAVEN)
1418 return 0;
1419
1420 switch (adev->asic_type) {
1421 case CHIP_RAVEN:
1422 if (adev->pdev->device == 0x15dd)
1423 adev->apu_flags |= AMD_APU_IS_RAVEN;
1424 if (adev->pdev->device == 0x15d8)
1425 adev->apu_flags |= AMD_APU_IS_PICASSO;
1426 break;
1427 case CHIP_RENOIR:
1428 if ((adev->pdev->device == 0x1636) ||
1429 (adev->pdev->device == 0x164c))
1430 adev->apu_flags |= AMD_APU_IS_RENOIR;
1431 else
1432 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1433 break;
1434 case CHIP_VANGOGH:
1435 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1436 break;
1437 case CHIP_YELLOW_CARP:
1438 break;
1439 case CHIP_CYAN_SKILLFISH:
1440 if ((adev->pdev->device == 0x13FE) ||
1441 (adev->pdev->device == 0x143F))
1442 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1443 break;
1444 default:
1445 break;
1446 }
1447
1448 return 0;
1449 }
1450
1451 /**
1452 * amdgpu_device_check_arguments - validate module params
1453 *
1454 * @adev: amdgpu_device pointer
1455 *
1456 * Validates certain module parameters and updates
1457 * the associated values used by the driver (all asics).
1458 */
1459 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1460 {
1461 if (amdgpu_sched_jobs < 4) {
1462 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1463 amdgpu_sched_jobs);
1464 amdgpu_sched_jobs = 4;
1465 } else if (!is_power_of_2(amdgpu_sched_jobs)) {
1466 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1467 amdgpu_sched_jobs);
1468 amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
1469 }
1470
1471 if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
1472 /* gart size must be greater than or equal to 32M */
1473 dev_warn(adev->dev, "gart size (%d) too small\n",
1474 amdgpu_gart_size);
1475 amdgpu_gart_size = -1;
1476 }
1477
1478 if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
1479 /* gtt size must be greater than or equal to 32M */
1480 dev_warn(adev->dev, "gtt size (%d) too small\n",
1481 amdgpu_gtt_size);
1482 amdgpu_gtt_size = -1;
1483 }
1484
1485 /* valid range is between 4 and 9 inclusive */
1486 if (amdgpu_vm_fragment_size != -1 &&
1487 (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
1488 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1489 amdgpu_vm_fragment_size = -1;
1490 }
1491
1492 if (amdgpu_sched_hw_submission < 2) {
1493 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1494 amdgpu_sched_hw_submission);
1495 amdgpu_sched_hw_submission = 2;
1496 } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
1497 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1498 amdgpu_sched_hw_submission);
1499 amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
1500 }
1501
1502 if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
1503 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1504 amdgpu_reset_method = -1;
1505 }
1506
1507 amdgpu_device_check_smu_prv_buffer_size(adev);
1508
1509 amdgpu_device_check_vm_size(adev);
1510
1511 amdgpu_device_check_block_size(adev);
1512
1513 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1514
1515 return 0;
1516 }
1517
1518 /**
1519 * amdgpu_switcheroo_set_state - set switcheroo state
1520 *
1521 * @pdev: pci dev pointer
1522 * @state: vga_switcheroo state
1523 *
1524 * Callback for the switcheroo driver. Suspends or resumes
1525 * the asics before or after it is powered up using ACPI methods.
1526 */
1527 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
1528 enum vga_switcheroo_state state)
1529 {
1530 struct drm_device *dev = pci_get_drvdata(pdev);
1531 int r;
1532
1533 if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
1534 return;
1535
1536 if (state == VGA_SWITCHEROO_ON) {
1537 pr_info("switched on\n");
1538 /* don't suspend or resume card normally */
1539 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1540
1541 pci_set_power_state(pdev, PCI_D0);
1542 amdgpu_device_load_pci_state(pdev);
1543 r = pci_enable_device(pdev);
1544 if (r)
1545 DRM_WARN("pci_enable_device failed (%d)\n", r);
1546 amdgpu_device_resume(dev, true);
1547
1548 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1549 } else {
1550 pr_info("switched off\n");
1551 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1552 amdgpu_device_suspend(dev, true);
1553 amdgpu_device_cache_pci_state(pdev);
1554 /* Shut down the device */
1555 pci_disable_device(pdev);
1556 pci_set_power_state(pdev, PCI_D3cold);
1557 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1558 }
1559 }
1560
1561 /**
1562 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
1563 *
1564 * @pdev: pci dev pointer
1565 *
1566 * Callback for the switcheroo driver. Checks whether the switcheroo
1567 * state can be changed.
1568 * Returns true if the state can be changed, false if not.
1569 */
1570 static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
1571 {
1572 struct drm_device *dev = pci_get_drvdata(pdev);
1573
1574 /*
1575 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1576 * locking inversion with the driver load path. And the access here is
1577 * completely racy anyway. So don't bother with locking for now.
1578 */
1579 return atomic_read(&dev->open_count) == 0;
1580 }
1581
1582 static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
1583 .set_gpu_state = amdgpu_switcheroo_set_state,
1584 .reprobe = NULL,
1585 .can_switch = amdgpu_switcheroo_can_switch,
1586 };
1587
1588 /**
1589 * amdgpu_device_ip_set_clockgating_state - set the CG state
1590 *
1591 * @dev: amdgpu_device pointer
1592 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1593 * @state: clockgating state (gate or ungate)
1594 *
1595 * Sets the requested clockgating state for all instances of
1596 * the hardware IP specified.
1597 * Returns the error code from the last instance.
1598 */
1599 int amdgpu_device_ip_set_clockgating_state(void *dev,
1600 enum amd_ip_block_type block_type,
1601 enum amd_clockgating_state state)
1602 {
1603 struct amdgpu_device *adev = dev;
1604 int i, r = 0;
1605
1606 for (i = 0; i < adev->num_ip_blocks; i++) {
1607 if (!adev->ip_blocks[i].status.valid)
1608 continue;
1609 if (adev->ip_blocks[i].version->type != block_type)
1610 continue;
1611 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1612 continue;
1613 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1614 (void *)adev, state);
1615 if (r)
1616 DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
1617 adev->ip_blocks[i].version->funcs->name, r);
1618 }
1619 return r;
1620 }
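
/*
 * Usage sketch (illustrative): gate the clocks of all GFX IP instances
 * on the device.
 *
 *	r = amdgpu_device_ip_set_clockgating_state(adev,
 *			AMD_IP_BLOCK_TYPE_GFX,
 *			AMD_CG_STATE_GATE);
 */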
1621
1622 /**
1623 * amdgpu_device_ip_set_powergating_state - set the PG state
1624 *
1625 * @dev: amdgpu_device pointer
1626 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1627 * @state: powergating state (gate or ungate)
1628 *
1629 * Sets the requested powergating state for all instances of
1630 * the hardware IP specified.
1631 * Returns the error code from the last instance.
1632 */
1633 int amdgpu_device_ip_set_powergating_state(void *dev,
1634 enum amd_ip_block_type block_type,
1635 enum amd_powergating_state state)
1636 {
1637 struct amdgpu_device *adev = dev;
1638 int i, r = 0;
1639
1640 for (i = 0; i < adev->num_ip_blocks; i++) {
1641 if (!adev->ip_blocks[i].status.valid)
1642 continue;
1643 if (adev->ip_blocks[i].version->type != block_type)
1644 continue;
1645 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1646 continue;
1647 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1648 (void *)adev, state);
1649 if (r)
1650 DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
1651 adev->ip_blocks[i].version->funcs->name, r);
1652 }
1653 return r;
1654 }
1655
1656 /**
1657 * amdgpu_device_ip_get_clockgating_state - get the CG state
1658 *
1659 * @adev: amdgpu_device pointer
1660 * @flags: clockgating feature flags
1661 *
1662 * Walks the list of IPs on the device and updates the clockgating
1663 * flags for each IP.
1664 * Updates @flags with the feature flags for each hardware IP where
1665 * clockgating is enabled.
1666 */
1667 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1668 u64 *flags)
1669 {
1670 int i;
1671
1672 for (i = 0; i < adev->num_ip_blocks; i++) {
1673 if (!adev->ip_blocks[i].status.valid)
1674 continue;
1675 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1676 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1677 }
1678 }
1679
1680 /**
1681 * amdgpu_device_ip_wait_for_idle - wait for idle
1682 *
1683 * @adev: amdgpu_device pointer
1684 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1685 *
1686 * Waits for the requested hardware IP to be idle.
1687 * Returns 0 for success or a negative error code on failure.
1688 */
1689 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1690 enum amd_ip_block_type block_type)
1691 {
1692 int i, r;
1693
1694 for (i = 0; i < adev->num_ip_blocks; i++) {
1695 if (!adev->ip_blocks[i].status.valid)
1696 continue;
1697 if (adev->ip_blocks[i].version->type == block_type) {
1698 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1699 if (r)
1700 return r;
1701 break;
1702 }
1703 }
1704 return 0;
1706 }
1707
1708 /**
1709 * amdgpu_device_ip_is_idle - is the hardware IP idle
1710 *
1711 * @adev: amdgpu_device pointer
1712 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
1713 *
1714 * Check if the hardware IP is idle or not.
1715 * Returns true if the IP is idle, false if not.
1716 */
1717 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1718 enum amd_ip_block_type block_type)
1719 {
1720 int i;
1721
1722 for (i = 0; i < adev->num_ip_blocks; i++) {
1723 if (!adev->ip_blocks[i].status.valid)
1724 continue;
1725 if (adev->ip_blocks[i].version->type == block_type)
1726 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1727 }
1728 return true;
1730 }
1731
1732 /**
1733 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
1734 *
1735 * @adev: amdgpu_device pointer
1736 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
1737 *
1738 * Returns a pointer to the hardware IP block structure
1739 * if it exists for the asic, otherwise NULL.
1740 */
1741 struct amdgpu_ip_block *
1742 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1743 enum amd_ip_block_type type)
1744 {
1745 int i;
1746
1747 for (i = 0; i < adev->num_ip_blocks; i++)
1748 if (adev->ip_blocks[i].version->type == type)
1749 return &adev->ip_blocks[i];
1750
1751 return NULL;
1752 }
1753
1754 /**
1755 * amdgpu_device_ip_block_version_cmp
1756 *
1757 * @adev: amdgpu_device pointer
1758 * @type: enum amd_ip_block_type
1759 * @major: major version
1760 * @minor: minor version
1761 *
1762 * return 0 if equal or greater
1763 * return 1 if smaller or the ip_block doesn't exist
1764 */
1765 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1766 enum amd_ip_block_type type,
1767 u32 major, u32 minor)
1768 {
1769 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1770
1771 if (ip_block && ((ip_block->version->major > major) ||
1772 ((ip_block->version->major == major) &&
1773 (ip_block->version->minor >= minor))))
1774 return 0;
1775
1776 return 1;
1777 }
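
/*
 * Illustrative usage (editor-added sketch): note the inverted sense of
 * the return value -- 0 means "at least @major.@minor", so a minimum
 * version check reads:
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_SMC,
 *						7, 0)) {
 *		... SMC is v7.0 or newer ...
 *	}
 */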
1778
1779 /**
1780 * amdgpu_device_ip_block_add
1781 *
1782 * @adev: amdgpu_device pointer
1783 * @ip_block_version: pointer to the IP to add
1784 *
1785 * Adds the IP block driver information to the collection of IPs
1786 * on the asic.
1787 */
1788 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1789 const struct amdgpu_ip_block_version *ip_block_version)
1790 {
1791 if (!ip_block_version)
1792 return -EINVAL;
1793
1794 switch (ip_block_version->type) {
1795 case AMD_IP_BLOCK_TYPE_VCN:
1796 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1797 return 0;
1798 break;
1799 case AMD_IP_BLOCK_TYPE_JPEG:
1800 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1801 return 0;
1802 break;
1803 default:
1804 break;
1805 }
1806
1807 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1808 ip_block_version->funcs->name);
1809
1810 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1811
1812 return 0;
1813 }
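
/*
 * Illustrative usage (editor-added sketch): SoC files register their IP
 * blocks in init order via this helper, roughly:
 *
 *	r = amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *	if (r)
 *		return r;
 *	r = amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
 *	...
 */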
1814
1815 /**
1816 * amdgpu_device_enable_virtual_display - enable virtual display feature
1817 *
1818 * @adev: amdgpu_device pointer
1819 *
1820 * Enables the virtual display feature if the user has enabled it via
1821 * the module parameter virtual_display. This feature provides a virtual
1822 * display hardware on headless boards or in virtualized environments.
1823 * This function parses and validates the configuration string specified by
1824 * the user and configures the virtual display configuration (number of
1825 * virtual connectors, crtcs, etc.) specified.
1826 */
1827 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1828 {
1829 adev->enable_virtual_display = false;
1830
1831 if (amdgpu_virtual_display) {
1832 const char *pci_address_name = pci_name(adev->pdev);
1833 char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
1834
1835 pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
1836 pciaddstr_tmp = pciaddstr;
1837 while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
1838 pciaddname = strsep(&pciaddname_tmp, ",");
1839 if (!strcmp("all", pciaddname)
1840 || !strcmp(pci_address_name, pciaddname)) {
1841 long num_crtc;
1842 int res = -1;
1843
1844 adev->enable_virtual_display = true;
1845
1846 if (pciaddname_tmp)
1847 res = kstrtol(pciaddname_tmp, 10,
1848 &num_crtc);
1849
1850 if (!res) {
1851 if (num_crtc < 1)
1852 num_crtc = 1;
1853 if (num_crtc > 6)
1854 num_crtc = 6;
1855 adev->mode_info.num_crtc = num_crtc;
1856 } else {
1857 adev->mode_info.num_crtc = 1;
1858 }
1859 break;
1860 }
1861 }
1862
1863 DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
1864 amdgpu_virtual_display, pci_address_name,
1865 adev->enable_virtual_display, adev->mode_info.num_crtc);
1866
1867 kfree(pciaddstr);
1868 }
1869 }
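
/*
 * Editor-added note: the format parsed above is a semicolon-separated
 * list of "<pci bus id>[,<number of crtcs>]" entries (or "all" to match
 * every device), e.g.:
 *
 *	modprobe amdgpu virtual_display=0000:04:00.0,2
 *
 * which enables a virtual display with two crtcs on the device at
 * PCI address 0000:04:00.0.
 */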
1870
1871 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
1872 {
1873 if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
1874 adev->mode_info.num_crtc = 1;
1875 adev->enable_virtual_display = true;
1876 DRM_INFO("virtual_display:%d, num_crtc:%d\n",
1877 adev->enable_virtual_display, adev->mode_info.num_crtc);
1878 }
1879 }
1880
1881 /**
1882 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
1883 *
1884 * @adev: amdgpu_device pointer
1885 *
1886 * Parses the asic configuration parameters specified in the gpu info
1887 * firmware and makes them available to the driver for use in configuring
1888 * the asic.
1889 * Returns 0 on success, -EINVAL on failure.
1890 */
1891 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1892 {
1893 const char *chip_name;
1894 char fw_name[40];
1895 int err;
1896 const struct gpu_info_firmware_header_v1_0 *hdr;
1897
1898 adev->firmware.gpu_info_fw = NULL;
1899
1900 if (adev->mman.discovery_bin)
1901 return 0;
1902
1903 switch (adev->asic_type) {
1904 default:
1905 return 0;
1906 case CHIP_VEGA10:
1907 chip_name = "vega10";
1908 break;
1909 case CHIP_VEGA12:
1910 chip_name = "vega12";
1911 break;
1912 case CHIP_RAVEN:
1913 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1914 chip_name = "raven2";
1915 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1916 chip_name = "picasso";
1917 else
1918 chip_name = "raven";
1919 break;
1920 case CHIP_ARCTURUS:
1921 chip_name = "arcturus";
1922 break;
1923 case CHIP_NAVI12:
1924 chip_name = "navi12";
1925 break;
1926 }
1927
1928 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
1929 err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
1930 if (err) {
1931 dev_err(adev->dev,
1932 "Failed to get gpu_info firmware \"%s\"\n",
1933 fw_name);
1934 goto out;
1935 }
1936
1937 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1938 amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
1939
1940 switch (hdr->version_major) {
1941 case 1:
1942 {
1943 const struct gpu_info_firmware_v1_0 *gpu_info_fw =
1944 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1945 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1946
1947 /*
1948 * Should be dropped when DAL no longer needs it.
1949 */
1950 if (adev->asic_type == CHIP_NAVI12)
1951 goto parse_soc_bounding_box;
1952
1953 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1954 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1955 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1956 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1957 adev->gfx.config.max_texture_channel_caches =
1958 le32_to_cpu(gpu_info_fw->gc_num_tccs);
1959 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1960 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1961 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1962 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1963 adev->gfx.config.double_offchip_lds_buf =
1964 le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
1965 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1966 adev->gfx.cu_info.max_waves_per_simd =
1967 le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
1968 adev->gfx.cu_info.max_scratch_slots_per_cu =
1969 le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
1970 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1971 if (hdr->version_minor >= 1) {
1972 const struct gpu_info_firmware_v1_1 *gpu_info_fw =
1973 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
1974 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1975 adev->gfx.config.num_sc_per_sh =
1976 le32_to_cpu(gpu_info_fw->num_sc_per_sh);
1977 adev->gfx.config.num_packer_per_sc =
1978 le32_to_cpu(gpu_info_fw->num_packer_per_sc);
1979 }
1980
1981 parse_soc_bounding_box:
1982 /*
1983 * soc bounding box info is not integrated in the discovery table,
1984 * so we always need to parse it from the gpu info firmware if needed.
1985 */
1986 if (hdr->version_minor == 2) {
1987 const struct gpu_info_firmware_v1_2 *gpu_info_fw =
1988 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
1989 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1990 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
1991 }
1992 break;
1993 }
1994 default:
1995 dev_err(adev->dev,
1996 "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
1997 err = -EINVAL;
1998 goto out;
1999 }
2000 out:
2001 return err;
2002 }
2003
2004 /**
2005 * amdgpu_device_ip_early_init - run early init for hardware IPs
2006 *
2007 * @adev: amdgpu_device pointer
2008 *
2009 * Early initialization pass for hardware IPs. The hardware IPs that make
2010 * up each asic are discovered and each IP's early_init callback is run. This
2011 * is the first stage in initializing the asic.
2012 * Returns 0 on success, negative error code on failure.
2013 */
2014 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2015 {
2016 struct pci_dev *parent;
2017 int i, r;
2018 bool total;
2019
2020 amdgpu_device_enable_virtual_display(adev);
2021
2022 if (amdgpu_sriov_vf(adev)) {
2023 r = amdgpu_virt_request_full_gpu(adev, true);
2024 if (r)
2025 return r;
2026 }
2027
2028 switch (adev->asic_type) {
2029 #ifdef CONFIG_DRM_AMDGPU_SI
2030 case CHIP_VERDE:
2031 case CHIP_TAHITI:
2032 case CHIP_PITCAIRN:
2033 case CHIP_OLAND:
2034 case CHIP_HAINAN:
2035 adev->family = AMDGPU_FAMILY_SI;
2036 r = si_set_ip_blocks(adev);
2037 if (r)
2038 return r;
2039 break;
2040 #endif
2041 #ifdef CONFIG_DRM_AMDGPU_CIK
2042 case CHIP_BONAIRE:
2043 case CHIP_HAWAII:
2044 case CHIP_KAVERI:
2045 case CHIP_KABINI:
2046 case CHIP_MULLINS:
2047 if (adev->flags & AMD_IS_APU)
2048 adev->family = AMDGPU_FAMILY_KV;
2049 else
2050 adev->family = AMDGPU_FAMILY_CI;
2051
2052 r = cik_set_ip_blocks(adev);
2053 if (r)
2054 return r;
2055 break;
2056 #endif
2057 case CHIP_TOPAZ:
2058 case CHIP_TONGA:
2059 case CHIP_FIJI:
2060 case CHIP_POLARIS10:
2061 case CHIP_POLARIS11:
2062 case CHIP_POLARIS12:
2063 case CHIP_VEGAM:
2064 case CHIP_CARRIZO:
2065 case CHIP_STONEY:
2066 if (adev->flags & AMD_IS_APU)
2067 adev->family = AMDGPU_FAMILY_CZ;
2068 else
2069 adev->family = AMDGPU_FAMILY_VI;
2070
2071 r = vi_set_ip_blocks(adev);
2072 if (r)
2073 return r;
2074 break;
2075 default:
2076 r = amdgpu_discovery_set_ip_blocks(adev);
2077 if (r)
2078 return r;
2079 break;
2080 }
2081
2082 if (amdgpu_has_atpx() &&
2083 (amdgpu_is_atpx_hybrid() ||
2084 amdgpu_has_atpx_dgpu_power_cntl()) &&
2085 ((adev->flags & AMD_IS_APU) == 0) &&
2086 !dev_is_removable(&adev->pdev->dev))
2087 adev->flags |= AMD_IS_PX;
2088
2089 if (!(adev->flags & AMD_IS_APU)) {
2090 parent = pcie_find_root_port(adev->pdev);
2091 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2092 }
2093
2094
2095 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2096 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2097 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2098 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2099 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2100 if (!amdgpu_device_pcie_dynamic_switching_supported())
2101 adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2102
2103 total = true;
2104 for (i = 0; i < adev->num_ip_blocks; i++) {
2105 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
2106 DRM_WARN("disabled ip block: %d <%s>\n",
2107 i, adev->ip_blocks[i].version->funcs->name);
2108 adev->ip_blocks[i].status.valid = false;
2109 } else {
2110 if (adev->ip_blocks[i].version->funcs->early_init) {
2111 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2112 if (r == -ENOENT) {
2113 adev->ip_blocks[i].status.valid = false;
2114 } else if (r) {
2115 DRM_ERROR("early_init of IP block <%s> failed %d\n",
2116 adev->ip_blocks[i].version->funcs->name, r);
2117 total = false;
2118 } else {
2119 adev->ip_blocks[i].status.valid = true;
2120 }
2121 } else {
2122 adev->ip_blocks[i].status.valid = true;
2123 }
2124 }
2125 /* get the vbios after the asic_funcs are set up */
2126 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2127 r = amdgpu_device_parse_gpu_info_fw(adev);
2128 if (r)
2129 return r;
2130
2131 /* Read BIOS */
2132 if (amdgpu_device_read_bios(adev)) {
2133 if (!amdgpu_get_bios(adev))
2134 return -EINVAL;
2135
2136 r = amdgpu_atombios_init(adev);
2137 if (r) {
2138 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2139 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2140 return r;
2141 }
2142 }
2143
2144 /* get pf2vf msg info at its earliest time */
2145 if (amdgpu_sriov_vf(adev))
2146 amdgpu_virt_init_data_exchange(adev);
2147
2148 }
2149 }
2150 if (!total)
2151 return -ENODEV;
2152
2153 amdgpu_amdkfd_device_probe(adev);
2154 adev->cg_flags &= amdgpu_cg_mask;
2155 adev->pg_flags &= amdgpu_pg_mask;
2156
2157 return 0;
2158 }
2159
2160 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2161 {
2162 int i, r;
2163
2164 for (i = 0; i < adev->num_ip_blocks; i++) {
2165 if (!adev->ip_blocks[i].status.sw)
2166 continue;
2167 if (adev->ip_blocks[i].status.hw)
2168 continue;
2169 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2170 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2171 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2172 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2173 if (r) {
2174 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2175 adev->ip_blocks[i].version->funcs->name, r);
2176 return r;
2177 }
2178 adev->ip_blocks[i].status.hw = true;
2179 }
2180 }
2181
2182 return 0;
2183 }
2184
2185 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2186 {
2187 int i, r;
2188
2189 for (i = 0; i < adev->num_ip_blocks; i++) {
2190 if (!adev->ip_blocks[i].status.sw)
2191 continue;
2192 if (adev->ip_blocks[i].status.hw)
2193 continue;
2194 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2195 if (r) {
2196 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2197 adev->ip_blocks[i].version->funcs->name, r);
2198 return r;
2199 }
2200 adev->ip_blocks[i].status.hw = true;
2201 }
2202
2203 return 0;
2204 }
2205
2206 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2207 {
2208 int r = 0;
2209 int i;
2210 uint32_t smu_version;
2211
2212 if (adev->asic_type >= CHIP_VEGA10) {
2213 for (i = 0; i < adev->num_ip_blocks; i++) {
2214 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2215 continue;
2216
2217 if (!adev->ip_blocks[i].status.sw)
2218 continue;
2219
2220 /* no need to do the fw loading again if already done */
2221 if (adev->ip_blocks[i].status.hw)
2222 break;
2223
2224 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2225 r = adev->ip_blocks[i].version->funcs->resume(adev);
2226 if (r) {
2227 DRM_ERROR("resume of IP block <%s> failed %d\n",
2228 adev->ip_blocks[i].version->funcs->name, r);
2229 return r;
2230 }
2231 } else {
2232 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2233 if (r) {
2234 DRM_ERROR("hw_init of IP block <%s> failed %d\n",
2235 adev->ip_blocks[i].version->funcs->name, r);
2236 return r;
2237 }
2238 }
2239
2240 adev->ip_blocks[i].status.hw = true;
2241 break;
2242 }
2243 }
2244
2245 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2246 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2247
2248 return r;
2249 }
2250
2251 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2252 {
2253 long timeout;
2254 int r, i;
2255
2256 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
2257 struct amdgpu_ring *ring = adev->rings[i];
2258
2259 /* No need to setup the GPU scheduler for rings that don't need it */
2260 if (!ring || ring->no_scheduler)
2261 continue;
2262
2263 switch (ring->funcs->type) {
2264 case AMDGPU_RING_TYPE_GFX:
2265 timeout = adev->gfx_timeout;
2266 break;
2267 case AMDGPU_RING_TYPE_COMPUTE:
2268 timeout = adev->compute_timeout;
2269 break;
2270 case AMDGPU_RING_TYPE_SDMA:
2271 timeout = adev->sdma_timeout;
2272 break;
2273 default:
2274 timeout = adev->video_timeout;
2275 break;
2276 }
2277
2278 r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
2279 ring->num_hw_submission, 0,
2280 timeout, adev->reset_domain->wq,
2281 ring->sched_score, ring->name,
2282 adev->dev);
2283 if (r) {
2284 DRM_ERROR("Failed to create scheduler on ring %s.\n",
2285 ring->name);
2286 return r;
2287 }
2288 }
2289
2290 amdgpu_xcp_update_partition_sched_list(adev);
2291
2292 return 0;
2293 }
2294
2295
2296 /**
2297 * amdgpu_device_ip_init - run init for hardware IPs
2298 *
2299 * @adev: amdgpu_device pointer
2300 *
2301 * Main initialization pass for hardware IPs. The list of all the hardware
2302 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
2303 * are run. sw_init initializes the software state associated with each IP
2304 * and hw_init initializes the hardware associated with each IP.
2305 * Returns 0 on success, negative error code on failure.
2306 */
2307 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2308 {
2309 int i, r;
2310
2311 r = amdgpu_ras_init(adev);
2312 if (r)
2313 return r;
2314
2315 for (i = 0; i < adev->num_ip_blocks; i++) {
2316 if (!adev->ip_blocks[i].status.valid)
2317 continue;
2318 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2319 if (r) {
2320 DRM_ERROR("sw_init of IP block <%s> failed %d\n",
2321 adev->ip_blocks[i].version->funcs->name, r);
2322 goto init_failed;
2323 }
2324 adev->ip_blocks[i].status.sw = true;
2325
2326 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2327 /* need to do common hw init early so everything is set up for gmc */
2328 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2329 if (r) {
2330 DRM_ERROR("hw_init %d failed %d\n", i, r);
2331 goto init_failed;
2332 }
2333 adev->ip_blocks[i].status.hw = true;
2334 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2335 /* need to do gmc hw init early so we can allocate gpu mem */
2336 /* Try to reserve bad pages early */
2337 if (amdgpu_sriov_vf(adev))
2338 amdgpu_virt_exchange_data(adev);
2339
2340 r = amdgpu_device_mem_scratch_init(adev);
2341 if (r) {
2342 DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
2343 goto init_failed;
2344 }
2345 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2346 if (r) {
2347 DRM_ERROR("hw_init %d failed %d\n", i, r);
2348 goto init_failed;
2349 }
2350 r = amdgpu_device_wb_init(adev);
2351 if (r) {
2352 DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
2353 goto init_failed;
2354 }
2355 adev->ip_blocks[i].status.hw = true;
2356
2357 /* right after GMC hw init, we create CSA */
2358 if (adev->gfx.mcbp) {
2359 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2360 AMDGPU_GEM_DOMAIN_VRAM |
2361 AMDGPU_GEM_DOMAIN_GTT,
2362 AMDGPU_CSA_SIZE);
2363 if (r) {
2364 DRM_ERROR("allocate CSA failed %d\n", r);
2365 goto init_failed;
2366 }
2367 }
2368 }
2369 }
2370
2371 if (amdgpu_sriov_vf(adev))
2372 amdgpu_virt_init_data_exchange(adev);
2373
2374 r = amdgpu_ib_pool_init(adev);
2375 if (r) {
2376 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2377 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2378 goto init_failed;
2379 }
2380
2381 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2382 if (r)
2383 goto init_failed;
2384
2385 r = amdgpu_device_ip_hw_init_phase1(adev);
2386 if (r)
2387 goto init_failed;
2388
2389 r = amdgpu_device_fw_loading(adev);
2390 if (r)
2391 goto init_failed;
2392
2393 r = amdgpu_device_ip_hw_init_phase2(adev);
2394 if (r)
2395 goto init_failed;
2396
2397 /*
2398 * retired pages will be loaded from eeprom and reserved here,
2399 * it should be called after amdgpu_device_ip_hw_init_phase2 since
2400 * for some ASICs the RAS EEPROM code relies on the SMU being fully
2401 * functional for I2C communication, which is only true at this point.
2402 *
2403 * amdgpu_ras_recovery_init may fail, but the upper layer only cares
2404 * about failures caused by a bad gpu state and stops the amdgpu init
2405 * process accordingly. For other failures, it still releases all the
2406 * resources and prints an error message, rather than returning a
2407 * negative value to the upper level.
2408 *
2409 * Note: theoretically, this should be called before all vram allocations
2410 * to protect retired pages from being abused.
2411 */
2412 r = amdgpu_ras_recovery_init(adev);
2413 if (r)
2414 goto init_failed;
2415
2416 /*
2417 * In case of XGMI, grab an extra reference to the reset domain for this device
2418 */
2419 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2420 if (amdgpu_xgmi_add_device(adev) == 0) {
2421 if (!amdgpu_sriov_vf(adev)) {
2422 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2423
2424 if (WARN_ON(!hive)) {
2425 r = -ENOENT;
2426 goto init_failed;
2427 }
2428
2429 if (!hive->reset_domain ||
2430 !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
2431 r = -ENOENT;
2432 amdgpu_put_xgmi_hive(hive);
2433 goto init_failed;
2434 }
2435
2436 /* Drop the early temporary reset domain we created for device */
2437 amdgpu_reset_put_reset_domain(adev->reset_domain);
2438 adev->reset_domain = hive->reset_domain;
2439 amdgpu_put_xgmi_hive(hive);
2440 }
2441 }
2442 }
2443
2444 r = amdgpu_device_init_schedulers(adev);
2445 if (r)
2446 goto init_failed;
2447
2448 /* Don't init kfd if whole hive need to be reset during init */
2449 if (!adev->gmc.xgmi.pending_reset) {
2450 kgd2kfd_init_zone_device(adev);
2451 amdgpu_amdkfd_device_init(adev);
2452 }
2453
2454 amdgpu_fru_get_product_info(adev);
2455
2456 init_failed:
2457
2458 return r;
2459 }
2460
2461 /**
2462 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
2463 *
2464 * @adev: amdgpu_device pointer
2465 *
2466 * Writes a reset magic value to the gart pointer in VRAM. The driver calls
2467 * this function before a GPU reset. If the value is retained after a
2468 * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
2469 */
2470 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2471 {
2472 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2473 }
2474
2475 /**
2476 * amdgpu_device_check_vram_lost - check if vram is valid
2477 *
2478 * @adev: amdgpu_device pointer
2479 *
2480 * Checks the reset magic value written to the gart pointer in VRAM.
2481 * The driver calls this after a GPU reset to see if the contents of
2482 * VRAM are lost or not.
2483 * Returns true if vram is lost, false if not.
2484 */
2485 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2486 {
2487 if (memcmp(adev->gart.ptr, adev->reset_magic,
2488 AMDGPU_RESET_MAGIC_NUM))
2489 return true;
2490
2491 if (!amdgpu_in_reset(adev))
2492 return false;
2493
2494 /*
2495 * For all ASICs with baco/mode1 reset, the VRAM is
2496 * always assumed to be lost.
2497 */
2498 switch (amdgpu_asic_reset_method(adev)) {
2499 case AMD_RESET_METHOD_BACO:
2500 case AMD_RESET_METHOD_MODE1:
2501 return true;
2502 default:
2503 return false;
2504 }
2505 }
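
/*
 * Editor-added sketch of how the two helpers above pair up around a
 * reset (illustrative only, not the actual reset path):
 *
 *	amdgpu_device_fill_reset_magic(adev);
 *	... perform the GPU reset ...
 *	if (amdgpu_device_check_vram_lost(adev))
 *		... restore/re-upload VRAM contents ...
 */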
2506
2507 /**
2508 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
2509 *
2510 * @adev: amdgpu_device pointer
2511 * @state: clockgating state (gate or ungate)
2512 *
2513 * The list of all the hardware IPs that make up the asic is walked and the
2514 * set_clockgating_state callbacks are run.
2515 * During the late init pass, this enables clockgating for the hardware IPs;
2516 * during fini or suspend, it disables clockgating for the hardware IPs.
2517 * Returns 0 on success, negative error code on failure.
2518 */
2519
2520 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2521 enum amd_clockgating_state state)
2522 {
2523 int i, j, r;
2524
2525 if (amdgpu_emu_mode == 1)
2526 return 0;
2527
2528 for (j = 0; j < adev->num_ip_blocks; j++) {
2529 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2530 if (!adev->ip_blocks[i].status.late_initialized)
2531 continue;
2532 /* skip CG for GFX, SDMA on S0ix */
2533 if (adev->in_s0ix &&
2534 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2535 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2536 continue;
2537 /* skip CG for VCE/UVD, it's handled specially */
2538 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2539 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2540 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2541 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2542 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2543 /* enable clockgating to save power */
2544 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2545 state);
2546 if (r) {
2547 DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
2548 adev->ip_blocks[i].version->funcs->name, r);
2549 return r;
2550 }
2551 }
2552 }
2553
2554 return 0;
2555 }
2556
2557 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2558 enum amd_powergating_state state)
2559 {
2560 int i, j, r;
2561
2562 if (amdgpu_emu_mode == 1)
2563 return 0;
2564
2565 for (j = 0; j < adev->num_ip_blocks; j++) {
2566 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2567 if (!adev->ip_blocks[i].status.late_initialized)
2568 continue;
2569 /* skip PG for GFX, SDMA on S0ix */
2570 if (adev->in_s0ix &&
2571 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2572 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2573 continue;
2574 /* skip CG for VCE/UVD, it's handled specially */
2575 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2576 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2577 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2578 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2579 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2580 /* enable powergating to save power */
2581 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2582 state);
2583 if (r) {
2584 DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
2585 adev->ip_blocks[i].version->funcs->name, r);
2586 return r;
2587 }
2588 }
2589 }
2590 return 0;
2591 }
2592
2593 static int amdgpu_device_enable_mgpu_fan_boost(void)
2594 {
2595 struct amdgpu_gpu_instance *gpu_ins;
2596 struct amdgpu_device *adev;
2597 int i, ret = 0;
2598
2599 mutex_lock(&mgpu_info.mutex);
2600
2601 /*
2602 * MGPU fan boost feature should be enabled
2603 * only when there are two or more dGPUs in
2604 * the system
2605 */
2606 if (mgpu_info.num_dgpu < 2)
2607 goto out;
2608
2609 for (i = 0; i < mgpu_info.num_dgpu; i++) {
2610 gpu_ins = &(mgpu_info.gpu_ins[i]);
2611 adev = gpu_ins->adev;
2612 if (!(adev->flags & AMD_IS_APU) &&
2613 !gpu_ins->mgpu_fan_enabled) {
2614 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2615 if (ret)
2616 break;
2617
2618 gpu_ins->mgpu_fan_enabled = 1;
2619 }
2620 }
2621
2622 out:
2623 mutex_unlock(&mgpu_info.mutex);
2624
2625 return ret;
2626 }
2627
2628 /**
2629 * amdgpu_device_ip_late_init - run late init for hardware IPs
2630 *
2631 * @adev: amdgpu_device pointer
2632 *
2633 * Late initialization pass for hardware IPs. The list of all the hardware
2634 * IPs that make up the asic is walked and the late_init callbacks are run.
2635 * late_init covers any special initialization that an IP requires
2636 * after all of the IPs have been initialized or something that needs to happen
2637 * late in the init process.
2638 * Returns 0 on success, negative error code on failure.
2639 */
2640 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2641 {
2642 struct amdgpu_gpu_instance *gpu_instance;
2643 int i = 0, r;
2644
2645 for (i = 0; i < adev->num_ip_blocks; i++) {
2646 if (!adev->ip_blocks[i].status.hw)
2647 continue;
2648 if (adev->ip_blocks[i].version->funcs->late_init) {
2649 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2650 if (r) {
2651 DRM_ERROR("late_init of IP block <%s> failed %d\n",
2652 adev->ip_blocks[i].version->funcs->name, r);
2653 return r;
2654 }
2655 }
2656 adev->ip_blocks[i].status.late_initialized = true;
2657 }
2658
2659 r = amdgpu_ras_late_init(adev);
2660 if (r) {
2661 DRM_ERROR("amdgpu_ras_late_init failed %d", r);
2662 return r;
2663 }
2664
2665 amdgpu_ras_set_error_query_ready(adev, true);
2666
2667 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2668 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2669
2670 amdgpu_device_fill_reset_magic(adev);
2671
2672 r = amdgpu_device_enable_mgpu_fan_boost();
2673 if (r)
2674 DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
2675
2676 /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
2677 if (amdgpu_passthrough(adev) &&
2678 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2679 adev->asic_type == CHIP_ALDEBARAN))
2680 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2681
2682 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2683 mutex_lock(&mgpu_info.mutex);
2684
2685 /*
2686 * Reset device p-state to low as this was booted with high.
2687 *
2688 * This should be performed only after all devices from the same
2689 * hive get initialized.
2690 *
2691 * However, the number of devices in the hive is not known in advance,
2692 * as it is counted one by one during device initialization.
2693 *
2694 * So, we wait until all XGMI interlinked devices are initialized.
2695 * This may bring some delays as those devices may come from
2696 * different hives. But that should be OK.
2697 */
2698 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2699 for (i = 0; i < mgpu_info.num_gpu; i++) {
2700 gpu_instance = &(mgpu_info.gpu_ins[i]);
2701 if (gpu_instance->adev->flags & AMD_IS_APU)
2702 continue;
2703
2704 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2705 AMDGPU_XGMI_PSTATE_MIN);
2706 if (r) {
2707 DRM_ERROR("pstate setting failed (%d).\n", r);
2708 break;
2709 }
2710 }
2711 }
2712
2713 mutex_unlock(&mgpu_info.mutex);
2714 }
2715
2716 return 0;
2717 }
2718
2719 /**
2720 * amdgpu_device_smu_fini_early - smu hw_fini wrapper
2721 *
2722 * @adev: amdgpu_device pointer
2723 *
2724 * For ASICs that need to disable the SMC first
2725 */
2726 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2727 {
2728 int i, r;
2729
2730 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2731 return;
2732
2733 for (i = 0; i < adev->num_ip_blocks; i++) {
2734 if (!adev->ip_blocks[i].status.hw)
2735 continue;
2736 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2737 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2738 /* XXX handle errors */
2739 if (r) {
2740 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2741 adev->ip_blocks[i].version->funcs->name, r);
2742 }
2743 adev->ip_blocks[i].status.hw = false;
2744 break;
2745 }
2746 }
2747 }
2748
2749 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2750 {
2751 int i, r;
2752
2753 for (i = 0; i < adev->num_ip_blocks; i++) {
2754 if (!adev->ip_blocks[i].version->funcs->early_fini)
2755 continue;
2756
2757 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2758 if (r) {
2759 DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
2760 adev->ip_blocks[i].version->funcs->name, r);
2761 }
2762 }
2763
2764 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2765 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2766
2767 amdgpu_amdkfd_suspend(adev, false);
2768
2769 /* Workaround for ASICs that need to disable the SMC first */
2770 amdgpu_device_smu_fini_early(adev);
2771
2772 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2773 if (!adev->ip_blocks[i].status.hw)
2774 continue;
2775
2776 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2777 /* XXX handle errors */
2778 if (r) {
2779 DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
2780 adev->ip_blocks[i].version->funcs->name, r);
2781 }
2782
2783 adev->ip_blocks[i].status.hw = false;
2784 }
2785
2786 if (amdgpu_sriov_vf(adev)) {
2787 if (amdgpu_virt_release_full_gpu(adev, false))
2788 DRM_ERROR("failed to release exclusive mode on fini\n");
2789 }
2790
2791 return 0;
2792 }
2793
2794 /**
2795 * amdgpu_device_ip_fini - run fini for hardware IPs
2796 *
2797 * @adev: amdgpu_device pointer
2798 *
2799 * Main teardown pass for hardware IPs. The list of all the hardware
2800 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
2801 * are run. hw_fini tears down the hardware associated with each IP
2802 * and sw_fini tears down any software state associated with each IP.
2803 * Returns 0 on success, negative error code on failure.
2804 */
2805 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2806 {
2807 int i, r;
2808
2809 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2810 amdgpu_virt_release_ras_err_handler_data(adev);
2811
2812 if (adev->gmc.xgmi.num_physical_nodes > 1)
2813 amdgpu_xgmi_remove_device(adev);
2814
2815 amdgpu_amdkfd_device_fini_sw(adev);
2816
2817 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2818 if (!adev->ip_blocks[i].status.sw)
2819 continue;
2820
2821 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2822 amdgpu_ucode_free_bo(adev);
2823 amdgpu_free_static_csa(&adev->virt.csa_obj);
2824 amdgpu_device_wb_fini(adev);
2825 amdgpu_device_mem_scratch_fini(adev);
2826 amdgpu_ib_pool_fini(adev);
2827 }
2828
2829 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2830 /* XXX handle errors */
2831 if (r) {
2832 DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
2833 adev->ip_blocks[i].version->funcs->name, r);
2834 }
2835 adev->ip_blocks[i].status.sw = false;
2836 adev->ip_blocks[i].status.valid = false;
2837 }
2838
2839 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2840 if (!adev->ip_blocks[i].status.late_initialized)
2841 continue;
2842 if (adev->ip_blocks[i].version->funcs->late_fini)
2843 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2844 adev->ip_blocks[i].status.late_initialized = false;
2845 }
2846
2847 amdgpu_ras_fini(adev);
2848
2849 return 0;
2850 }
2851
2852 /**
2853 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
2854 *
2855 * @work: work_struct.
2856 */
2857 static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
2858 {
2859 struct amdgpu_device *adev =
2860 container_of(work, struct amdgpu_device, delayed_init_work.work);
2861 int r;
2862
2863 r = amdgpu_ib_ring_tests(adev);
2864 if (r)
2865 DRM_ERROR("ib ring test failed (%d).\n", r);
2866 }
2867
2868 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
2869 {
2870 struct amdgpu_device *adev =
2871 container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
2872
2873 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2874 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2875
2876 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2877 adev->gfx.gfx_off_state = true;
2878 }
2879
2880 /**
2881 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
2882 *
2883 * @adev: amdgpu_device pointer
2884 *
2885 * Main suspend function for hardware IPs. The list of all the hardware
2886 * IPs that make up the asic is walked, clockgating is disabled and the
2887 * suspend callbacks are run. suspend puts the hardware and software state
2888 * in each IP into a state suitable for suspend.
2889 * Returns 0 on success, negative error code on failure.
2890 */
2891 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2892 {
2893 int i, r;
2894
2895 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2896 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2897
2898 /*
2899 * Per the PMFW team's suggestion, the driver needs to handle disabling
2900 * the gfxoff and df cstate features for the gpu reset (e.g. Mode1Reset)
2901 * scenario. Add the missing df cstate disablement here.
2902 */
2903 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2904 dev_warn(adev->dev, "Failed to disallow df cstate");
2905
2906 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2907 if (!adev->ip_blocks[i].status.valid)
2908 continue;
2909
2910 /* displays are handled separately */
2911 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2912 continue;
2913
2914 /* XXX handle errors */
2915 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2916 /* XXX handle errors */
2917 if (r) {
2918 DRM_ERROR("suspend of IP block <%s> failed %d\n",
2919 adev->ip_blocks[i].version->funcs->name, r);
2920 return r;
2921 }
2922
2923 adev->ip_blocks[i].status.hw = false;
2924 }
2925
2926 return 0;
2927 }
2928
2929 /**
2930 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
2931 *
2932 * @adev: amdgpu_device pointer
2933 *
2934 * Main suspend function for hardware IPs. The list of all the hardware
2935 * IPs that make up the asic is walked, clockgating is disabled and the
2936 * suspend callbacks are run. suspend puts the hardware and software state
2937 * in each IP into a state suitable for suspend.
2938 * Returns 0 on success, negative error code on failure.
2939 */
2940 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2941 {
2942 int i, r;
2943
2944 if (adev->in_s0ix)
2945 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2946
2947 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2948 if (!adev->ip_blocks[i].status.valid)
2949 continue;
2950 /* displays are handled in phase1 */
2951 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2952 continue;
2953 /* PSP lost connection when err_event_athub occurs */
2954 if (amdgpu_ras_intr_triggered() &&
2955 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2956 adev->ip_blocks[i].status.hw = false;
2957 continue;
2958 }
2959
2960 /* skip unnecessary suspend if we have not initialized them yet */
2961 if (adev->gmc.xgmi.pending_reset &&
2962 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2963 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2964 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2965 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2966 adev->ip_blocks[i].status.hw = false;
2967 continue;
2968 }
2969
2970 /* skip suspend of gfx/mes and psp for S0ix
2971 * gfx is in gfxoff state, so on resume it will exit gfxoff just
2972 * like at runtime. PSP is also part of the always on hardware
2973 * so no need to suspend it.
2974 */
2975 if (adev->in_s0ix &&
2976 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
2977 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2978 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
2979 continue;
2980
2981 /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
2982 if (adev->in_s0ix &&
2983 (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
2984 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2985 continue;
2986
2987 /* During cold boot, PSP provides the IMU and RLC FW binaries to the TOS.
2988 * These live in the TMR and are expected to be reused: PSP-TOS reloads
2989 * them from this location, and RLC autoload is also loaded from here
2990 * based on the PMFW -> PSP message during the re-init sequence.
2991 * Therefore, psp suspend & resume should be skipped to avoid destroying
2992 * the TMR and reloading the FWs again on IMU-enabled APU ASICs.
2993 */
2994 if (amdgpu_in_reset(adev) &&
2995 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
2996 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
2997 continue;
2998
2999 /* XXX handle errors */
3000 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3001 /* XXX handle errors */
3002 if (r) {
3003 DRM_ERROR("suspend of IP block <%s> failed %d\n",
3004 adev->ip_blocks[i].version->funcs->name, r);
3005 }
3006 adev->ip_blocks[i].status.hw = false;
3007 /* handle putting the SMC in the appropriate state */
3008 if (!amdgpu_sriov_vf(adev)) {
3009 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3010 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3011 if (r) {
3012 DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
3013 adev->mp1_state, r);
3014 return r;
3015 }
3016 }
3017 }
3018 }
3019
3020 return 0;
3021 }
3022
3023 /**
3024 * amdgpu_device_ip_suspend - run suspend for hardware IPs
3025 *
3026 * @adev: amdgpu_device pointer
3027 *
3028 * Main suspend function for hardware IPs. The list of all the hardware
3029 * IPs that make up the asic is walked, clockgating is disabled and the
3030 * suspend callbacks are run. suspend puts the hardware and software state
3031 * in each IP into a state suitable for suspend.
3032 * Returns 0 on success, negative error code on failure.
3033 */
3034 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3035 {
3036 int r;
3037
3038 if (amdgpu_sriov_vf(adev)) {
3039 amdgpu_virt_fini_data_exchange(adev);
3040 amdgpu_virt_request_full_gpu(adev, false);
3041 }
3042
3043 r = amdgpu_device_ip_suspend_phase1(adev);
3044 if (r)
3045 return r;
3046 r = amdgpu_device_ip_suspend_phase2(adev);
3047
3048 if (amdgpu_sriov_vf(adev))
3049 amdgpu_virt_release_full_gpu(adev, false);
3050
3051 return r;
3052 }
3053
3054 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3055 {
3056 int i, r;
3057
3058 static enum amd_ip_block_type ip_order[] = {
3059 AMD_IP_BLOCK_TYPE_COMMON,
3060 AMD_IP_BLOCK_TYPE_GMC,
3061 AMD_IP_BLOCK_TYPE_PSP,
3062 AMD_IP_BLOCK_TYPE_IH,
3063 };
3064
3065 for (i = 0; i < adev->num_ip_blocks; i++) {
3066 int j;
3067 struct amdgpu_ip_block *block;
3068
3069 block = &adev->ip_blocks[i];
3070 block->status.hw = false;
3071
3072 for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
3073
3074 if (block->version->type != ip_order[j] ||
3075 !block->status.valid)
3076 continue;
3077
3078 r = block->version->funcs->hw_init(adev);
3079 DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3080 if (r)
3081 return r;
3082 block->status.hw = true;
3083 }
3084 }
3085
3086 return 0;
3087 }
3088
3089 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3090 {
3091 int i, r;
3092
3093 static enum amd_ip_block_type ip_order[] = {
3094 AMD_IP_BLOCK_TYPE_SMC,
3095 AMD_IP_BLOCK_TYPE_DCE,
3096 AMD_IP_BLOCK_TYPE_GFX,
3097 AMD_IP_BLOCK_TYPE_SDMA,
3098 AMD_IP_BLOCK_TYPE_MES,
3099 AMD_IP_BLOCK_TYPE_UVD,
3100 AMD_IP_BLOCK_TYPE_VCE,
3101 AMD_IP_BLOCK_TYPE_VCN,
3102 AMD_IP_BLOCK_TYPE_JPEG
3103 };
3104
3105 for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
3106 int j;
3107 struct amdgpu_ip_block *block;
3108
3109 for (j = 0; j < adev->num_ip_blocks; j++) {
3110 block = &adev->ip_blocks[j];
3111
3112 if (block->version->type != ip_order[i] ||
3113 !block->status.valid ||
3114 block->status.hw)
3115 continue;
3116
3117 if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
3118 r = block->version->funcs->resume(adev);
3119 else
3120 r = block->version->funcs->hw_init(adev);
3121
3122 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
3123 if (r)
3124 return r;
3125 block->status.hw = true;
3126 }
3127 }
3128
3129 return 0;
3130 }
3131
3132 /**
3133 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
3134 *
3135 * @adev: amdgpu_device pointer
3136 *
3137 * First resume function for hardware IPs. The list of all the hardware
3138 * IPs that make up the asic is walked and the resume callbacks are run for
3139 * COMMON, GMC, and IH. resume puts the hardware into a functional state
3140 * after a suspend and updates the software state as necessary. This
3141 * function is also used for restoring the GPU after a GPU reset.
3142 * Returns 0 on success, negative error code on failure.
3143 */
3144 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3145 {
3146 int i, r;
3147
3148 for (i = 0; i < adev->num_ip_blocks; i++) {
3149 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3150 continue;
3151 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3152 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3153 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3154 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3155
3156 r = adev->ip_blocks[i].version->funcs->resume(adev);
3157 if (r) {
3158 DRM_ERROR("resume of IP block <%s> failed %d\n",
3159 adev->ip_blocks[i].version->funcs->name, r);
3160 return r;
3161 }
3162 adev->ip_blocks[i].status.hw = true;
3163 }
3164 }
3165
3166 return 0;
3167 }
3168
3169 /**
3170 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
3171 *
3172 * @adev: amdgpu_device pointer
3173 *
3174 * Second resume function for hardware IPs. The list of all the hardware
3175 * IPs that make up the asic is walked and the resume callbacks are run for
3176 * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
3177 * functional state after a suspend and updates the software state as
3178 * necessary. This function is also used for restoring the GPU after a GPU
3179 * reset.
3180 * Returns 0 on success, negative error code on failure.
3181 */
3182 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3183 {
3184 int i, r;
3185
3186 for (i = 0; i < adev->num_ip_blocks; i++) {
3187 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3188 continue;
3189 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3190 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3191 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3192 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3193 continue;
3194 r = adev->ip_blocks[i].version->funcs->resume(adev);
3195 if (r) {
3196 DRM_ERROR("resume of IP block <%s> failed %d\n",
3197 adev->ip_blocks[i].version->funcs->name, r);
3198 return r;
3199 }
3200 adev->ip_blocks[i].status.hw = true;
3201 }
3202
3203 return 0;
3204 }
3205
3206 /**
3207 * amdgpu_device_ip_resume - run resume for hardware IPs
3208 *
3209 * @adev: amdgpu_device pointer
3210 *
3211 * Main resume function for hardware IPs. The hardware IPs
3212 * are split into two resume functions because they are
3213 * also used in recovering from a GPU reset and some additional
3214 * steps need to be taken between them. In this case (S3/S4) they are
3215 * run sequentially.
3216 * Returns 0 on success, negative error code on failure.
3217 */
3218 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3219 {
3220 int r;
3221
3222 r = amdgpu_device_ip_resume_phase1(adev);
3223 if (r)
3224 return r;
3225
3226 r = amdgpu_device_fw_loading(adev);
3227 if (r)
3228 return r;
3229
3230 r = amdgpu_device_ip_resume_phase2(adev);
3231
3232 return r;
3233 }
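
/*
 * Editor-added sketch: in suspend/resume (and parts of reset recovery)
 * the suspend and resume entry points pair up roughly as:
 *
 *	r = amdgpu_device_ip_suspend(adev);
 *	... power down, reset, or sleep ...
 *	r = amdgpu_device_ip_resume(adev);
 */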
3234
3235 /**
3236 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
3237 *
3238 * @adev: amdgpu_device pointer
3239 *
3240 * Query the VBIOS data tables to determine if the board supports SR-IOV.
3241 */
3242 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3243 {
3244 if (amdgpu_sriov_vf(adev)) {
3245 if (adev->is_atom_fw) {
3246 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3247 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3248 } else {
3249 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3250 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3251 }
3252
3253 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3254 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3255 }
3256 }
3257
3258 /**
3259 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
3260 *
3261 * @asic_type: AMD asic type
3262 *
3263 * Check if there is DC (new modesetting infrastructure) support for an asic.
3264 * returns true if DC has support, false if not.
3265 */
3266 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
3267 {
3268 switch (asic_type) {
3269 #ifdef CONFIG_DRM_AMDGPU_SI
3270 case CHIP_HAINAN:
3271 #endif
3272 case CHIP_TOPAZ:
3273 /* chips with no display hardware */
3274 return false;
3275 #if defined(CONFIG_DRM_AMD_DC)
3276 case CHIP_TAHITI:
3277 case CHIP_PITCAIRN:
3278 case CHIP_VERDE:
3279 case CHIP_OLAND:
3280 /*
3281 * We have systems in the wild with these ASICs that require
3282 * LVDS and VGA support which is not supported with DC.
3283 *
3284 * Fall back to the non-DC driver here by default so as not to
3285 * cause regressions.
3286 */
3287 #if defined(CONFIG_DRM_AMD_DC_SI)
3288 return amdgpu_dc > 0;
3289 #else
3290 return false;
3291 #endif
3292 case CHIP_BONAIRE:
3293 case CHIP_KAVERI:
3294 case CHIP_KABINI:
3295 case CHIP_MULLINS:
3296 /*
3297 * We have systems in the wild with these ASICs that require
3298 * VGA support which is not supported with DC.
3299 *
3300 * Fall back to the non-DC driver here by default so as not to
3301 * cause regressions.
3302 */
3303 return amdgpu_dc > 0;
3304 default:
3305 return amdgpu_dc != 0;
3306 #else
3307 default:
3308 if (amdgpu_dc > 0)
3309 DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
3310 return false;
3311 #endif
3312 }
3313 }
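
/*
 * Editor-added note: amdgpu_dc above is driven by the amdgpu.dc module
 * parameter, so DC can be forced on for the opt-in ASICs, e.g.:
 *
 *	modprobe amdgpu dc=1
 */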
3314
3315 /**
3316 * amdgpu_device_has_dc_support - check if dc is supported
3317 *
3318 * @adev: amdgpu_device pointer
3319 *
3320 * Returns true for supported, false for not supported
3321 */
3322 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3323 {
3324 if (adev->enable_virtual_display ||
3325 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3326 return false;
3327
3328 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3329 }
3330
3331 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
3332 {
3333 struct amdgpu_device *adev =
3334 container_of(__work, struct amdgpu_device, xgmi_reset_work);
3335 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3336
3337 /* It's a bug to not have a hive within this function */
3338 if (WARN_ON(!hive))
3339 return;
3340
3341 /*
3342 * Use task barrier to synchronize all xgmi reset works across the
3343 * hive. task_barrier_enter and task_barrier_exit will block
3344 * until all the threads running the xgmi reset works reach
3345 * those points. task_barrier_full will do both blocks.
3346 */
3347 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3348
3349 task_barrier_enter(&hive->tb);
3350 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3351
3352 if (adev->asic_reset_res)
3353 goto fail;
3354
3355 task_barrier_exit(&hive->tb);
3356 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3357
3358 if (adev->asic_reset_res)
3359 goto fail;
3360
3361 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3362 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3363 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3364 } else {
3365
3366 task_barrier_full(&hive->tb);
3367 adev->asic_reset_res = amdgpu_asic_reset(adev);
3368 }
3369
3370 fail:
3371 if (adev->asic_reset_res)
3372 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
3373 adev->asic_reset_res, adev_to_drm(adev)->unique);
3374 amdgpu_put_xgmi_hive(hive);
3375 }
3376
3377 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3378 {
3379 char *input = amdgpu_lockup_timeout;
3380 char *timeout_setting = NULL;
3381 int index = 0;
3382 long timeout;
3383 int ret = 0;
3384
3385 /*
3386 * By default the timeout for non-compute jobs is 10000 ms
3387 * and 60000 ms for compute jobs.
3388 * In SR-IOV or passthrough mode, the timeout for compute
3389 * jobs is 60000 ms by default.
3390 */
3391 adev->gfx_timeout = msecs_to_jiffies(10000);
3392 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3393 if (amdgpu_sriov_vf(adev))
3394 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3395 msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
3396 else
3397 adev->compute_timeout = msecs_to_jiffies(60000);
3398
3399 if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3400 while ((timeout_setting = strsep(&input, ",")) &&
3401 strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
3402 ret = kstrtol(timeout_setting, 0, &timeout);
3403 if (ret)
3404 return ret;
3405
3406 if (timeout == 0) {
3407 index++;
3408 continue;
3409 } else if (timeout < 0) {
3410 timeout = MAX_SCHEDULE_TIMEOUT;
3411 dev_warn(adev->dev, "lockup timeout disabled");
3412 add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
3413 } else {
3414 timeout = msecs_to_jiffies(timeout);
3415 }
3416
3417 switch (index++) {
3418 case 0:
3419 adev->gfx_timeout = timeout;
3420 break;
3421 case 1:
3422 adev->compute_timeout = timeout;
3423 break;
3424 case 2:
3425 adev->sdma_timeout = timeout;
3426 break;
3427 case 3:
3428 adev->video_timeout = timeout;
3429 break;
3430 default:
3431 break;
3432 }
3433 }
3434 /*
3435 * There is only one value specified and
3436 * it should apply to all non-compute jobs.
3437 */
3438 if (index == 1) {
3439 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3440 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3441 adev->compute_timeout = adev->gfx_timeout;
3442 }
3443 }
3444
3445 return ret;
3446 }
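
/*
 * Editor-added note: per the index switch above, amdgpu.lockup_timeout
 * takes up to four comma-separated values in milliseconds, in the order
 * gfx,compute,sdma,video; 0 keeps the default for that slot and a
 * negative value disables the timeout entirely, e.g.:
 *
 *	modprobe amdgpu lockup_timeout=10000,60000,10000,10000
 */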
3447
3448 /**
3449 * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
3450 *
3451 * @adev: amdgpu_device pointer
3452 *
3453 * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in passthrough mode
3454 */
3455 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3456 {
3457 struct iommu_domain *domain;
3458
3459 domain = iommu_get_domain_for_dev(adev->dev);
3460 if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
3461 adev->ram_is_direct_mapped = true;
3462 }
3463
3464 static const struct attribute *amdgpu_dev_attributes[] = {
3465 &dev_attr_pcie_replay_count.attr,
3466 NULL
3467 };
3468
3469 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3470 {
3471 if (amdgpu_mcbp == 1)
3472 adev->gfx.mcbp = true;
3473 else if (amdgpu_mcbp == 0)
3474 adev->gfx.mcbp = false;
3475
3476 if (amdgpu_sriov_vf(adev))
3477 adev->gfx.mcbp = true;
3478
3479 if (adev->gfx.mcbp)
3480 DRM_INFO("MCBP is enabled\n");
3481 }
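
/*
 * Editor-added note: per the checks above, amdgpu.mcbp=1 forces
 * mid-command-buffer preemption on and amdgpu.mcbp=0 forces it off;
 * any other value (the presumed auto default) leaves it to the driver,
 * which always enables it under SR-IOV, e.g.:
 *
 *	modprobe amdgpu mcbp=1
 */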
3482
3483 /**
3484 * amdgpu_device_init - initialize the driver
3485 *
3486 * @adev: amdgpu_device pointer
3487 * @flags: driver flags
3488 *
3489 * Initializes the driver info and hw (all asics).
3490 * Returns 0 for success or an error on failure.
3491 * Called at driver startup.
3492 */
3493 int amdgpu_device_init(struct amdgpu_device *adev,
3494 uint32_t flags)
3495 {
3496 struct drm_device *ddev = adev_to_drm(adev);
3497 struct pci_dev *pdev = adev->pdev;
3498 int r, i;
3499 bool px = false;
3500 u32 max_MBps;
3501 int tmp;
3502
3503 adev->shutdown = false;
3504 adev->flags = flags;
3505
3506 if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
3507 adev->asic_type = amdgpu_force_asic_type;
3508 else
3509 adev->asic_type = flags & AMD_ASIC_MASK;
3510
3511 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3512 if (amdgpu_emu_mode == 1)
3513 adev->usec_timeout *= 10;
3514 adev->gmc.gart_size = 512 * 1024 * 1024;
3515 adev->accel_working = false;
3516 adev->num_rings = 0;
3517 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3518 adev->mman.buffer_funcs = NULL;
3519 adev->mman.buffer_funcs_ring = NULL;
3520 adev->vm_manager.vm_pte_funcs = NULL;
3521 adev->vm_manager.vm_pte_num_scheds = 0;
3522 adev->gmc.gmc_funcs = NULL;
3523 adev->harvest_ip_mask = 0x0;
3524 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3525 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3526
3527 adev->smc_rreg = &amdgpu_invalid_rreg;
3528 adev->smc_wreg = &amdgpu_invalid_wreg;
3529 adev->pcie_rreg = &amdgpu_invalid_rreg;
3530 adev->pcie_wreg = &amdgpu_invalid_wreg;
3531 adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3532 adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3533 adev->pciep_rreg = &amdgpu_invalid_rreg;
3534 adev->pciep_wreg = &amdgpu_invalid_wreg;
3535 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3536 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3537 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3538 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3539 adev->didt_rreg = &amdgpu_invalid_rreg;
3540 adev->didt_wreg = &amdgpu_invalid_wreg;
3541 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3542 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3543 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3544 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3545
3546 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
3547 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3548 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
3549
3550 /* mutex initializations are all done here so we
3551 * can recall functions without having locking issues
3552 */
3553 mutex_init(&adev->firmware.mutex);
3554 mutex_init(&adev->pm.mutex);
3555 mutex_init(&adev->gfx.gpu_clock_mutex);
3556 mutex_init(&adev->srbm_mutex);
3557 mutex_init(&adev->gfx.pipe_reserve_mutex);
3558 mutex_init(&adev->gfx.gfx_off_mutex);
3559 mutex_init(&adev->gfx.partition_mutex);
3560 mutex_init(&adev->grbm_idx_mutex);
3561 mutex_init(&adev->mn_lock);
3562 mutex_init(&adev->virt.vf_errors.lock);
3563 hash_init(adev->mn_hash);
3564 mutex_init(&adev->psp.mutex);
3565 mutex_init(&adev->notifier_lock);
3566 mutex_init(&adev->pm.stable_pstate_ctx_lock);
3567 mutex_init(&adev->benchmark_mutex);
3568
3569 amdgpu_device_init_apu_flags(adev);
3570
3571 r = amdgpu_device_check_arguments(adev);
3572 if (r)
3573 return r;
3574
3575 spin_lock_init(&adev->mmio_idx_lock);
3576 spin_lock_init(&adev->smc_idx_lock);
3577 spin_lock_init(&adev->pcie_idx_lock);
3578 spin_lock_init(&adev->uvd_ctx_idx_lock);
3579 spin_lock_init(&adev->didt_idx_lock);
3580 spin_lock_init(&adev->gc_cac_idx_lock);
3581 spin_lock_init(&adev->se_cac_idx_lock);
3582 spin_lock_init(&adev->audio_endpt_idx_lock);
3583 spin_lock_init(&adev->mm_stats.lock);
3584
3585 INIT_LIST_HEAD(&adev->shadow_list);
3586 mutex_init(&adev->shadow_list_lock);
3587
3588 INIT_LIST_HEAD(&adev->reset_list);
3589
3590 INIT_LIST_HEAD(&adev->ras_list);
3591
3592 INIT_DELAYED_WORK(&adev->delayed_init_work,
3593 amdgpu_device_delayed_init_work_handler);
3594 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3595 amdgpu_device_delay_enable_gfx_off);
3596
3597 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3598
3599 adev->gfx.gfx_off_req_count = 1;
3600 adev->gfx.gfx_off_residency = 0;
3601 adev->gfx.gfx_off_entrycount = 0;
3602 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3603
3604 atomic_set(&adev->throttling_logging_enabled, 1);
3605 /*
3606 * If throttling continues, logging will be performed every minute
3607 * to avoid log flooding. "-1" is subtracted since the thermal
3608 * throttling interrupt comes every second. Thus, the total logging
3609 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
3610 * for throttling interrupt) = 60 seconds.
3611 */
3612 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3613 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3614
3615 /* Registers mapping */
3616 /* TODO: block userspace mapping of io register */
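/*
 * On BONAIRE and newer ASICs the register aperture lives in BAR 5;
 * older SI parts expose it in BAR 2 (which newer ASICs repurpose as
 * the doorbell aperture).
 */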
3617 if (adev->asic_type >= CHIP_BONAIRE) {
3618 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3619 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3620 } else {
3621 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3622 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3623 }
3624
3625 for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
3626 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3627
3628 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3629 if (!adev->rmmio)
3630 return -ENOMEM;
3631
3632 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3633 DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
3634
3635 /*
3636 * The reset domain needs to be present early, before the XGMI hive is
3637 * discovered (if any) and initialized, to use the reset semaphore and
3638 * in_gpu_reset flag early on during init and before calling RREG32.
3639 */
3640 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3641 if (!adev->reset_domain)
3642 return -ENOMEM;
3643
3644 /* detect hw virtualization here */
3645 amdgpu_detect_virtualization(adev);
3646
3647 amdgpu_device_get_pcie_info(adev);
3648
3649 r = amdgpu_device_get_job_timeout_settings(adev);
3650 if (r) {
3651 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3652 return r;
3653 }
3654
3655 /* early init functions */
3656 r = amdgpu_device_ip_early_init(adev);
3657 if (r)
3658 return r;
3659
3660 amdgpu_device_set_mcbp(adev);
3661
3662 /* Get rid of things like offb */
3663 r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
3664 if (r)
3665 return r;
3666
3667 /* Enable TMZ based on IP_VERSION */
3668 amdgpu_gmc_tmz_set(adev);
3669
3670 amdgpu_gmc_noretry_set(adev);
3671 /* Need to get xgmi info early to decide the reset behavior */
3672 if (adev->gmc.xgmi.supported) {
3673 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3674 if (r)
3675 return r;
3676 }
3677
3678 /* enable PCIE atomic ops */
3679 if (amdgpu_sriov_vf(adev)) {
3680 if (adev->virt.fw_reserve.p_pf2vf)
3681 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3682 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3683 (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3684 /* APUs with GFX9 onwards don't rely on PCIe atomics; an internal
3685 * path natively supports atomics, so set have_atomics_support to true.
3686 */
3687 } else if ((adev->flags & AMD_IS_APU) &&
3688 (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) {
3689 adev->have_atomics_support = true;
3690 } else {
3691 adev->have_atomics_support =
3692 !pci_enable_atomic_ops_to_root(adev->pdev,
3693 PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
3694 PCI_EXP_DEVCAP2_ATOMIC_COMP64);
3695 }
3696
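/*
 * Without PCIe atomics (32/64-bit CAS to the root port), features that
 * depend on atomic ops to system memory, notably KFD/ROCm user-mode
 * queues on some ASICs, will be unavailable; graphics is unaffected.
 */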
3697 if (!adev->have_atomics_support)
3698 dev_info(adev->dev, "PCIE atomic ops are not supported\n");
3699
3700 /* doorbell bar mapping and doorbell index init */
3701 amdgpu_doorbell_init(adev);
3702
3703 if (amdgpu_emu_mode == 1) {
3704 /* post the asic on emulation mode */
3705 emu_soc_asic_init(adev);
3706 goto fence_driver_init;
3707 }
3708
3709 amdgpu_reset_init(adev);
3710
3711 /* detect if we are with an SRIOV vbios */
3712 if (adev->bios)
3713 amdgpu_device_detect_sriov_bios(adev);
3714
3715 /* check if we need to reset the asic,
3716 * e.g., the driver was not cleanly unloaded previously.
3717 */
3718 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3719 if (adev->gmc.xgmi.num_physical_nodes) {
3720 dev_info(adev->dev, "Pending hive reset.\n");
3721 adev->gmc.xgmi.pending_reset = true;
3722 /* Only need to init necessary block for SMU to handle the reset */
3723 for (i = 0; i < adev->num_ip_blocks; i++) {
3724 if (!adev->ip_blocks[i].status.valid)
3725 continue;
3726 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3727 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3728 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3729 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3730 DRM_DEBUG("IP %s disabled for hw_init.\n",
3731 adev->ip_blocks[i].version->funcs->name);
3732 adev->ip_blocks[i].status.hw = true;
3733 }
3734 }
3735 } else {
3736 tmp = amdgpu_reset_method;
3737 /* It should do a default reset when loading or reloading the driver,
3738 * regardless of the module parameter reset_method.
3739 */
3740 amdgpu_reset_method = AMD_RESET_METHOD_NONE;
3741 r = amdgpu_asic_reset(adev);
3742 amdgpu_reset_method = tmp;
3743 if (r) {
3744 dev_err(adev->dev, "asic reset on init failed\n");
3745 goto failed;
3746 }
3747 }
3748 }
3749
3750 /* Post card if necessary */
3751 if (amdgpu_device_need_post(adev)) {
3752 if (!adev->bios) {
3753 dev_err(adev->dev, "no vBIOS found\n");
3754 r = -EINVAL;
3755 goto failed;
3756 }
3757 DRM_INFO("GPU posting now...\n");
3758 r = amdgpu_device_asic_init(adev);
3759 if (r) {
3760 dev_err(adev->dev, "gpu post error!\n");
3761 goto failed;
3762 }
3763 }
3764
3765 if (adev->bios) {
3766 if (adev->is_atom_fw) {
3767 /* Initialize clocks */
3768 r = amdgpu_atomfirmware_get_clock_info(adev);
3769 if (r) {
3770 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3771 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3772 goto failed;
3773 }
3774 } else {
3775 /* Initialize clocks */
3776 r = amdgpu_atombios_get_clock_info(adev);
3777 if (r) {
3778 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3779 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3780 goto failed;
3781 }
3782 /* init i2c buses */
3783 if (!amdgpu_device_has_dc_support(adev))
3784 amdgpu_atombios_i2c_init(adev);
3785 }
3786 }
3787
3788 fence_driver_init:
3789 /* Fence driver */
3790 r = amdgpu_fence_driver_sw_init(adev);
3791 if (r) {
3792 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3793 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3794 goto failed;
3795 }
3796
3797 /* init the mode config */
3798 drm_mode_config_init(adev_to_drm(adev));
3799
3800 r = amdgpu_device_ip_init(adev);
3801 if (r) {
3802 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3803 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3804 goto release_ras_con;
3805 }
3806
3807 amdgpu_fence_driver_hw_init(adev);
3808
3809 dev_info(adev->dev,
3810 "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
3811 adev->gfx.config.max_shader_engines,
3812 adev->gfx.config.max_sh_per_se,
3813 adev->gfx.config.max_cu_per_sh,
3814 adev->gfx.cu_info.number);
3815
3816 adev->accel_working = true;
3817
3818 amdgpu_vm_check_compute_bug(adev);
3819
3820 /* Initialize the buffer migration limit. */
3821 if (amdgpu_moverate >= 0)
3822 max_MBps = amdgpu_moverate;
3823 else
3824 max_MBps = 8; /* Allow 8 MB/s. */
3825 /* Get a log2 for easy divisions. */
3826 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
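/*
 * Since 1 MB/s is 1 byte/us, a byte count can later be converted to an
 * approximate move time with a shift instead of a 64-bit division,
 * roughly: us = bytes >> adev->mm_stats.log2_max_MBps;
 */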
3827
3828 r = amdgpu_atombios_sysfs_init(adev);
3829 if (r)
3830 drm_err(&adev->ddev,
3831 "registering atombios sysfs failed (%d).\n", r);
3832
3833 r = amdgpu_pm_sysfs_init(adev);
3834 if (r)
3835 DRM_ERROR("registering pm sysfs failed (%d).\n", r);
3836
3837 r = amdgpu_ucode_sysfs_init(adev);
3838 if (r) {
3839 adev->ucode_sysfs_en = false;
3840 DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
3841 } else
3842 adev->ucode_sysfs_en = true;
3843
3844 /*
3845 * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
3846 * Otherwise the mgpu fan boost feature will be skipped because the
3847 * gpu instance count would be too low.
3848 */
3849 amdgpu_register_gpu_instance(adev);
3850
3851 /* enable clockgating, etc. after ib tests, etc. since some blocks require
3852 * explicit gating rather than handling it automatically.
3853 */
3854 if (!adev->gmc.xgmi.pending_reset) {
3855 r = amdgpu_device_ip_late_init(adev);
3856 if (r) {
3857 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3858 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3859 goto release_ras_con;
3860 }
3861 /* must succeed. */
3862 amdgpu_ras_resume(adev);
3863 queue_delayed_work(system_wq, &adev->delayed_init_work,
3864 msecs_to_jiffies(AMDGPU_RESUME_MS));
3865 }
3866
3867 if (amdgpu_sriov_vf(adev)) {
3868 amdgpu_virt_release_full_gpu(adev, true);
3869 flush_delayed_work(&adev->delayed_init_work);
3870 }
3871
3872 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
3873 if (r)
3874 dev_err(adev->dev, "Could not create amdgpu device attr\n");
3875
3876 amdgpu_fru_sysfs_init(adev);
3877
3878 if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
3879 r = amdgpu_pmu_init(adev);
3880 if (r)
3881 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
3882 }

3883 /* Keep the stored PCI config space at hand for restore on sudden PCI error */
3884 if (amdgpu_device_cache_pci_state(adev->pdev))
3885 pci_restore_state(pdev);
3886
3887 /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
3888 /* this will fail for cards that aren't VGA class devices, just
3889 * ignore it
3890 */
3891 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
3892 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
3893
3894 px = amdgpu_device_supports_px(ddev);
3895
3896 if (px || (!dev_is_removable(&adev->pdev->dev) &&
3897 apple_gmux_detect(NULL, NULL)))
3898 vga_switcheroo_register_client(adev->pdev,
3899 &amdgpu_switcheroo_ops, px);
3900
3901 if (px)
3902 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
3903
3904 if (adev->gmc.xgmi.pending_reset)
3905 queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
3906 msecs_to_jiffies(AMDGPU_RESUME_MS));
3907
3908 amdgpu_device_check_iommu_direct_map(adev);
3909
3910 return 0;
3911
3912 release_ras_con:
3913 if (amdgpu_sriov_vf(adev))
3914 amdgpu_virt_release_full_gpu(adev, true);
3915
3916 /* failed in exclusive mode due to timeout */
3917 if (amdgpu_sriov_vf(adev) &&
3918 !amdgpu_sriov_runtime(adev) &&
3919 amdgpu_virt_mmio_blocked(adev) &&
3920 !amdgpu_virt_wait_reset(adev)) {
3921 dev_err(adev->dev, "VF exclusive mode timeout\n");
3922 /* Don't send request since VF is inactive. */
3923 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
3924 adev->virt.ops = NULL;
3925 r = -EAGAIN;
3926 }
3927 amdgpu_release_ras_context(adev);
3928
3929 failed:
3930 amdgpu_vf_error_trans_all(adev);
3931
3932 return r;
3933 }
3934
3935 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
3936 {
3937
3938 /* Clear all CPU mappings pointing to this device */
3939 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
3940
3941 /* Unmap all mapped bars - Doorbell, registers and VRAM */
3942 amdgpu_doorbell_fini(adev);
3943
3944 iounmap(adev->rmmio);
3945 adev->rmmio = NULL;
3946 if (adev->mman.aper_base_kaddr)
3947 iounmap(adev->mman.aper_base_kaddr);
3948 adev->mman.aper_base_kaddr = NULL;
3949
3950 /* Memory manager related */
3951 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
3952 arch_phys_wc_del(adev->gmc.vram_mtrr);
3953 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
3954 }
3955 }
3956
3957 /**
3958 * amdgpu_device_fini_hw - tear down the driver
3959 *
3960 * @adev: amdgpu_device pointer
3961 *
3962 * Tear down the driver info (all asics).
3963 * Called at driver shutdown.
3964 */
3965 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
3966 {
3967 dev_info(adev->dev, "amdgpu: finishing device.\n");
3968 flush_delayed_work(&adev->delayed_init_work);
3969 adev->shutdown = true;
3970
3971 /* make sure IB test finished before entering exclusive mode
3972 * to avoid preemption on IB test
3973 */
3974 if (amdgpu_sriov_vf(adev)) {
3975 amdgpu_virt_request_full_gpu(adev, false);
3976 amdgpu_virt_fini_data_exchange(adev);
3977 }
3978
3979 /* disable all interrupts */
3980 amdgpu_irq_disable_all(adev);
3981 if (adev->mode_info.mode_config_initialized) {
3982 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
3983 drm_helper_force_disable_all(adev_to_drm(adev));
3984 else
3985 drm_atomic_helper_shutdown(adev_to_drm(adev));
3986 }
3987 amdgpu_fence_driver_hw_fini(adev);
3988
3989 if (adev->mman.initialized)
3990 drain_workqueue(adev->mman.bdev.wq);
3991
3992 if (adev->pm.sysfs_initialized)
3993 amdgpu_pm_sysfs_fini(adev);
3994 if (adev->ucode_sysfs_en)
3995 amdgpu_ucode_sysfs_fini(adev);
3996 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
3997 amdgpu_fru_sysfs_fini(adev);
3998
3999 /* disable ras feature must before hw fini */
4000 amdgpu_ras_pre_fini(adev);
4001
4002 amdgpu_device_ip_fini_early(adev);
4003
4004 amdgpu_irq_fini_hw(adev);
4005
4006 if (adev->mman.initialized)
4007 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4008
4009 amdgpu_gart_dummy_page_fini(adev);
4010
4011 if (drm_dev_is_unplugged(adev_to_drm(adev)))
4012 amdgpu_device_unmap_mmio(adev);
4013
4014 }
4015
4016 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4017 {
4018 int idx;
4019 bool px;
4020
4021 amdgpu_fence_driver_sw_fini(adev);
4022 amdgpu_device_ip_fini(adev);
4023 amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4024 adev->accel_working = false;
4025 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4026
4027 amdgpu_reset_fini(adev);
4028
4029 /* free i2c buses */
4030 if (!amdgpu_device_has_dc_support(adev))
4031 amdgpu_i2c_fini(adev);
4032
4033 if (amdgpu_emu_mode != 1)
4034 amdgpu_atombios_fini(adev);
4035
4036 kfree(adev->bios);
4037 adev->bios = NULL;
4038
4039 px = amdgpu_device_supports_px(adev_to_drm(adev));
4040
4041 if (px || (!dev_is_removable(&adev->pdev->dev) &&
4042 apple_gmux_detect(NULL, NULL)))
4043 vga_switcheroo_unregister_client(adev->pdev);
4044
4045 if (px)
4046 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4047
4048 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4049 vga_client_unregister(adev->pdev);
4050
4051 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4052
4053 iounmap(adev->rmmio);
4054 adev->rmmio = NULL;
4055 amdgpu_doorbell_fini(adev);
4056 drm_dev_exit(idx);
4057 }
4058
4059 if (IS_ENABLED(CONFIG_PERF_EVENTS))
4060 amdgpu_pmu_fini(adev);
4061 if (adev->mman.discovery_bin)
4062 amdgpu_discovery_fini(adev);
4063
4064 amdgpu_reset_put_reset_domain(adev->reset_domain);
4065 adev->reset_domain = NULL;
4066
4067 kfree(adev->pci_state);
4068
4069 }
4070
4071 /**
4072 * amdgpu_device_evict_resources - evict device resources
4073 * @adev: amdgpu device object
4074 *
4075 * Evicts all ttm device resources (vram BOs, gart table) from the lru list
4076 * of the vram memory type. Mainly used for evicting device resources
4077 * at suspend time.
4078 *
4079 */
4080 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4081 {
4082 int ret;
4083
4084 /* No need to evict vram on APUs for suspend to ram or s2idle */
4085 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4086 return 0;
4087
4088 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4089 if (ret)
4090 DRM_WARN("evicting device resources failed\n");
4091 return ret;
4092 }
4093
4094 /*
4095 * Suspend & resume.
4096 */
4097 /**
4098 * amdgpu_device_suspend - initiate device suspend
4099 *
4100 * @dev: drm dev pointer
4101 * @fbcon: notify the fbdev of suspend
4102 *
4103 * Puts the hw in the suspend state (all asics).
4104 * Returns 0 for success or an error on failure.
4105 * Called at driver suspend.
4106 */
4107 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
4108 {
4109 struct amdgpu_device *adev = drm_to_adev(dev);
4110 int r = 0;
4111
4112 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4113 return 0;
4114
4115 adev->in_suspend = true;
4116
4117 /* Evict the majority of BOs before grabbing the full access */
4118 r = amdgpu_device_evict_resources(adev);
4119 if (r)
4120 return r;
4121
4122 if (amdgpu_sriov_vf(adev)) {
4123 amdgpu_virt_fini_data_exchange(adev);
4124 r = amdgpu_virt_request_full_gpu(adev, false);
4125 if (r)
4126 return r;
4127 }
4128
4129 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
4130 DRM_WARN("smart shift update failed\n");
4131
4132 if (fbcon)
4133 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4134
4135 cancel_delayed_work_sync(&adev->delayed_init_work);
4136
4137 amdgpu_ras_suspend(adev);
4138
4139 amdgpu_device_ip_suspend_phase1(adev);
4140
4141 if (!adev->in_s0ix)
4142 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4143
4144 r = amdgpu_device_evict_resources(adev);
4145 if (r)
4146 return r;
4147
4148 amdgpu_fence_driver_hw_fini(adev);
4149
4150 amdgpu_device_ip_suspend_phase2(adev);
4151
4152 if (amdgpu_sriov_vf(adev))
4153 amdgpu_virt_release_full_gpu(adev, false);
4154
4155 return 0;
4156 }
4157
4158 /**
4159 * amdgpu_device_resume - initiate device resume
4160 *
4161 * @dev: drm dev pointer
4162 * @fbcon: notify the fbdev of resume
4163 *
4164 * Bring the hw back to operating state (all asics).
4165 * Returns 0 for success or an error on failure.
4166 * Called at driver resume.
4167 */
4168 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
4169 {
4170 struct amdgpu_device *adev = drm_to_adev(dev);
4171 int r = 0;
4172
4173 if (amdgpu_sriov_vf(adev)) {
4174 r = amdgpu_virt_request_full_gpu(adev, true);
4175 if (r)
4176 return r;
4177 }
4178
4179 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
4180 return 0;
4181
4182 if (adev->in_s0ix)
4183 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4184
4185 /* post card */
4186 if (amdgpu_device_need_post(adev)) {
4187 r = amdgpu_device_asic_init(adev);
4188 if (r)
4189 dev_err(adev->dev, "amdgpu asic init failed\n");
4190 }
4191
4192 r = amdgpu_device_ip_resume(adev);
4193
4194 if (r) {
4195 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4196 goto exit;
4197 }
4198 amdgpu_fence_driver_hw_init(adev);
4199
4200 r = amdgpu_device_ip_late_init(adev);
4201 if (r)
4202 goto exit;
4203
4204 queue_delayed_work(system_wq, &adev->delayed_init_work,
4205 msecs_to_jiffies(AMDGPU_RESUME_MS));
4206
4207 if (!adev->in_s0ix) {
4208 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4209 if (r)
4210 goto exit;
4211 }
4212
4213 exit:
4214 if (amdgpu_sriov_vf(adev)) {
4215 amdgpu_virt_init_data_exchange(adev);
4216 amdgpu_virt_release_full_gpu(adev, true);
4217 }
4218
4219 if (r)
4220 return r;
4221
4222 /* Make sure IB tests flushed */
4223 flush_delayed_work(&adev->delayed_init_work);
4224
4225 if (fbcon)
4226 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4227
4228 amdgpu_ras_resume(adev);
4229
4230 if (adev->mode_info.num_crtc) {
4231 /*
4232 * Most of the connector probing functions try to acquire runtime pm
4233 * refs to ensure that the GPU is powered on when connector polling is
4234 * performed. Since we're calling this from a runtime PM callback,
4235 * trying to acquire rpm refs will cause us to deadlock.
4236 *
4237 * Since we're guaranteed to be holding the rpm lock, it's safe to
4238 * temporarily disable the rpm helpers so this doesn't deadlock us.
4239 */
4240 #ifdef CONFIG_PM
4241 dev->dev->power.disable_depth++;
4242 #endif
4243 if (!adev->dc_enabled)
4244 drm_helper_hpd_irq_event(dev);
4245 else
4246 drm_kms_helper_hotplug_event(dev);
4247 #ifdef CONFIG_PM
4248 dev->dev->power.disable_depth--;
4249 #endif
4250 }
4251 adev->in_suspend = false;
4252
4253 if (adev->enable_mes)
4254 amdgpu_mes_self_test(adev);
4255
4256 if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
4257 DRM_WARN("smart shift update failed\n");
4258
4259 return 0;
4260 }
4261
4262 /**
4263 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
4264 *
4265 * @adev: amdgpu_device pointer
4266 *
4267 * The list of all the hardware IPs that make up the asic is walked and
4268 * the check_soft_reset callbacks are run. check_soft_reset determines
4269 * if the asic is still hung or not.
4270 * Returns true if any of the IPs are still in a hung state, false if not.
4271 */
4272 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4273 {
4274 int i;
4275 bool asic_hang = false;
4276
4277 if (amdgpu_sriov_vf(adev))
4278 return true;
4279
4280 if (amdgpu_asic_need_full_reset(adev))
4281 return true;
4282
4283 for (i = 0; i < adev->num_ip_blocks; i++) {
4284 if (!adev->ip_blocks[i].status.valid)
4285 continue;
4286 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4287 adev->ip_blocks[i].status.hang =
4288 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4289 if (adev->ip_blocks[i].status.hang) {
4290 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4291 asic_hang = true;
4292 }
4293 }
4294 return asic_hang;
4295 }
4296
4297 /**
4298 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
4299 *
4300 * @adev: amdgpu_device pointer
4301 *
4302 * The list of all the hardware IPs that make up the asic is walked and the
4303 * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
4304 * handles any IP specific hardware or software state changes that are
4305 * necessary for a soft reset to succeed.
4306 * Returns 0 on success, negative error code on failure.
4307 */
4308 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4309 {
4310 int i, r = 0;
4311
4312 for (i = 0; i < adev->num_ip_blocks; i++) {
4313 if (!adev->ip_blocks[i].status.valid)
4314 continue;
4315 if (adev->ip_blocks[i].status.hang &&
4316 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4317 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4318 if (r)
4319 return r;
4320 }
4321 }
4322
4323 return 0;
4324 }
4325
4326 /**
4327 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
4328 *
4329 * @adev: amdgpu_device pointer
4330 *
4331 * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
4332 * reset is necessary to recover.
4333 * Returns true if a full asic reset is required, false if not.
4334 */
4335 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4336 {
4337 int i;
4338
4339 if (amdgpu_asic_need_full_reset(adev))
4340 return true;
4341
4342 for (i = 0; i < adev->num_ip_blocks; i++) {
4343 if (!adev->ip_blocks[i].status.valid)
4344 continue;
4345 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4346 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4347 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4348 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4349 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4350 if (adev->ip_blocks[i].status.hang) {
4351 dev_info(adev->dev, "Some block need full reset!\n");
4352 return true;
4353 }
4354 }
4355 }
4356 return false;
4357 }
4358
4359 /**
4360 * amdgpu_device_ip_soft_reset - do a soft reset
4361 *
4362 * @adev: amdgpu_device pointer
4363 *
4364 * The list of all the hardware IPs that make up the asic is walked and the
4365 * soft_reset callbacks are run if the block is hung. soft_reset handles any
4366 * IP specific hardware or software state changes that are necessary to soft
4367 * reset the IP.
4368 * Returns 0 on success, negative error code on failure.
4369 */
4370 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4371 {
4372 int i, r = 0;
4373
4374 for (i = 0; i < adev->num_ip_blocks; i++) {
4375 if (!adev->ip_blocks[i].status.valid)
4376 continue;
4377 if (adev->ip_blocks[i].status.hang &&
4378 adev->ip_blocks[i].version->funcs->soft_reset) {
4379 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4380 if (r)
4381 return r;
4382 }
4383 }
4384
4385 return 0;
4386 }
4387
4388 /**
4389 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
4390 *
4391 * @adev: amdgpu_device pointer
4392 *
4393 * The list of all the hardware IPs that make up the asic is walked and the
4394 * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
4395 * handles any IP specific hardware or software state changes that are
4396 * necessary after the IP has been soft reset.
4397 * Returns 0 on success, negative error code on failure.
4398 */
4399 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4400 {
4401 int i, r = 0;
4402
4403 for (i = 0; i < adev->num_ip_blocks; i++) {
4404 if (!adev->ip_blocks[i].status.valid)
4405 continue;
4406 if (adev->ip_blocks[i].status.hang &&
4407 adev->ip_blocks[i].version->funcs->post_soft_reset)
4408 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4409 if (r)
4410 return r;
4411 }
4412
4413 return 0;
4414 }
4415
4416 /**
4417 * amdgpu_device_recover_vram - Recover some VRAM contents
4418 *
4419 * @adev: amdgpu_device pointer
4420 *
4421 * Restores the contents of VRAM buffers from the shadows in GTT. Used to
4422 * restore things like GPUVM page tables after a GPU reset where
4423 * the contents of VRAM might be lost.
4424 *
4425 * Returns:
4426 * 0 on success, negative error code on failure.
4427 */
4428 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4429 {
4430 struct dma_fence *fence = NULL, *next = NULL;
4431 struct amdgpu_bo *shadow;
4432 struct amdgpu_bo_vm *vmbo;
4433 long r = 1, tmo;
4434
4435 if (amdgpu_sriov_runtime(adev))
4436 tmo = msecs_to_jiffies(8000);
4437 else
4438 tmo = msecs_to_jiffies(100);
4439
4440 dev_info(adev->dev, "recover vram bo from shadow start\n");
4441 mutex_lock(&adev->shadow_list_lock);
4442 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4443 /* If vm is compute context or adev is APU, shadow will be NULL */
4444 if (!vmbo->shadow)
4445 continue;
4446 shadow = vmbo->shadow;
4447
4448 /* No need to recover an evicted BO */
4449 if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
4450 shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
4451 shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
4452 continue;
4453
4454 r = amdgpu_bo_restore_shadow(shadow, &next);
4455 if (r)
4456 break;
4457
4458 if (fence) {
4459 tmo = dma_fence_wait_timeout(fence, false, tmo);
4460 dma_fence_put(fence);
4461 fence = next;
4462 if (tmo == 0) {
4463 r = -ETIMEDOUT;
4464 break;
4465 } else if (tmo < 0) {
4466 r = tmo;
4467 break;
4468 }
4469 } else {
4470 fence = next;
4471 }
4472 }
4473 mutex_unlock(&adev->shadow_list_lock);
4474
4475 if (fence)
4476 tmo = dma_fence_wait_timeout(fence, false, tmo);
4477 dma_fence_put(fence);
4478
4479 if (r < 0 || tmo <= 0) {
4480 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4481 return -EIO;
4482 }
4483
4484 dev_info(adev->dev, "recover vram bo from shadow done\n");
4485 return 0;
4486 }
4487
4488
4489 /**
4490 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
4491 *
4492 * @adev: amdgpu_device pointer
4493 * @from_hypervisor: request from hypervisor
4494 *
4495 * Do a VF FLR and reinitialize the ASIC.
4496 * Returns 0 on success, negative error code on failure.
4497 */
4498 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4499 bool from_hypervisor)
4500 {
4501 int r;
4502 struct amdgpu_hive_info *hive = NULL;
4503 int retry_limit = 0;
4504
4505 retry:
4506 amdgpu_amdkfd_pre_reset(adev);
4507
4508 if (from_hypervisor)
4509 r = amdgpu_virt_request_full_gpu(adev, true);
4510 else
4511 r = amdgpu_virt_reset_gpu(adev);
4512 if (r)
4513 return r;
4514 amdgpu_irq_gpu_reset_resume_helper(adev);
4515
4516 /* some SW cleanup the VF needs to do before recovery */
4517 amdgpu_virt_post_reset(adev);
4518
4519 /* Resume IP prior to SMC */
4520 r = amdgpu_device_ip_reinit_early_sriov(adev);
4521 if (r)
4522 goto error;
4523
4524 amdgpu_virt_init_data_exchange(adev);
4525
4526 r = amdgpu_device_fw_loading(adev);
4527 if (r)
4528 return r;
4529
4530 /* now we are okay to resume SMC/CP/SDMA */
4531 r = amdgpu_device_ip_reinit_late_sriov(adev);
4532 if (r)
4533 goto error;
4534
4535 hive = amdgpu_get_xgmi_hive(adev);
4536 /* Update PSP FW topology after reset */
4537 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4538 r = amdgpu_xgmi_update_topology(hive, adev);
4539
4540 if (hive)
4541 amdgpu_put_xgmi_hive(hive);
4542
4543 if (!r) {
4544 r = amdgpu_ib_ring_tests(adev);
4545
4546 amdgpu_amdkfd_post_reset(adev);
4547 }
4548
4549 error:
4550 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4551 amdgpu_inc_vram_lost(adev);
4552 r = amdgpu_device_recover_vram(adev);
4553 }
4554 amdgpu_virt_release_full_gpu(adev, true);
4555
4556 if (AMDGPU_RETRY_SRIOV_RESET(r)) {
4557 if (retry_limit < AMDGPU_MAX_RETRY_LIMIT) {
4558 retry_limit++;
4559 goto retry;
4560 } else
4561 DRM_ERROR("GPU reset retry is beyond the retry limit\n");
4562 }
4563
4564 return r;
4565 }
4566
4567 /**
4568 * amdgpu_device_has_job_running - check if there is any job in the pending list
4569 *
4570 * @adev: amdgpu_device pointer
4571 *
4572 * Check if there is any job in the scheduler pending list.
4573 */
4574 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4575 {
4576 int i;
4577 struct drm_sched_job *job;
4578
4579 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4580 struct amdgpu_ring *ring = adev->rings[i];
4581
4582 if (!ring || !ring->sched.thread)
4583 continue;
4584
4585 spin_lock(&ring->sched.job_list_lock);
4586 job = list_first_entry_or_null(&ring->sched.pending_list,
4587 struct drm_sched_job, list);
4588 spin_unlock(&ring->sched.job_list_lock);
4589 if (job)
4590 return true;
4591 }
4592 return false;
4593 }
4594
4595 /**
4596 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
4597 *
4598 * @adev: amdgpu_device pointer
4599 *
4600 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
4601 * a hung GPU.
4602 */
4603 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4604 {
4605
4606 if (amdgpu_gpu_recovery == 0)
4607 goto disabled;
4608
4609 /* Skip soft reset check in fatal error mode */
4610 if (!amdgpu_ras_is_poison_mode_supported(adev))
4611 return true;
4612
4613 if (amdgpu_sriov_vf(adev))
4614 return true;
4615
4616 if (amdgpu_gpu_recovery == -1) {
4617 switch (adev->asic_type) {
4618 #ifdef CONFIG_DRM_AMDGPU_SI
4619 case CHIP_VERDE:
4620 case CHIP_TAHITI:
4621 case CHIP_PITCAIRN:
4622 case CHIP_OLAND:
4623 case CHIP_HAINAN:
4624 #endif
4625 #ifdef CONFIG_DRM_AMDGPU_CIK
4626 case CHIP_KAVERI:
4627 case CHIP_KABINI:
4628 case CHIP_MULLINS:
4629 #endif
4630 case CHIP_CARRIZO:
4631 case CHIP_STONEY:
4632 case CHIP_CYAN_SKILLFISH:
4633 goto disabled;
4634 default:
4635 break;
4636 }
4637 }
4638
4639 return true;
4640
4641 disabled:
4642 dev_info(adev->dev, "GPU recovery disabled.\n");
4643 return false;
4644 }
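
/*
 * Note: amdgpu.gpu_recovery = 0 disables recovery, 1 forces it on, and
 * -1 (the default) enables it except on the legacy ASICs listed above,
 * which lack reliable reset support.
 */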
4645
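/**
 * amdgpu_device_mode1_reset - perform a full ASIC (mode1) reset
 *
 * @adev: amdgpu_device pointer
 *
 * Disables bus mastering, caches the PCI config space, then triggers a
 * whole-ASIC reset through the SMU when supported or through the PSP
 * otherwise, and waits for the ASIC to come back out of reset.
 * Returns 0 on success, negative error code on failure.
 */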
4646 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4647 {
4648 u32 i;
4649 int ret = 0;
4650
4651 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4652
4653 dev_info(adev->dev, "GPU mode1 reset\n");
4654
4655 /* disable BM */
4656 pci_clear_master(adev->pdev);
4657
4658 amdgpu_device_cache_pci_state(adev->pdev);
4659
4660 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4661 dev_info(adev->dev, "GPU smu mode1 reset\n");
4662 ret = amdgpu_dpm_mode1_reset(adev);
4663 } else {
4664 dev_info(adev->dev, "GPU psp mode1 reset\n");
4665 ret = psp_gpu_reset(adev);
4666 }
4667
4668 if (ret)
4669 goto mode1_reset_failed;
4670
4671 amdgpu_device_load_pci_state(adev->pdev);
4672 ret = amdgpu_psp_wait_for_bootloader(adev);
4673 if (ret)
4674 goto mode1_reset_failed;
4675
4676 /* wait for asic to come out of reset */
4677 for (i = 0; i < adev->usec_timeout; i++) {
4678 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4679
4680 if (memsize != 0xffffffff)
4681 break;
4682 udelay(1);
4683 }
4684
4685 if (i >= adev->usec_timeout) {
4686 ret = -ETIMEDOUT;
4687 goto mode1_reset_failed;
4688 }
4689
4690 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4691
4692 return 0;
4693
4694 mode1_reset_failed:
4695 dev_err(adev->dev, "GPU mode1 reset failed\n");
4696 return ret;
4697 }
4698
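/**
 * amdgpu_device_pre_asic_reset - prepare a device for ASIC reset
 *
 * @adev: amdgpu_device pointer
 * @reset_context: amdgpu reset context pointer
 *
 * Clears and force-completes the outstanding hardware fences and, on
 * bare metal, tries an IP soft reset first; if that is not sufficient,
 * the IP blocks are suspended and AMDGPU_NEED_FULL_RESET is set in
 * @reset_context->flags.
 * Returns 0 on success, negative error code on failure.
 */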
4699 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4700 struct amdgpu_reset_context *reset_context)
4701 {
4702 int i, r = 0;
4703 struct amdgpu_job *job = NULL;
4704 bool need_full_reset =
4705 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4706
4707 if (reset_context->reset_req_dev == adev)
4708 job = reset_context->job;
4709
4710 if (amdgpu_sriov_vf(adev)) {
4711 /* stop the data exchange thread */
4712 amdgpu_virt_fini_data_exchange(adev);
4713 }
4714
4715 amdgpu_fence_driver_isr_toggle(adev, true);
4716
4717 /* block all schedulers and reset given job's ring */
4718 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
4719 struct amdgpu_ring *ring = adev->rings[i];
4720
4721 if (!ring || !ring->sched.thread)
4722 continue;
4723
4724 /* Clear the job fences from the fence drv to avoid force_completion
4725 * leaving NULL and vm flush fences in the fence drv
4726 */
4727 amdgpu_fence_driver_clear_job_fences(ring);
4728
4729 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
4730 amdgpu_fence_driver_force_completion(ring);
4731 }
4732
4733 amdgpu_fence_driver_isr_toggle(adev, false);
4734
4735 if (job && job->vm)
4736 drm_sched_increase_karma(&job->base);
4737
4738 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4739 /* If reset handler not implemented, continue; otherwise return */
4740 if (r == -EOPNOTSUPP)
4741 r = 0;
4742 else
4743 return r;
4744
4745 /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
4746 if (!amdgpu_sriov_vf(adev)) {
4747
4748 if (!need_full_reset)
4749 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4750
4751 if (!need_full_reset && amdgpu_gpu_recovery &&
4752 amdgpu_device_ip_check_soft_reset(adev)) {
4753 amdgpu_device_ip_pre_soft_reset(adev);
4754 r = amdgpu_device_ip_soft_reset(adev);
4755 amdgpu_device_ip_post_soft_reset(adev);
4756 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4757 dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
4758 need_full_reset = true;
4759 }
4760 }
4761
4762 if (need_full_reset)
4763 r = amdgpu_device_ip_suspend(adev);
4764 if (need_full_reset)
4765 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4766 else
4767 clear_bit(AMDGPU_NEED_FULL_RESET,
4768 &reset_context->flags);
4769 }
4770
4771 return r;
4772 }
4773
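/* Snapshot the debugfs-selected reset dump registers for the devcoredump. */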
4774 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4775 {
4776 int i;
4777
4778 lockdep_assert_held(&adev->reset_domain->sem);
4779
4780 for (i = 0; i < adev->num_regs; i++) {
4781 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4782 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4783 adev->reset_dump_reg_value[i]);
4784 }
4785
4786 return 0;
4787 }
4788
4789 #ifdef CONFIG_DEV_COREDUMP
4790 static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset,
4791 size_t count, void *data, size_t datalen)
4792 {
4793 struct drm_printer p;
4794 struct amdgpu_device *adev = data;
4795 struct drm_print_iterator iter;
4796 int i;
4797
4798 iter.data = buffer;
4799 iter.offset = 0;
4800 iter.start = offset;
4801 iter.remain = count;
4802
4803 p = drm_coredump_printer(&iter);
4804
4805 drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
4806 drm_printf(&p, "kernel: " UTS_RELEASE "\n");
4807 drm_printf(&p, "module: " KBUILD_MODNAME "\n");
4808 drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
4809 if (adev->reset_task_info.pid)
4810 drm_printf(&p, "process_name: %s PID: %d\n",
4811 adev->reset_task_info.process_name,
4812 adev->reset_task_info.pid);
4813
4814 if (adev->reset_vram_lost)
4815 drm_printf(&p, "VRAM is lost due to GPU reset!\n");
4816 if (adev->num_regs) {
4817 drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n");
4818
4819 for (i = 0; i < adev->num_regs; i++)
4820 drm_printf(&p, "0x%08x: 0x%08x\n",
4821 adev->reset_dump_reg_list[i],
4822 adev->reset_dump_reg_value[i]);
4823 }
4824
4825 return count - iter.remain;
4826 }
4827
4828 static void amdgpu_devcoredump_free(void *data)
4829 {
4830 }
4831
4832 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
4833 {
4834 struct drm_device *dev = adev_to_drm(adev);
4835
4836 ktime_get_ts64(&adev->reset_time);
4837 dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_NOWAIT,
4838 amdgpu_devcoredump_read, amdgpu_devcoredump_free);
4839 }
4840 #endif
4841
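/**
 * amdgpu_do_asic_reset - reset and re-initialize one or more ASICs
 *
 * @device_list_handle: list of devices to reset (the whole hive for XGMI)
 * @reset_context: amdgpu reset context pointer
 *
 * Tries an ASIC-specific reset handler first; if none is implemented,
 * falls back to a full reset of every device in the list (run in
 * parallel for XGMI hives), then re-posts the cards, resumes the IP
 * blocks and recovers VRAM contents where needed.
 * Returns 0 on success, negative error code on failure.
 */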
4842 int amdgpu_do_asic_reset(struct list_head *device_list_handle,
4843 struct amdgpu_reset_context *reset_context)
4844 {
4845 struct amdgpu_device *tmp_adev = NULL;
4846 bool need_full_reset, skip_hw_reset, vram_lost = false;
4847 int r = 0;
4848 bool gpu_reset_for_dev_remove = false;
4849
4850 /* Try reset handler method first */
4851 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
4852 reset_list);
4853 amdgpu_reset_reg_dumps(tmp_adev);
4854
4855 reset_context->reset_device_list = device_list_handle;
4856 r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
4857 /* If reset handler not implemented, continue; otherwise return */
4858 if (r == -EOPNOTSUPP)
4859 r = 0;
4860 else
4861 return r;
4862
4863 /* Reset handler not implemented, use the default method */
4864 need_full_reset =
4865 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4866 skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
4867
4868 gpu_reset_for_dev_remove =
4869 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
4870 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
4871
4872 /*
4873 * ASIC reset has to be done on all XGMI hive nodes ASAP
4874 * to allow proper links negotiation in FW (within 1 sec)
4875 */
4876 if (!skip_hw_reset && need_full_reset) {
4877 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4878 /* For XGMI run all resets in parallel to speed up the process */
4879 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4880 tmp_adev->gmc.xgmi.pending_reset = false;
4881 if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
4882 r = -EALREADY;
4883 } else
4884 r = amdgpu_asic_reset(tmp_adev);
4885
4886 if (r) {
4887 dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
4888 r, adev_to_drm(tmp_adev)->unique);
4889 break;
4890 }
4891 }
4892
4893 /* For XGMI wait for all resets to complete before proceed */
4894 if (!r) {
4895 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4896 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
4897 flush_work(&tmp_adev->xgmi_reset_work);
4898 r = tmp_adev->asic_reset_res;
4899 if (r)
4900 break;
4901 }
4902 }
4903 }
4904 }
4905
4906 if (!r && amdgpu_ras_intr_triggered()) {
4907 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4908 if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops &&
4909 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
4910 tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev);
4911 }
4912
4913 amdgpu_ras_intr_cleared();
4914 }
4915
4916 /* Since the mode1 reset affects base ip blocks, the
4917 * phase1 ip blocks need to be resumed. Otherwise there
4918 * will be a BIOS signature error and the psp bootloader
4919 * can't load kdb on the next amdgpu install.
4920 */
4921 if (gpu_reset_for_dev_remove) {
4922 list_for_each_entry(tmp_adev, device_list_handle, reset_list)
4923 amdgpu_device_ip_resume_phase1(tmp_adev);
4924
4925 goto end;
4926 }
4927
4928 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
4929 if (need_full_reset) {
4930 /* post card */
4931 r = amdgpu_device_asic_init(tmp_adev);
4932 if (r) {
4933 dev_warn(tmp_adev->dev, "asic atom init failed!");
4934 } else {
4935 dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
4936
4937 r = amdgpu_device_ip_resume_phase1(tmp_adev);
4938 if (r)
4939 goto out;
4940
4941 vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
4942 #ifdef CONFIG_DEV_COREDUMP
4943 tmp_adev->reset_vram_lost = vram_lost;
4944 memset(&tmp_adev->reset_task_info, 0,
4945 sizeof(tmp_adev->reset_task_info));
4946 if (reset_context->job && reset_context->job->vm)
4947 tmp_adev->reset_task_info =
4948 reset_context->job->vm->task_info;
4949 amdgpu_reset_capture_coredumpm(tmp_adev);
4950 #endif
4951 if (vram_lost) {
4952 DRM_INFO("VRAM is lost due to GPU reset!\n");
4953 amdgpu_inc_vram_lost(tmp_adev);
4954 }
4955
4956 r = amdgpu_device_fw_loading(tmp_adev);
4957 if (r)
4958 return r;
4959
4960 r = amdgpu_device_ip_resume_phase2(tmp_adev);
4961 if (r)
4962 goto out;
4963
4964 if (vram_lost)
4965 amdgpu_device_fill_reset_magic(tmp_adev);
4966
4967 /*
4968 * Add this ASIC as tracked, as the reset has already
4969 * completed successfully.
4970 */
4971 amdgpu_register_gpu_instance(tmp_adev);
4972
4973 if (!reset_context->hive &&
4974 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
4975 amdgpu_xgmi_add_device(tmp_adev);
4976
4977 r = amdgpu_device_ip_late_init(tmp_adev);
4978 if (r)
4979 goto out;
4980
4981 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
4982
4983 /*
4984 * The GPU enters a bad state once the number of faulty pages
4985 * reported by ECC reaches the threshold, and RAS recovery is
4986 * scheduled next. So add one check here to break recovery if
4987 * the bad page threshold has indeed been exceeded, and remind
4988 * the user to either retire this GPU or set a bigger
4989 * bad_page_threshold value to fix this the next time the
4990 * driver is probed.
4992 */
4993 if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
4994 /* must succeed. */
4995 amdgpu_ras_resume(tmp_adev);
4996 } else {
4997 r = -EINVAL;
4998 goto out;
4999 }
5000
5001 /* Update PSP FW topology after reset */
5002 if (reset_context->hive &&
5003 tmp_adev->gmc.xgmi.num_physical_nodes > 1)
5004 r = amdgpu_xgmi_update_topology(
5005 reset_context->hive, tmp_adev);
5006 }
5007 }
5008
5009 out:
5010 if (!r) {
5011 amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
5012 r = amdgpu_ib_ring_tests(tmp_adev);
5013 if (r) {
5014 dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
5015 need_full_reset = true;
5016 r = -EAGAIN;
5017 goto end;
5018 }
5019 }
5020
5021 if (!r)
5022 r = amdgpu_device_recover_vram(tmp_adev);
5023 else
5024 tmp_adev->asic_reset_res = r;
5025 }
5026
5027 end:
5028 if (need_full_reset)
5029 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5030 else
5031 clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5032 return r;
5033 }
5034
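/* Record the MP1 (SMU) state expected across the pending reset. */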
5035 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5036 {
5037
5038 switch (amdgpu_asic_reset_method(adev)) {
5039 case AMD_RESET_METHOD_MODE1:
5040 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5041 break;
5042 case AMD_RESET_METHOD_MODE2:
5043 adev->mp1_state = PP_MP1_STATE_RESET;
5044 break;
5045 default:
5046 adev->mp1_state = PP_MP1_STATE_NONE;
5047 break;
5048 }
5049 }
5050
5051 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5052 {
5053 amdgpu_vf_error_trans_all(adev);
5054 adev->mp1_state = PP_MP1_STATE_NONE;
5055 }
5056
5057 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5058 {
5059 struct pci_dev *p = NULL;
5060
5061 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5062 adev->pdev->bus->number, 1);
5063 if (p) {
5064 pm_runtime_enable(&(p->dev));
5065 pm_runtime_resume(&(p->dev));
5066 }
5067
5068 pci_dev_put(p);
5069 }
5070
5071 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5072 {
5073 enum amd_reset_method reset_method;
5074 struct pci_dev *p = NULL;
5075 u64 expires;
5076
5077 /*
5078 * For now, only BACO and mode1 reset are confirmed
5079 * to suffer the audio issue if the audio device is not properly suspended.
5080 */
5081 reset_method = amdgpu_asic_reset_method(adev);
5082 if ((reset_method != AMD_RESET_METHOD_BACO) &&
5083 (reset_method != AMD_RESET_METHOD_MODE1))
5084 return -EINVAL;
5085
5086 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5087 adev->pdev->bus->number, 1);
5088 if (!p)
5089 return -ENODEV;
5090
5091 expires = pm_runtime_autosuspend_expiration(&(p->dev));
5092 if (!expires)
5093 /*
5094 * If we cannot get the audio device autosuspend delay,
5095 * a fixed 4s interval will be used. Since 3s is the
5096 * audio controller's default autosuspend delay setting,
5097 * the 4s used here is guaranteed to cover it.
5098 */
5099 expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
5100
5101 while (!pm_runtime_status_suspended(&(p->dev))) {
5102 if (!pm_runtime_suspend(&(p->dev)))
5103 break;
5104
5105 if (expires < ktime_get_mono_fast_ns()) {
5106 dev_warn(adev->dev, "failed to suspend display audio\n");
5107 pci_dev_put(p);
5108 /* TODO: abort the succeeding gpu reset? */
5109 return -ETIMEDOUT;
5110 }
5111 }
5112
5113 pm_runtime_disable(&(p->dev));
5114
5115 pci_dev_put(p);
5116 return 0;
5117 }
5118
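/*
 * Cancel resets queued from other paths (debugfs, KFD, VF FLR, RAS)
 * since this recovery now owns the reset.
 */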
5119 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5120 {
5121 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5122
5123 #if defined(CONFIG_DEBUG_FS)
5124 if (!amdgpu_sriov_vf(adev))
5125 cancel_work(&adev->reset_work);
5126 #endif
5127
5128 if (adev->kfd.dev)
5129 cancel_work(&adev->kfd.reset_work);
5130
5131 if (amdgpu_sriov_vf(adev))
5132 cancel_work(&adev->virt.flr_work);
5133
5134 if (con && adev->ras_enabled)
5135 cancel_work(&con->recovery_work);
5136
5137 }
5138
5139 /**
5140 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
5141 *
5142 * @adev: amdgpu_device pointer
5143 * @job: the job which triggered the hang
5144 * @reset_context: amdgpu reset context pointer
5145 *
5146 * Attempt to reset the GPU if it has hung (all asics).
5147 * Attempt to do a soft reset or full reset and reinitialize the ASIC.
5148 * Returns 0 for success or an error on failure.
5149 */
5150
5151 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5152 struct amdgpu_job *job,
5153 struct amdgpu_reset_context *reset_context)
5154 {
5155 struct list_head device_list, *device_list_handle = NULL;
5156 bool job_signaled = false;
5157 struct amdgpu_hive_info *hive = NULL;
5158 struct amdgpu_device *tmp_adev = NULL;
5159 int i, r = 0;
5160 bool need_emergency_restart = false;
5161 bool audio_suspended = false;
5162 bool gpu_reset_for_dev_remove = false;
5163
5164 gpu_reset_for_dev_remove =
5165 test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
5166 test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
5167
5168 /*
5169 * Special case: RAS triggered and full reset isn't supported
5170 */
5171 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5172
5173 /*
5174 * Flush RAM to disk so that after reboot
5175 * the user can read the log and see why the system rebooted.
5176 */
5177 if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5178 amdgpu_ras_get_context(adev)->reboot) {
5179 DRM_WARN("Emergency reboot.");
5180
5181 ksys_sync_helper();
5182 emergency_restart();
5183 }
5184
5185 dev_info(adev->dev, "GPU %s begin!\n",
5186 need_emergency_restart ? "jobs stop":"reset");
5187
5188 if (!amdgpu_sriov_vf(adev))
5189 hive = amdgpu_get_xgmi_hive(adev);
5190 if (hive)
5191 mutex_lock(&hive->hive_lock);
5192
5193 reset_context->job = job;
5194 reset_context->hive = hive;
5195 /*
5196 * Build list of devices to reset.
5197 * In case we are in XGMI hive mode, resort the device list
5198 * to put adev in the 1st position.
5199 */
5200 INIT_LIST_HEAD(&device_list);
5201 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
5202 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
5203 list_add_tail(&tmp_adev->reset_list, &device_list);
5204 if (gpu_reset_for_dev_remove && adev->shutdown)
5205 tmp_adev->shutdown = true;
5206 }
5207 if (!list_is_first(&adev->reset_list, &device_list))
5208 list_rotate_to_front(&adev->reset_list, &device_list);
5209 device_list_handle = &device_list;
5210 } else {
5211 list_add_tail(&adev->reset_list, &device_list);
5212 device_list_handle = &device_list;
5213 }
5214
5215 /* We need to lock reset domain only once both for XGMI and single device */
5216 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5217 reset_list);
5218 amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
5219
5220 /* block all schedulers and reset given job's ring */
5221 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5222
5223 amdgpu_device_set_mp1_state(tmp_adev);
5224
5225 /*
5226 * Try to put the audio codec into suspend state
5227 * before gpu reset started.
5228 *
5229 * Because the power domain of the graphics device
5230 * is shared with the AZ power domain, without this
5231 * we may change the audio hardware from behind
5232 * the audio driver's back. That will trigger
5233 * some audio codec errors.
5234 */
5235 if (!amdgpu_device_suspend_display_audio(tmp_adev))
5236 audio_suspended = true;
5237
5238 amdgpu_ras_set_error_query_ready(tmp_adev, false);
5239
5240 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
5241
5242 if (!amdgpu_sriov_vf(tmp_adev))
5243 amdgpu_amdkfd_pre_reset(tmp_adev);
5244
5245 /*
5246 * Mark these ASICs to be reset as untracked first,
5247 * and add them back after the reset completes.
5248 */
5249 amdgpu_unregister_gpu_instance(tmp_adev);
5250
5251 drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
5252
5253 /* disable ras on ALL IPs */
5254 if (!need_emergency_restart &&
5255 amdgpu_device_ip_need_full_reset(tmp_adev))
5256 amdgpu_ras_suspend(tmp_adev);
5257
5258 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5259 struct amdgpu_ring *ring = tmp_adev->rings[i];
5260
5261 if (!ring || !ring->sched.thread)
5262 continue;
5263
5264 drm_sched_stop(&ring->sched, job ? &job->base : NULL);
5265
5266 if (need_emergency_restart)
5267 amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
5268 }
5269 atomic_inc(&tmp_adev->gpu_reset_counter);
5270 }
5271
5272 if (need_emergency_restart)
5273 goto skip_sched_resume;
5274
5275 /*
5276 * Must check guilty signal here since after this point all old
5277 * HW fences are force signaled.
5278 *
5279 * job->base holds a reference to parent fence
5280 */
5281 if (job && dma_fence_is_signaled(&job->hw_fence)) {
5282 job_signaled = true;
5283 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5284 goto skip_hw_reset;
5285 }
5286
5287 retry: /* Rest of adevs pre asic reset from XGMI hive. */
5288 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5289 if (gpu_reset_for_dev_remove) {
5290 /* Workaround for ASICs that need to disable SMC first */
5291 amdgpu_device_smu_fini_early(tmp_adev);
5292 }
5293 r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
5294 /* TODO: Should we stop? */
5295 if (r) {
5296 dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
5297 r, adev_to_drm(tmp_adev)->unique);
5298 tmp_adev->asic_reset_res = r;
5299 }
5300
5301 /*
5302 * Drop all pending non-scheduler resets. Scheduler resets
5303 * were already dropped during drm_sched_stop
5304 */
5305 amdgpu_device_stop_pending_resets(tmp_adev);
5306 }
5307
5308 /* Actual ASIC resets if needed.*/
5309 /* Host driver will handle XGMI hive reset for SRIOV */
5310 if (amdgpu_sriov_vf(adev)) {
5311 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5312 if (r)
5313 adev->asic_reset_res = r;
5314
5315 /* Aldebaran and gfx_11_0_3 support RAS in SRIOV, so RAS needs to be resumed during reset */
5316 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
5317 adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3))
5318 amdgpu_ras_resume(adev);
5319 } else {
5320 r = amdgpu_do_asic_reset(device_list_handle, reset_context);
5321 if (r && r == -EAGAIN)
5322 goto retry;
5323
5324 if (!r && gpu_reset_for_dev_remove)
5325 goto recover_end;
5326 }
5327
5328 skip_hw_reset:
5329
5330 /* Post ASIC reset for all devs. */
5331 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5332
5333 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5334 struct amdgpu_ring *ring = tmp_adev->rings[i];
5335
5336 if (!ring || !ring->sched.thread)
5337 continue;
5338
5339 drm_sched_start(&ring->sched, true);
5340 }
5341
5342 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5343 amdgpu_mes_self_test(tmp_adev);
5344
5345 if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
5346 drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
5347
5348 if (tmp_adev->asic_reset_res)
5349 r = tmp_adev->asic_reset_res;
5350
5351 tmp_adev->asic_reset_res = 0;
5352
5353 if (r) {
5354 /* bad news, how to tell it to userspace? */
5355 dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
5356 amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
5357 } else {
5358 dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
5359 if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
5360 DRM_WARN("smart shift update failed\n");
5361 }
5362 }
5363
5364 skip_sched_resume:
5365 list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
5366 /* unlock kfd: SRIOV would do it separately */
5367 if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
5368 amdgpu_amdkfd_post_reset(tmp_adev);
5369
5370 /* kfd_post_reset will do nothing if the kfd device is not initialized;
5371 * we need to bring up kfd here if it wasn't initialized before
5372 */
5373 if (!adev->kfd.init_complete)
5374 amdgpu_amdkfd_device_init(adev);
5375
5376 if (audio_suspended)
5377 amdgpu_device_resume_display_audio(tmp_adev);
5378
5379 amdgpu_device_unset_mp1_state(tmp_adev);
5380
5381 amdgpu_ras_set_error_query_ready(tmp_adev, true);
5382 }
5383
5384 recover_end:
5385 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
5386 reset_list);
5387 amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
5388
5389 if (hive) {
5390 mutex_unlock(&hive->hive_lock);
5391 amdgpu_put_xgmi_hive(hive);
5392 }
5393
5394 if (r)
5395 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5396
5397 atomic_set(&adev->reset_domain->reset_res, r);
5398 return r;
5399 }
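/*
 * Illustrative sketch (not part of the driver): the recovery routine above is
 * driven by an amdgpu_reset_context that the caller fills in. A minimal,
 * hypothetical trigger, mirroring the context setup used by
 * amdgpu_pci_slot_reset() below, could look like this (my_trigger_recovery is
 * a made-up name; amdgpu_device_gpu_recover is assumed to be the enclosing
 * function):
 *
 *	static int my_trigger_recovery(struct amdgpu_device *adev,
 *				       struct amdgpu_job *job)
 *	{
 *		struct amdgpu_reset_context reset_context;
 *
 *		memset(&reset_context, 0, sizeof(reset_context));
 *		reset_context.method = AMD_RESET_METHOD_NONE;
 *		reset_context.reset_req_dev = adev;
 *		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
 *
 *		return amdgpu_device_gpu_recover(adev, job, &reset_context);
 *	}
 */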
5400
5401 /**
5402 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
5403 *
5404 * @adev: amdgpu_device pointer
5405 *
5406 * Fetches and stores in the driver the PCIE capabilities (gen speed
5407 * and lanes) of the slot the device is in. Handles APUs and
5408 * virtualized environments where PCIE config space may not be available.
5409 */
5410 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5411 {
5412 struct pci_dev *pdev;
5413 enum pci_bus_speed speed_cap, platform_speed_cap;
5414 enum pcie_link_width platform_link_width;
5415
5416 if (amdgpu_pcie_gen_cap)
5417 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5418
5419 if (amdgpu_pcie_lane_cap)
5420 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5421
5422 /* covers APUs as well */
5423 if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5424 if (adev->pm.pcie_gen_mask == 0)
5425 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5426 if (adev->pm.pcie_mlw_mask == 0)
5427 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5428 return;
5429 }
5430
5431 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5432 return;
5433
5434 pcie_bandwidth_available(adev->pdev, NULL,
5435 &platform_speed_cap, &platform_link_width);
5436
5437 if (adev->pm.pcie_gen_mask == 0) {
5438 /* asic caps */
5439 pdev = adev->pdev;
5440 speed_cap = pcie_get_speed_cap(pdev);
5441 if (speed_cap == PCI_SPEED_UNKNOWN) {
5442 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5443 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5444 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5445 } else {
5446 if (speed_cap == PCIE_SPEED_32_0GT)
5447 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5448 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5449 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5450 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5451 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
5452 else if (speed_cap == PCIE_SPEED_16_0GT)
5453 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5454 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5455 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5456 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
5457 else if (speed_cap == PCIE_SPEED_8_0GT)
5458 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5459 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5460 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
5461 else if (speed_cap == PCIE_SPEED_5_0GT)
5462 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5463 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
5464 else
5465 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5466 }
5467 /* platform caps */
5468 if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
5469 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5470 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5471 } else {
5472 if (platform_speed_cap == PCIE_SPEED_32_0GT)
5473 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5474 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5475 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5476 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
5477 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
5478 else if (platform_speed_cap == PCIE_SPEED_16_0GT)
5479 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5480 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5481 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
5482 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
5483 else if (platform_speed_cap == PCIE_SPEED_8_0GT)
5484 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5485 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
5486 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
5487 else if (platform_speed_cap == PCIE_SPEED_5_0GT)
5488 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5489 CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
5490 else
5491 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5492
5493 }
5494 }
5495 if (adev->pm.pcie_mlw_mask == 0) {
5496 if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
5497 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5498 } else {
5499 switch (platform_link_width) {
5500 case PCIE_LNK_X32:
5501 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5502 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5503 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5504 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5505 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5506 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5507 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5508 break;
5509 case PCIE_LNK_X16:
5510 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5511 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5512 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5513 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5514 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5515 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5516 break;
5517 case PCIE_LNK_X12:
5518 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5519 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5520 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5521 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5522 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5523 break;
5524 case PCIE_LNK_X8:
5525 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5526 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5527 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5528 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5529 break;
5530 case PCIE_LNK_X4:
5531 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5532 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5533 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5534 break;
5535 case PCIE_LNK_X2:
5536 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5537 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
5538 break;
5539 case PCIE_LNK_X1:
5540 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
5541 break;
5542 default:
5543 break;
5544 }
5545 }
5546 }
5547 }
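/*
 * Illustrative sketch (not part of the driver): consumers test the cached
 * masks against the CAIL bit definitions used above; note the separate ASIC
 * and platform bit families. my_supports_gen4() is a hypothetical helper.
 *
 *	static bool my_supports_gen4(struct amdgpu_device *adev)
 *	{
 *		return (adev->pm.pcie_gen_mask &
 *			CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4) &&
 *		       (adev->pm.pcie_gen_mask &
 *			CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
 *	}
 */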
5548
5549 /**
5550 * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
5551 *
5552 * @adev: amdgpu_device pointer
5553 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5554 *
5555 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5556 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5557 * @peer_adev.
5558 */
5559 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5560 struct amdgpu_device *peer_adev)
5561 {
5562 #ifdef CONFIG_HSA_AMD_P2P
5563 uint64_t address_mask = peer_adev->dev->dma_mask ?
5564 ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
5565 resource_size_t aper_limit =
5566 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5567 bool p2p_access =
5568 !adev->gmc.xgmi.connected_to_cpu &&
5569 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5570
5571 return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5572 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5573 !(adev->gmc.aper_base & address_mask ||
5574 aper_limit & address_mask));
5575 #else
5576 return false;
5577 #endif
5578 }
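/*
 * Illustrative sketch (not part of the driver): peer visibility is
 * directional, so a hypothetical caller setting up P2P DMA between two
 * devices would typically check both directions first:
 *
 *	if (amdgpu_device_is_peer_accessible(adev, peer_adev) &&
 *	    amdgpu_device_is_peer_accessible(peer_adev, adev)) {
 *		... map peer VRAM over the PCIe BAR ...
 *	}
 */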
5579
5580 int amdgpu_device_baco_enter(struct drm_device *dev)
5581 {
5582 struct amdgpu_device *adev = drm_to_adev(dev);
5583 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5584
5585 if (!amdgpu_device_supports_baco(dev))
5586 return -ENOTSUPP;
5587
5588 if (ras && adev->ras_enabled &&
5589 adev->nbio.funcs->enable_doorbell_interrupt)
5590 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5591
5592 return amdgpu_dpm_baco_enter(adev);
5593 }
5594
5595 int amdgpu_device_baco_exit(struct drm_device *dev)
5596 {
5597 struct amdgpu_device *adev = drm_to_adev(dev);
5598 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5599 int ret = 0;
5600
5601 if (!amdgpu_device_supports_baco(dev))
5602 return -ENOTSUPP;
5603
5604 ret = amdgpu_dpm_baco_exit(adev);
5605 if (ret)
5606 return ret;
5607
5608 if (ras && adev->ras_enabled &&
5609 adev->nbio.funcs->enable_doorbell_interrupt)
5610 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5611
5612 if (amdgpu_passthrough(adev) &&
5613 adev->nbio.funcs->clear_doorbell_interrupt)
5614 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5615
5616 return 0;
5617 }
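/*
 * Illustrative sketch (not part of the driver): BACO enter/exit are used as a
 * bracketing pair around a low-power window. Hypothetical flow, error
 * handling elided:
 *
 *	static int my_enter_low_power(struct drm_device *dev)
 *	{
 *		int r = amdgpu_device_baco_enter(dev);
 *
 *		if (r)
 *			return r;
 *		... device sits in BACO until it is needed again ...
 *		return amdgpu_device_baco_exit(dev);
 *	}
 */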
5618
5619 /**
5620 * amdgpu_pci_error_detected - Called when a PCI error is detected.
5621 * @pdev: PCI device struct
5622 * @state: PCI channel state
5623 *
5624 * Description: Called when a PCI error is detected.
5625 *
5626 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
5627 */
5628 pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5629 {
5630 struct drm_device *dev = pci_get_drvdata(pdev);
5631 struct amdgpu_device *adev = drm_to_adev(dev);
5632 int i;
5633
5634 DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);
5635
5636 if (adev->gmc.xgmi.num_physical_nodes > 1) {
5637 DRM_WARN("No support for XGMI hive yet...");
5638 return PCI_ERS_RESULT_DISCONNECT;
5639 }
5640
5641 adev->pci_channel_state = state;
5642
5643 switch (state) {
5644 case pci_channel_io_normal:
5645 return PCI_ERS_RESULT_CAN_RECOVER;
5646 /* Fatal error, prepare for slot reset */
5647 case pci_channel_io_frozen:
5648 /*
5649 * Locking adev->reset_domain->sem will prevent any external access
5650 * to GPU during PCI error recovery
5651 */
5652 amdgpu_device_lock_reset_domain(adev->reset_domain);
5653 amdgpu_device_set_mp1_state(adev);
5654
5655 /*
5656 * Block any work scheduling as we do for regular GPU reset
5657 * for the duration of the recovery
5658 */
5659 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5660 struct amdgpu_ring *ring = adev->rings[i];
5661
5662 if (!ring || !ring->sched.thread)
5663 continue;
5664
5665 drm_sched_stop(&ring->sched, NULL);
5666 }
5667 atomic_inc(&adev->gpu_reset_counter);
5668 return PCI_ERS_RESULT_NEED_RESET;
5669 case pci_channel_io_perm_failure:
5670 /* Permanent error, prepare for device removal */
5671 return PCI_ERS_RESULT_DISCONNECT;
5672 }
5673
5674 return PCI_ERS_RESULT_NEED_RESET;
5675 }
5676
5677 /**
5678 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
5679 * @pdev: pointer to PCI device
5680 */
5681 pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
5682 {
5683
5684 DRM_INFO("PCI error: mmio enabled callback!!\n");
5685
5686 /* TODO - dump whatever for debugging purposes */
5687
5688 /* This is called only if amdgpu_pci_error_detected returns
5689 * PCI_ERS_RESULT_CAN_RECOVER. Reads/writes to the device still
5690 * work, so there is no need to reset the slot.
5691 */
5692
5693 return PCI_ERS_RESULT_RECOVERED;
5694 }
5695
5696 /**
5697 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
5698 * @pdev: PCI device struct
5699 *
5700 * Description: This routine is called by the pci error recovery
5701 * code after the PCI slot has been reset, just before we
5702 * should resume normal operations.
5703 */
5704 pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
5705 {
5706 struct drm_device *dev = pci_get_drvdata(pdev);
5707 struct amdgpu_device *adev = drm_to_adev(dev);
5708 int r, i;
5709 struct amdgpu_reset_context reset_context;
5710 u32 memsize;
5711 struct list_head device_list;
5712
5713 DRM_INFO("PCI error: slot reset callback!!\n");
5714
5715 memset(&reset_context, 0, sizeof(reset_context));
5716
5717 INIT_LIST_HEAD(&device_list);
5718 list_add_tail(&adev->reset_list, &device_list);
5719
5720 /* wait for asic to come out of reset */
5721 msleep(500);
5722
5723 /* Restore PCI config space */
5724 amdgpu_device_load_pci_state(pdev);
5725
5726 /* confirm ASIC came out of reset */
5727 for (i = 0; i < adev->usec_timeout; i++) {
5728 memsize = amdgpu_asic_get_config_memsize(adev);
5729
5730 if (memsize != 0xffffffff)
5731 break;
5732 udelay(1);
5733 }
5734 if (memsize == 0xffffffff) {
5735 r = -ETIME;
5736 goto out;
5737 }
5738
5739 reset_context.method = AMD_RESET_METHOD_NONE;
5740 reset_context.reset_req_dev = adev;
5741 set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
5742 set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
5743
5744 adev->no_hw_access = true;
5745 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5746 adev->no_hw_access = false;
5747 if (r)
5748 goto out;
5749
5750 r = amdgpu_do_asic_reset(&device_list, &reset_context);
5751
5752 out:
5753 if (!r) {
5754 if (amdgpu_device_cache_pci_state(adev->pdev))
5755 pci_restore_state(adev->pdev);
5756
5757 DRM_INFO("PCIe error recovery succeeded\n");
5758 } else {
5759 DRM_ERROR("PCIe error recovery failed, err:%d", r);
5760 amdgpu_device_unset_mp1_state(adev);
5761 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5762 }
5763
5764 return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
5765 }
5766
5767 /**
5768 * amdgpu_pci_resume() - resume normal ops after PCI reset
5769 * @pdev: pointer to PCI device
5770 *
5771 * Called when the error recovery driver tells us that it's
5772 * OK to resume normal operation.
5773 */
5774 void amdgpu_pci_resume(struct pci_dev *pdev)
5775 {
5776 struct drm_device *dev = pci_get_drvdata(pdev);
5777 struct amdgpu_device *adev = drm_to_adev(dev);
5778 int i;
5779
5780
5781 DRM_INFO("PCI error: resume callback!!\n");
5782
5783 /* Only continue execution for the case of pci_channel_io_frozen */
5784 if (adev->pci_channel_state != pci_channel_io_frozen)
5785 return;
5786
5787 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
5788 struct amdgpu_ring *ring = adev->rings[i];
5789
5790 if (!ring || !ring->sched.thread)
5791 continue;
5792
5793 drm_sched_start(&ring->sched, true);
5794 }
5795
5796 amdgpu_device_unset_mp1_state(adev);
5797 amdgpu_device_unlock_reset_domain(adev->reset_domain);
5798 }
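/*
 * Illustrative sketch: the four callbacks above plug into the PCI error
 * recovery core through a struct pci_error_handlers instance supplied with
 * the pci_driver registration (done elsewhere in the driver); roughly:
 *
 *	static const struct pci_error_handlers amdgpu_pci_err_handler = {
 *		.error_detected	= amdgpu_pci_error_detected,
 *		.mmio_enabled	= amdgpu_pci_mmio_enabled,
 *		.slot_reset	= amdgpu_pci_slot_reset,
 *		.resume		= amdgpu_pci_resume,
 *	};
 */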
5799
5800 bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
5801 {
5802 struct drm_device *dev = pci_get_drvdata(pdev);
5803 struct amdgpu_device *adev = drm_to_adev(dev);
5804 int r;
5805
5806 r = pci_save_state(pdev);
5807 if (!r) {
5808 kfree(adev->pci_state);
5809
5810 adev->pci_state = pci_store_saved_state(pdev);
5811
5812 if (!adev->pci_state) {
5813 DRM_ERROR("Failed to store PCI saved state");
5814 return false;
5815 }
5816 } else {
5817 DRM_WARN("Failed to save PCI state, err:%d\n", r);
5818 return false;
5819 }
5820
5821 return true;
5822 }
5823
5824 bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
5825 {
5826 struct drm_device *dev = pci_get_drvdata(pdev);
5827 struct amdgpu_device *adev = drm_to_adev(dev);
5828 int r;
5829
5830 if (!adev->pci_state)
5831 return false;
5832
5833 r = pci_load_saved_state(pdev, adev->pci_state);
5834
5835 if (!r) {
5836 pci_restore_state(pdev);
5837 } else {
5838 DRM_WARN("Failed to load PCI state, err:%d\n", r);
5839 return false;
5840 }
5841
5842 return true;
5843 }
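/*
 * Illustrative sketch (not part of the driver): the cache/load pair brackets
 * operations that clobber PCI config space, e.g. around a reset.
 * Hypothetical sequence:
 *
 *	amdgpu_device_cache_pci_state(adev->pdev);
 *	... reset happens, config space contents are lost ...
 *	amdgpu_device_load_pci_state(adev->pdev);
 */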
5844
5845 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
5846 struct amdgpu_ring *ring)
5847 {
5848 #ifdef CONFIG_X86_64
5849 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5850 return;
5851 #endif
5852 if (adev->gmc.xgmi.connected_to_cpu)
5853 return;
5854
5855 if (ring && ring->funcs->emit_hdp_flush)
5856 amdgpu_ring_emit_hdp_flush(ring);
5857 else
5858 amdgpu_asic_flush_hdp(adev, ring);
5859 }
5860
5861 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
5862 struct amdgpu_ring *ring)
5863 {
5864 #ifdef CONFIG_X86_64
5865 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
5866 return;
5867 #endif
5868 if (adev->gmc.xgmi.connected_to_cpu)
5869 return;
5870
5871 amdgpu_asic_invalidate_hdp(adev, ring);
5872 }
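/*
 * Illustrative sketch (not part of the driver; cache direction is an
 * assumption): an HDP flush makes CPU writes to VRAM visible to the GPU,
 * while an HDP invalidate makes GPU writes to VRAM visible to subsequent
 * CPU reads. Hypothetical sequence:
 *
 *	... CPU writes a command buffer into VRAM ...
 *	amdgpu_device_flush_hdp(adev, NULL);
 *	... GPU executes and writes a response into VRAM ...
 *	amdgpu_device_invalidate_hdp(adev, NULL);
 *	... CPU reads the response ...
 */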
5873
5874 int amdgpu_in_reset(struct amdgpu_device *adev)
5875 {
5876 return atomic_read(&adev->reset_domain->in_gpu_reset);
5877 }
5878
5879 /**
5880 * amdgpu_device_halt() - bring hardware to some kind of halt state
5881 *
5882 * @adev: amdgpu_device pointer
5883 *
5884 * Bring hardware to some kind of halt state so that no one can touch it
5885 * any more. It helps to maintain error context when an error occurs.
5886 * Compared to a simple hang, the system will stay stable at least for SSH
5887 * access. Then it should be trivial to inspect the hardware state and
5888 * see what's going on. Implemented as follows:
5889 *
5890 * 1. drm_dev_unplug() makes the device inaccessible to user space (IOCTLs, etc.),
5891 * clears all CPU mappings to the device, and disallows remappings through page faults
5892 * 2. amdgpu_irq_disable_all() disables all interrupts
5893 * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
5894 * 4. set adev->no_hw_access to avoid potential crashes after step 5
5895 * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
5896 * 6. pci_disable_device() and pci_wait_for_pending_transaction()
5897 * flush any in flight DMA operations
5898 */
5899 void amdgpu_device_halt(struct amdgpu_device *adev)
5900 {
5901 struct pci_dev *pdev = adev->pdev;
5902 struct drm_device *ddev = adev_to_drm(adev);
5903
5904 amdgpu_xcp_dev_unplug(adev);
5905 drm_dev_unplug(ddev);
5906
5907 amdgpu_irq_disable_all(adev);
5908
5909 amdgpu_fence_driver_hw_fini(adev);
5910
5911 adev->no_hw_access = true;
5912
5913 amdgpu_device_unmap_mmio(adev);
5914
5915 pci_disable_device(pdev);
5916 pci_wait_for_pending_transaction(pdev);
5917 }
5918
5919 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
5920 u32 reg)
5921 {
5922 unsigned long flags, address, data;
5923 u32 r;
5924
5925 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5926 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5927
5928 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5929 WREG32(address, reg * 4);
5930 (void)RREG32(address);
5931 r = RREG32(data);
5932 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5933 return r;
5934 }
5935
5936 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
5937 u32 reg, u32 v)
5938 {
5939 unsigned long flags, address, data;
5940
5941 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
5942 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
5943
5944 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
5945 WREG32(address, reg * 4);
5946 (void)RREG32(address);
5947 WREG32(data, v);
5948 (void)RREG32(data);
5949 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
5950 }
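/*
 * Illustrative sketch (not part of the driver): the two accessors above
 * implement the classic index/data pattern -- the index register selects a
 * PCIe-port register (byte address reg * 4) and the data register moves the
 * value; the dummy RREG32(address) posts the index write. A hypothetical
 * read-modify-write helper (note each accessor takes the lock separately,
 * so the update is not atomic as a whole):
 *
 *	static void my_pcie_port_update(struct amdgpu_device *adev,
 *					u32 reg, u32 mask, u32 val)
 *	{
 *		u32 tmp = amdgpu_device_pcie_port_rreg(adev, reg);
 *
 *		tmp = (tmp & ~mask) | (val & mask);
 *		amdgpu_device_pcie_port_wreg(adev, reg, tmp);
 *	}
 */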
5951
5952 /**
5953 * amdgpu_device_switch_gang - switch to a new gang
5954 * @adev: amdgpu_device pointer
5955 * @gang: the gang to switch to
5956 *
5957 * Try to switch to a new gang.
5958 * Returns: NULL if we switched to the new gang or a reference to the current
5959 * gang leader.
5960 */
5961 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
5962 struct dma_fence *gang)
5963 {
5964 struct dma_fence *old = NULL;
5965
5966 do {
5967 dma_fence_put(old);
5968 rcu_read_lock();
5969 old = dma_fence_get_rcu_safe(&adev->gang_submit);
5970 rcu_read_unlock();
5971
5972 if (old == gang)
5973 break;
5974
5975 if (!dma_fence_is_signaled(old))
5976 return old;
5977
5978 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
5979 old, gang) != old);
5980
5981 dma_fence_put(old);
5982 return NULL;
5983 }
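/*
 * Illustrative sketch (not part of the driver): per the contract documented
 * above, a hypothetical submitter keeps retrying until the previous gang
 * leader has signaled:
 *
 *	struct dma_fence *old;
 *
 *	while ((old = amdgpu_device_switch_gang(adev, new_gang))) {
 *		dma_fence_wait(old, false);
 *		dma_fence_put(old);
 *	}
 */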
5984
5985 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
5986 {
5987 switch (adev->asic_type) {
5988 #ifdef CONFIG_DRM_AMDGPU_SI
5989 case CHIP_HAINAN:
5990 #endif
5991 case CHIP_TOPAZ:
5992 /* chips with no display hardware */
5993 return false;
5994 #ifdef CONFIG_DRM_AMDGPU_SI
5995 case CHIP_TAHITI:
5996 case CHIP_PITCAIRN:
5997 case CHIP_VERDE:
5998 case CHIP_OLAND:
5999 #endif
6000 #ifdef CONFIG_DRM_AMDGPU_CIK
6001 case CHIP_BONAIRE:
6002 case CHIP_HAWAII:
6003 case CHIP_KAVERI:
6004 case CHIP_KABINI:
6005 case CHIP_MULLINS:
6006 #endif
6007 case CHIP_TONGA:
6008 case CHIP_FIJI:
6009 case CHIP_POLARIS10:
6010 case CHIP_POLARIS11:
6011 case CHIP_POLARIS12:
6012 case CHIP_VEGAM:
6013 case CHIP_CARRIZO:
6014 case CHIP_STONEY:
6015 /* chips with display hardware */
6016 return true;
6017 default:
6018 /* IP discovery */
6019 if (!adev->ip_versions[DCE_HWIP][0] ||
6020 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6021 return false;
6022 return true;
6023 }
6024 }
6025
6026 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6027 uint32_t inst, uint32_t reg_addr, char reg_name[],
6028 uint32_t expected_value, uint32_t mask)
6029 {
6030 uint32_t ret = 0;
6031 uint32_t old_ = 0;
6032 uint32_t tmp_ = RREG32(reg_addr);
6033 uint32_t loop = adev->usec_timeout;
6034
6035 while ((tmp_ & (mask)) != (expected_value)) {
6036 if (old_ != tmp_) {
6037 loop = adev->usec_timeout;
6038 old_ = tmp_;
6039 } else
6040 udelay(1);
6041 tmp_ = RREG32(reg_addr);
6042 loop--;
6043 if (!loop) {
6044 DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n",
6045 inst, reg_name, (uint32_t)expected_value,
6046 (uint32_t)(tmp_ & (mask)));
6047 ret = -ETIMEDOUT;
6048 break;
6049 }
6050 }
6051 return ret;
6052 }
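/*
 * Illustrative sketch (not part of the driver): poll a status register until
 * a READY bit is set, bounded by the device timeout. The register and mask
 * names are hypothetical.
 *
 *	r = amdgpu_device_wait_on_rreg(adev, 0, MY_STATUS_REG, "STATUS",
 *				       MY_STATUS__READY_MASK,
 *				       MY_STATUS__READY_MASK);
 *	if (r)
 *		return r;
 */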
6053