/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__

#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>

struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_vm;

/* max number of rings */
#define AMDGPU_MAX_RINGS		28
#define AMDGPU_MAX_HWIP_RINGS		8
#define AMDGPU_MAX_GFX_RINGS		2
#define AMDGPU_MAX_COMPUTE_RINGS	8
#define AMDGPU_MAX_VCE_RINGS		3
#define AMDGPU_MAX_UVD_ENC_RINGS	2

enum amdgpu_ring_priority_level {
	AMDGPU_RING_PRIO_0,
	AMDGPU_RING_PRIO_1,
	AMDGPU_RING_PRIO_DEFAULT = 1,
	AMDGPU_RING_PRIO_2,
	AMDGPU_RING_PRIO_MAX
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void *)1ul)
#define AMDGPU_FENCE_OWNER_KFD		((void *)2ul)

#define AMDGPU_FENCE_FLAG_64BIT		(1 << 0)
#define AMDGPU_FENCE_FLAG_INT		(1 << 1)
#define AMDGPU_FENCE_FLAG_TC_WB_ONLY	(1 << 2)

#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)

#define AMDGPU_IB_POOL_SIZE	(1024 * 1024)

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX		= AMDGPU_HW_IP_GFX,
	AMDGPU_RING_TYPE_COMPUTE	= AMDGPU_HW_IP_COMPUTE,
	AMDGPU_RING_TYPE_SDMA		= AMDGPU_HW_IP_DMA,
	AMDGPU_RING_TYPE_UVD		= AMDGPU_HW_IP_UVD,
	AMDGPU_RING_TYPE_VCE		= AMDGPU_HW_IP_VCE,
	AMDGPU_RING_TYPE_UVD_ENC	= AMDGPU_HW_IP_UVD_ENC,
	AMDGPU_RING_TYPE_VCN_DEC	= AMDGPU_HW_IP_VCN_DEC,
	AMDGPU_RING_TYPE_VCN_ENC	= AMDGPU_HW_IP_VCN_ENC,
	AMDGPU_RING_TYPE_VCN_JPEG	= AMDGPU_HW_IP_VCN_JPEG,
	AMDGPU_RING_TYPE_KIQ,
	AMDGPU_RING_TYPE_MES
};

enum amdgpu_ib_pool_type {
	/* Normal submissions to the top of the pipeline. */
	AMDGPU_IB_POOL_DELAYED,
	/* Immediate submissions to the bottom of the pipeline. */
	AMDGPU_IB_POOL_IMMEDIATE,
	/* Direct submission to the ring buffer during init and reset. */
	AMDGPU_IB_POOL_DIRECT,

	AMDGPU_IB_POOL_MAX
};
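
/*
 * Illustrative sketch of picking an IB pool (not taken from a specific
 * caller; the size and the NULL vm are placeholder choices). It uses
 * amdgpu_ib_get()/amdgpu_ib_free(), declared at the end of this header:
 *
 *	struct amdgpu_ib ib;
 *	int r;
 *
 *	r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DELAYED, &ib);
 *	if (r)
 *		return r;
 *	...fill ib.ptr and set ib.length_dw...
 *	amdgpu_ib_free(adev, &ib, NULL);
 */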

struct amdgpu_ib {
	struct amdgpu_sa_bo		*sa_bo;
	uint32_t			length_dw;
	uint64_t			gpu_addr;
	uint32_t			*ptr;
	uint32_t			flags;
};

struct amdgpu_sched {
	u32				num_scheds;
	struct drm_gpu_scheduler	*sched[AMDGPU_MAX_HWIP_RINGS];
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint32_t			sync_seq;
	atomic_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct timer_list		fallback_timer;
	unsigned			num_fences_mask;
	spinlock_t			lock;
	struct dma_fence		**fences;
};

extern const struct drm_sched_backend_ops amdgpu_sched_ops;

void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
		      struct amdgpu_job *job, unsigned flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
			      uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
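
/*
 * Illustrative polling sequence built from the declarations above (a
 * sketch, not copied from a specific caller; the dword count and the
 * timeouts are placeholders): emit a fence without interrupts, commit,
 * then busy-wait for the sequence number.
 *
 *	uint32_t seq;
 *
 *	if (amdgpu_ring_alloc(ring, 32))
 *		return -ENOMEM;
 *	...emit packets...
 *	amdgpu_fence_emit_polling(ring, &seq, 1000);
 *	amdgpu_ring_commit(ring);
 *	if (amdgpu_fence_wait_polling(ring, seq, 1000) < 1)
 *		return -ETIME;
 */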

/*
 * Rings.
 */

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	enum amdgpu_ring_type	type;
	uint32_t		align_mask;
	u32			nop;
	bool			support_64bit_ptrs;
	bool			no_user_fence;
	bool			secure_submission_supported;
	unsigned		vmhub;
	unsigned		extra_dw;

	/* ring read/write ptr handling */
	u64 (*get_rptr)(struct amdgpu_ring *ring);
	u64 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib);
	int (*patch_cs_in_place)(struct amdgpu_cs_parser *p,
				 struct amdgpu_job *job,
				 struct amdgpu_ib *ib);
	/* constants to calculate how many DW are needed for an emit */
	unsigned emit_frame_size;
	unsigned emit_ib_size;
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_job *job,
			struct amdgpu_ib *ib,
			uint32_t flags);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring, long timeout);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
	void (*insert_start)(struct amdgpu_ring *ring);
	void (*insert_end)(struct amdgpu_ring *ring);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
	unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
	void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
	/* note usage for clock and power gating */
	void (*begin_use)(struct amdgpu_ring *ring);
	void (*end_use)(struct amdgpu_ring *ring);
	void (*emit_switch_buffer)(struct amdgpu_ring *ring);
	void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
	void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
			  uint32_t reg_val_offs);
	void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
	void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
			      uint32_t val, uint32_t mask);
	void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask);
	void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
				bool secure);
	/* Try to soft recover the ring to make the fence signal */
	void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
	int (*preempt_ib)(struct amdgpu_ring *ring);
	void (*emit_mem_sync)(struct amdgpu_ring *ring);
	void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
};
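
/*
 * Illustrative shape of a funcs table (foo_ring_funcs and its callbacks
 * are hypothetical; the real tables live in the per-IP files, and the
 * .nop value is a placeholder):
 *
 *	static const struct amdgpu_ring_funcs foo_ring_funcs = {
 *		.type = AMDGPU_RING_TYPE_COMPUTE,
 *		.align_mask = 0xff,
 *		.nop = 0x80000000,
 *		.get_rptr = foo_ring_get_rptr,
 *		.get_wptr = foo_ring_get_wptr,
 *		.set_wptr = foo_ring_set_wptr,
 *		.emit_ib = foo_ring_emit_ib,
 *		.emit_fence = foo_ring_emit_fence,
 *	};
 */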

struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct drm_gpu_scheduler	sched;

	struct amdgpu_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr_offs;
	u64			rptr_gpu_addr;
	volatile u32		*rptr_cpu_addr;
	u64			wptr;
	u64			wptr_old;
	unsigned		ring_size;
	unsigned		max_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint64_t		ptr_mask;
	uint32_t		buf_mask;
	u32			idx;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	uint64_t		mqd_gpu_addr;
	void			*mqd_ptr;
	uint64_t		eop_gpu_addr;
	u32			doorbell_index;
	bool			use_doorbell;
	bool			use_pollmem;
	unsigned		wptr_offs;
	u64			wptr_gpu_addr;
	volatile u32		*wptr_cpu_addr;
	unsigned		fence_offs;
	u64			fence_gpu_addr;
	volatile u32		*fence_cpu_addr;
	uint64_t		current_ctx;
	char			name[16];
	u32			trail_seq;
	unsigned		trail_fence_offs;
	u64			trail_fence_gpu_addr;
	volatile u32		*trail_fence_cpu_addr;
	unsigned		cond_exe_offs;
	u64			cond_exe_gpu_addr;
	volatile u32		*cond_exe_cpu_addr;
	unsigned		vm_inv_eng;
	struct dma_fence	*vmid_wait;
	bool			has_compute_vm_bug;
	bool			no_scheduler;
	int			hw_prio;
	unsigned		num_hw_submission;
	atomic_t		*sched_score;

	/* used for mes */
	bool			is_mes_queue;
	uint32_t		hw_queue_id;
	struct amdgpu_mes_ctx_data *mes_ctx;
};
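
/*
 * The scheduler is embedded in the ring, so scheduler callbacks recover
 * the ring with the to_amdgpu_ring() container_of wrapper defined above
 * (sched_job here stands for the callback argument):
 *
 *	struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
 */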

#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
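
/*
 * Illustrative use of the conditional-execution hooks (a sketch of the
 * preemption pattern; error handling omitted): reserve the COND_EXEC
 * slot before the frame, then patch it once the frame size is known.
 *
 *	unsigned patch_offset = ~0;
 *
 *	if (ring->funcs->init_cond_exec)
 *		patch_offset = amdgpu_ring_init_cond_exec(ring);
 *	...emit the frame...
 *	if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
 *		amdgpu_ring_patch_cond_exec(ring, patch_offset);
 */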

int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t val0,
						uint32_t reg1, uint32_t val1);
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence);

static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
						     bool cond_exec)
{
	*ring->cond_exe_cpu_addr = cond_exec;
}

static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
	int i = 0;

	while (i <= ring->buf_mask)
		ring->ring[i++] = ring->funcs->nop;
}

static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	ring->ring[ring->wptr++ & ring->buf_mask] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
}
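
/*
 * Illustrative direct-submission sequence (a sketch, not from a specific
 * caller; the dword count is a placeholder): reserve space, emit dwords,
 * then publish the new wptr with amdgpu_ring_commit(), or roll back with
 * amdgpu_ring_undo() on failure.
 *
 *	if (amdgpu_ring_alloc(ring, 16))
 *		return -ENOMEM;
 *	amdgpu_ring_write(ring, ring->funcs->nop);
 *	...
 *	amdgpu_ring_commit(ring);
 */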

static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
					      void *src, int count_dw)
{
	unsigned occupied, chunk1, chunk2;
	void *dst;

	if (unlikely(ring->count_dw < count_dw))
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");

	occupied = ring->wptr & ring->buf_mask;
	dst = (void *)&ring->ring[occupied];
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;
	chunk1 <<= 2;
	chunk2 <<= 2;

	if (chunk1)
		memcpy(dst, src, chunk1);

	if (chunk2) {
		src += chunk1;
		dst = (void *)ring->ring;
		memcpy(dst, src, chunk2);
	}

	ring->wptr += count_dw;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw -= count_dw;
}
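
/*
 * Worked example of the split above (illustrative numbers): with
 * buf_mask = 7 (an 8 dword ring), wptr at 6 and count_dw = 4, occupied
 * is 6, so chunk1 = 2 dwords land in slots 6..7 and chunk2 = 2 dwords
 * wrap around to slots 0..1; the << 2 then scales both counts to bytes
 * for memcpy().
 */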

#define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset)			\
	(ring->is_mes_queue && ring->mes_ctx ?				\
	 (ring->mes_ctx->meta_data_gpu_addr + offset) : 0)

#define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset)			\
	(ring->is_mes_queue && ring->mes_ctx ?				\
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \
	 NULL)

int amdgpu_ring_test_helper(struct amdgpu_ring *ring);

void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring);

int amdgpu_ring_init_mqd(struct amdgpu_ring *ring);

static inline u32 amdgpu_ib_get_value(struct amdgpu_ib *ib, int idx)
{
	return ib->ptr[idx];
}

static inline void amdgpu_ib_set_value(struct amdgpu_ib *ib, int idx,
				       uint32_t value)
{
	ib->ptr[idx] = value;
}

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, enum amdgpu_ib_pool_type pool,
		  struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);

#endif