// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"

#include "i915_drv.h"
#include "i915_vma.h"
#include "intel_engine.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_ring.h"
#include "intel_timeline.h"

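/*
 * For reference, a sketch of the free-space rule this file relies on,
 * assuming the __intel_ring_space() helper in intel_ring.h:
 *
 *	space = (head - tail - CACHELINE_BYTES) & (size - 1);
 *
 * e.g. a 4 KiB ring with head == 0 and emit == 64 reports
 * 4096 - 64 - 64 = 3968 bytes free; one cacheline is always held back
 * so that head == tail unambiguously means "empty", never "full".
 */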
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}

void __intel_ring_pin(struct intel_ring *ring)
{
	GEM_BUG_ON(!atomic_read(&ring->pin_count));
	atomic_inc(&ring->pin_count);
}

int intel_ring_pin(struct intel_ring *ring, struct i915_gem_ww_ctx *ww)
{
	struct i915_vma *vma = ring->vma;
	unsigned int flags;
	void *addr;
	int ret;

	if (atomic_fetch_inc(&ring->pin_count))
		return 0;

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	if (i915_gem_object_is_stolen(vma->obj))
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	ret = i915_ggtt_pin(vma, ww, 0, flags);
	if (unlikely(ret))
		goto err_unpin;

	if (i915_vma_is_map_and_fenceable(vma)) {
		addr = (void __force *)i915_vma_pin_iomap(vma);
	} else {
		int type = i915_coherent_map_type(vma->vm->i915, vma->obj, false);

		addr = i915_gem_object_pin_map(vma->obj, type);
	}

	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err_ring;
	}

	i915_vma_make_unshrinkable(vma);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->emit);

	ring->vaddr = addr;
	return 0;

err_ring:
	i915_vma_unpin(vma);
err_unpin:
	atomic_dec(&ring->pin_count);
	return ret;
}

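/*
 * A usage sketch (hypothetical caller, using only functions from this
 * file): pin the ring before emitting commands, unpin once idle.
 *
 *	err = intel_ring_pin(ring, ww);
 *	if (err)
 *		return err;
 *	... emit via intel_ring_begin() / intel_ring_advance() ...
 *	intel_ring_unpin(ring);
 */
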
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	tail = intel_ring_wrap(ring, tail);
	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}

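/*
 * Drop one pin reference; on the final unpin, tear down the CPU
 * mapping and mark the backing store purgeable again (it was made
 * unshrinkable while pinned, see intel_ring_pin()).
 */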
void intel_ring_unpin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;

	if (!atomic_dec_and_test(&ring->pin_count))
		return;

	i915_vma_unset_ggtt_write(vma);
	if (i915_vma_is_map_and_fenceable(vma))
		i915_vma_unpin_iomap(vma);
	else
		i915_gem_object_unpin_map(vma->obj);

	i915_vma_make_purgeable(vma);
	i915_vma_unpin(vma);
}

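/*
 * Backing-store fallback order for the ring: device-local memory
 * (lmem) first, then stolen memory when a mappable aperture exists,
 * finally shmem-backed internal pages, which always work.
 */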
static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
{
	struct i915_address_space *vm = &ggtt->vm;
	struct drm_i915_private *i915 = vm->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
					  I915_BO_ALLOC_PM_VOLATILE);
	if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
	 * if supported by the platform's GGTT.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}

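/*
 * Ring sizes must be a power of two (the head/tail arithmetic masks
 * with size - 1) and must fit the RING_CTL page-count field, hence the
 * two GEM_BUG_ONs below.
 */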
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	kref_init(&ring->ref);
	ring->size = size;
	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);

	/*
	 * Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(i915) || IS_I845G(i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = create_ring_vma(engine->gt->ggtt, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}

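/*
 * On the ring->wrap computation above: it is the left shift that moves
 * the top ring-offset bit into the sign bit. A sketch of its consumer,
 * assuming the intel_ring_direction() helper in intel_ring.h:
 *
 *	return (next - prev) << ring->wrap;
 *
 * i.e. the sign of the result says which offset is ahead, valid while
 * the two are within half the ring of each other.
 */
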
void intel_ring_free(struct kref *ref)
{
	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);

	i915_vma_put(ring->vma);
	kfree(ring);
}

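/*
 * The ring is full: find the oldest request on this timeline whose
 * completion frees enough space. Requests complete in order, so
 * retiring up to it also retires everything this ring emitted earlier.
 */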
static noinline int
wait_for_space(struct intel_ring *ring,
	       struct intel_timeline *tl,
	       unsigned int bytes)
{
	struct i915_request *target;
	long timeout;

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	GEM_BUG_ON(list_empty(&tl->requests));
	list_for_each_entry(target, &tl->requests, link) {
		if (target->ring != ring)
			continue;

		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (GEM_WARN_ON(&target->link == &tl->requests))
		return -ENOSPC;

	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}

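/*
 * A worked example (hypothetical numbers) for the wrap logic below,
 * assuming effective_size == size == 4096 (i.e. not i830/i845): with
 * emit == 4000, bytes == 200 and reserved_space == 160,
 *
 *	remain_usable = 4096 - 4000 = 96
 *	total_bytes   = 200 + 160  = 360
 *
 * bytes (200) exceeds remain_usable (96), so the 96 byte tail is
 * filled with MI_NOOPs now (need_wrap = 96 | 1) and we wait for
 * 360 + 96 = 456 bytes in total.
 */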
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + rq->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of ringbuffer.
			 */
			total_bytes = rq->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ringbuffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then we never need
		 * to wait (which has the risk of failing with EINTR).
		 *
		 * See also i915_request_alloc() and i915_request_add().
		 */
		GEM_BUG_ON(!rq->reserved_space);

		ret = wait_for_space(ring,
				     i915_request_timeline(rq),
				     total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));

		/* Fill the tail with MI_NOOP */
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
		ring->space -= need_wrap;
		ring->emit = 0;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		memset32(cs, POISON_INUSE, bytes / sizeof(*cs));
	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}

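/*
 * A sketch of the arithmetic below, with hypothetical numbers: at
 * emit == 456, emit & 63 == 8 bytes == 2 dwords into the cacheline,
 * so CACHELINE_DWORDS (16) - 2 == 14 MI_NOOP dwords are emitted
 * (written as 7 qword pairs by memset64()).
 */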
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
	int num_dwords;
	void *cs;

	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

	cs = intel_ring_begin(rq, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
	/* cs is void *, so advance by bytes, not dwords */
	intel_ring_advance(rq, cs + num_dwords * sizeof(u32));

	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring.c"
#endif