/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>
#include <linux/module.h>

#include <asm/smp.h>

#include "gem/i915_gem_dmabuf.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

MODULE_IMPORT_NS(DMA_BUF);

I915_SELFTEST_DECLARE(static bool force_different_devices;)

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_intel_bo(buf->priv);
}

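/*
 * Called for each importer via dma_buf_map_attachment(): build a fresh
 * copy of the object's sg_table so every attachment gets an independent
 * DMA mapping, then map the copy for the importing device.
 */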
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attach,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attach->dmabuf);
	struct sg_table *sgt;
	struct scatterlist *src, *dst;
	int ret, i;

	/*
	 * Make a copy of the object's sgt, so that we can make an independent
	 * mapping
	 */
	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err;
	}

	ret = sg_alloc_table(sgt, obj->mm.pages->orig_nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	dst = sgt->sgl;
	for_each_sg(obj->mm.pages->sgl, src, obj->mm.pages->orig_nents, i) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
	}

	ret = dma_map_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (ret)
		goto err_free_sg;

	return sgt;

err_free_sg:
	sg_free_table(sgt);
err_free:
	kfree(sgt);
err:
	return ERR_PTR(ret);
}

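/*
 * vmap hook: pin the object's pages and expose a write-back kernel
 * mapping of the whole object through the iosys_map cookie.
 */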
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf,
				struct iosys_map *map)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

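/* vunmap hook: flush the kernel mapping and drop the pin taken in vmap. */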
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf,
				   struct iosys_map *map)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);
}

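/*
 * mmap hook: objects that may live in local memory go through the
 * generic drm_gem_prime_mmap() path; shmem-backed objects hand the VMA
 * to the backing file so faults are serviced from the page cache.
 */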
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int ret;

	if (obj->base.size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (HAS_LMEM(i915))
		return drm_gem_prime_mmap(&obj->base, vma);

	if (!obj->base.filp)
		return -ENODEV;

	ret = call_mmap(obj->base.filp, vma);
	if (ret)
		return ret;

	vma_set_file(vma, obj->base.filp);

	return 0;
}

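/*
 * begin_cpu_access hook: move the object into the CPU domain, taking the
 * object lock and backing off on -EDEADLK as the ww locking scheme
 * requires.
 */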
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);
	if (!err) {
		err = i915_gem_object_set_to_cpu_domain(obj, write);
		i915_gem_object_unpin_pages(obj);
	}
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

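/*
 * end_cpu_access hook: flush CPU writes by moving the object back to the
 * GTT domain, using the same ww lock/backoff pattern as above.
 */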
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);
	if (!err) {
		err = i915_gem_object_set_to_gtt_domain(obj, false);
		i915_gem_object_unpin_pages(obj);
	}
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

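/*
 * attach hook: importers cannot reach device-local memory, so migrate
 * the object to system memory and keep its pages pinned for the
 * lifetime of the attachment. Objects that cannot migrate are rejected.
 */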
static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
				  struct dma_buf_attachment *attach)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
	struct i915_gem_ww_ctx ww;
	int err;

	if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
		return -EOPNOTSUPP;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
		if (err)
			continue;

		err = i915_gem_object_wait_migration(obj, 0);
		if (err)
			continue;

		err = i915_gem_object_pin_pages(obj);
	}

	return err;
}

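/* detach hook: drop the page pin taken in i915_gem_dmabuf_attach(). */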
static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
				   struct dma_buf_attachment *attach)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);

	i915_gem_object_unpin_pages(obj);
}

static const struct dma_buf_ops i915_dmabuf_ops = {
	.attach = i915_gem_dmabuf_attach,
	.detach = i915_gem_dmabuf_detach,
	.map_dma_buf = i915_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = i915_gem_dmabuf_mmap,
	.vmap = i915_gem_dmabuf_vmap,
	.vunmap = i915_gem_dmabuf_vunmap,
	.begin_cpu_access = i915_gem_begin_cpu_access,
	.end_cpu_access = i915_gem_end_cpu_access,
};

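/*
 * Export an i915 GEM object as a dma-buf, sharing the object's
 * reservation object so implicit fencing stays coherent between the
 * exporter and all importers.
 */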
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &i915_dmabuf_ops;
	exp_info.size = gem_obj->size;
	exp_info.flags = flags;
	exp_info.priv = gem_obj;
	exp_info.resv = obj->base.resv;

	if (obj->ops->dmabuf_export) {
		int ret = obj->ops->dmabuf_export(obj);

		if (ret)
			return ERR_PTR(ret);
	}

	return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

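/*
 * get_pages backend for imported objects: borrow the exporter's pages by
 * mapping our attachment. See the comment below on why a heavy-handed
 * wbinvd may be needed on platforms that can bypass the LLC.
 */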
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *sgt;

	assert_object_held(obj);

	sgt = dma_buf_map_attachment(obj->base.import_attach,
				     DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/*
	 * DG1 is special here since it still snoops transactions even with
	 * CACHE_NONE. This is not the case with other HAS_SNOOP platforms. We
	 * might need to revisit this as we add new discrete platforms.
	 *
	 * XXX: Consider doing a vmap flush or something, where possible.
	 * Currently we just do a heavy handed wbinvd_on_all_cpus() here since
	 * the underlying sg_table might not even point to struct pages, so we
	 * can't just call drm_clflush_sg or similar, like we do elsewhere in
	 * the driver.
	 */
	if (i915_gem_object_can_bypass_llc(obj) ||
	    (!HAS_LLC(i915) && !IS_DG1(i915)))
		wbinvd_on_all_cpus();

	__i915_gem_object_set_pages(obj, sgt);

	return 0;
}

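/* put_pages backend for imported objects: return the exporter's pages. */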
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
					     struct sg_table *sgt)
{
	dma_buf_unmap_attachment(obj->base.import_attach, sgt,
				 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
	.name = "i915_gem_object_dmabuf",
	.get_pages = i915_gem_object_get_pages_dmabuf,
	.put_pages = i915_gem_object_put_pages_dmabuf,
};

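/*
 * Import a dma-buf as an i915 GEM object. A buffer we exported ourselves
 * is short-circuited to a reference on the original object; anything
 * else gets a new object backed by the attachment's sg_table.
 */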
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	static struct lock_class_key lock_class;
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev &&
		    !I915_SELFTEST_ONLY(force_different_devices)) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			return &i915_gem_object_get(obj)->base;
		}
	}

	if (i915_gem_object_size_2big(dma_buf->size))
		return ERR_PTR(-E2BIG);

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc();
	if (!obj) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->base.import_attach = attach;
	obj->base.resv = dma_buf->resv;

	/* We use GTT as shorthand for a coherent domain, one that is
	 * neither in the GPU cache nor in the CPU cache, where all
	 * writes are immediately visible in memory. (That's not strictly
	 * true, but it's close! There are internal buffers such as the
	 * write-combined buffer or a delay through the chipset for GTT
	 * writes that do require us to treat GTT as a separate cache domain.)
	 */
	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif