/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 *
 * Based on amdgpu_mn, which bears the following notice:
 *
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/mmu_context.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_userptr.h"
#include "i915_scatterlist.h"

#ifdef CONFIG_MMU_NOTIFIER

/**
 * i915_gem_userptr_invalidate - callback to notify about mm change
 *
 * @mni: the range (mm) is about to update
 * @range: details on the invalidation
 * @cur_seq: Value to pass to mmu_interval_set_seq()
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * potentially dirty.
 */
static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
{
	struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	long r;

	if (!mmu_notifier_range_blockable(range))
		return false;

	write_lock(&i915->mm.notifier_lock);

	mmu_interval_set_seq(mni, cur_seq);

	write_unlock(&i915->mm.notifier_lock);

	/*
	 * We don't wait when the process is exiting. This is valid
	 * because the object will be cleaned up anyway.
	 *
	 * This is also temporarily required as a hack, because we
	 * cannot currently force non-consistent batch buffers to preempt
	 * and reschedule; waiting on them here would hang processes on exit.
	 */
	if (current->flags & PF_EXITING)
		return true;

	/* we will unbind on next submission, still have userptr pins */
	r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
				  MAX_SCHEDULE_TIMEOUT);
	if (r <= 0)
		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);

	return true;
}

static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
	.invalidate = i915_gem_userptr_invalidate,
};

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
	return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
					    obj->userptr.ptr, obj->base.size,
					    &i915_gem_userptr_notifier_ops);
}

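/*
 * Drop one reference on the pinned user-page array. When the last reference
 * goes away, the pages are unpinned and the page vector is freed. The caller
 * must hold the object lock (at least shared), matching the assert below.
 */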
static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
	struct page **pvec = NULL;

	assert_object_held_shared(obj);

	if (!--obj->userptr.page_ref) {
		pvec = obj->userptr.pvec;
		obj->userptr.pvec = NULL;
	}
	GEM_BUG_ON(obj->userptr.page_ref < 0);

	if (pvec) {
		const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(pvec, num_pages);
		kvfree(pvec);
	}
}

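/*
 * Build an sg_table for the user pages that were pinned by submit_init and
 * stashed in obj->userptr.pvec. If no pinned pages are present we return
 * -EAGAIN so that the caller repins and retries. Should DMA mapping of the
 * table fail, we retry once with single-page segments before giving up.
 */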
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	unsigned int max_segment = i915_sg_segment_size();
	struct sg_table *st;
	unsigned int sg_page_sizes;
	struct page **pvec;
	int ret;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (!obj->userptr.page_ref) {
		ret = -EAGAIN;
		goto err_free;
	}

	obj->userptr.page_ref++;
	pvec = obj->userptr.pvec;

alloc_table:
	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
						num_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (ret)
		goto err;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		goto err;
	}

	WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	sg_page_sizes = i915_sg_dma_sizes(st->sgl);
	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	i915_gem_object_userptr_drop_ref(obj);
err_free:
	kfree(st);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we know
	 * that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but may exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu-notifier can be invalidated for a
			 * migrate_page, that is already holding the lock
			 * on the page. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively trying
			 * to lock the page. We avoid that deadlock with
			 * a trylock_page() and in exchange we risk missing
			 * some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);

	i915_gem_object_userptr_drop_ref(obj);
}

static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
		return -EBUSY;

	assert_object_held(obj);

	pages = __i915_gem_object_unset_pages(obj);
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_userptr_put_pages(obj, pages);

	return err;
}

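/*
 * Called before submission: pin the current user pages outside the object
 * lock, then, under the lock, re-check the mmu_interval notifier sequence.
 * If the range was invalidated while we were pinning, return -EAGAIN so the
 * caller restarts; otherwise install the new page vector (if the object had
 * none) and set up the object's backing pages from it.
 */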
int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	unsigned int gup_flags = 0;
	unsigned long notifier_seq;
	int pinned, ret;

	if (obj->userptr.notifier.mm != current->mm)
		return -EFAULT;

	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
		i915_gem_object_unlock(obj);
		return 0;
	}

	ret = i915_gem_object_userptr_unbind(obj);
	i915_gem_object_unlock(obj);
	if (ret)
		return ret;

	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!i915_gem_object_is_readonly(obj))
		gup_flags |= FOLL_WRITE;

	pinned = ret = 0;
	while (pinned < num_pages) {
		ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
					  num_pages - pinned, gup_flags,
					  &pvec[pinned]);
		if (ret < 0)
			goto out;

		pinned += ret;
	}
	ret = 0;

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		goto out;

	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    !obj->userptr.page_ref ? notifier_seq :
				    obj->userptr.notifier_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	if (!obj->userptr.page_ref++) {
		obj->userptr.pvec = pvec;
		obj->userptr.notifier_seq = notifier_seq;
		pvec = NULL;
		ret = ____i915_gem_object_get_pages(obj);
	}

	obj->userptr.page_ref--;

out_unlock:
	i915_gem_object_unlock(obj);

out:
	if (pvec) {
		unpin_user_pages(pvec, pinned);
		kvfree(pvec);
	}

	return ret;
}

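/*
 * Called at the end of submission to re-check the notifier sequence sampled
 * in submit_init. A mismatch means an invalidation raced with us, so report
 * -EAGAIN and let the caller start over with submit_init.
 */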
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
{
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    obj->userptr.notifier_seq)) {
		/* We collided with the mmu notifier, need to retry */

		return -EAGAIN;
	}

	return 0;
}

int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_userptr_submit_init(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (!err) {
		/*
		 * Since we only check validity and do not actually use the
		 * pages, it doesn't matter if we collide with the mmu
		 * notifier, and -EAGAIN handling is not required.
		 */
		err = i915_gem_object_pin_pages(obj);
		if (!err)
			i915_gem_object_unpin_pages(obj);

		i915_gem_object_unlock(obj);
	}

	return err;
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	GEM_WARN_ON(obj->userptr.page_ref);

	mmu_interval_notifier_remove(&obj->userptr.notifier);
	obj->userptr.notifier.mm = NULL;
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
			const struct drm_i915_gem_pwrite *args)
{
	drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
		       const struct drm_i915_gem_pread *args)
{
	drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");

	return -EINVAL;
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.name = "i915_gem_object_userptr",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_IS_PROXY,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.pwrite = i915_gem_userptr_pwrite,
	.pread = i915_gem_userptr_pread,
	.release = i915_gem_userptr_release,
};

#endif

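/*
 * Walk the VMAs covering [addr, addr + len) under the mmap read lock and
 * return 0 only if the whole range is backed by ordinary mappings: any hole
 * or any VM_PFNMAP/VM_MIXEDMAP region makes the range unusable for userptr
 * and yields -EFAULT.
 */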
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
	const unsigned long end = addr + len;
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	mmap_read_lock(mm);
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		/* Check for holes, note that we also update the addr below */
		if (vma->vm_start > addr)
			break;

		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			break;

		if (vma->vm_end >= end) {
			ret = 0;
			break;
		}

		addr = vma->vm_end;
	}
	mmap_read_unlock(mm);

	return ret;
}

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
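/*
 * A minimal userspace sketch of the ioctl described above (illustrative
 * only; variable names are made up and error handling is omitted):
 *
 *	void *ptr;
 *	posix_memalign(&ptr, 4096, 2 * 4096);	// page-aligned, page-sized
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,
 *		.user_size = 2 * 4096,
 *		.flags = 0,
 *	};
 *	ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg);
 *	// On success, arg.handle is a GEM handle backed by ptr's pages.
 */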
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key __maybe_unused lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object __maybe_unused *obj;
	int __maybe_unused ret;
	u32 __maybe_unused handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and with broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED |
			    I915_USERPTR_PROBE))
		return -EINVAL;

	if (i915_gem_object_size_2big(args->user_size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
		return -ENODEV;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!to_gt(dev_priv)->vm->has_read_only)
			return -ENODEV;
	}

	if (args->flags & I915_USERPTR_PROBE) {
		/*
		 * Check that the range pointed to represents real struct
		 * pages and not iomappings (at this moment in time!)
		 */
		ret = probe_range(current->mm, args->user_ptr, args->user_size);
		if (ret)
			return ret;
	}

#ifdef CONFIG_MMU_NOTIFIER
	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.notifier_seq = ULONG_MAX;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mmu_notifier(obj);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
#else
	return -ENODEV;
#endif
}

int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_MMU_NOTIFIER
	rwlock_init(&dev_priv->mm.notifier_lock);
#endif

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
}