// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt

static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

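	/*
	 * A single IB holds both the SDMA copy packets (num_dw dwords) and,
	 * immediately after them, the GART PTEs (8 bytes per page) that the
	 * copy packet below copies into the GART table.
	 */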
	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

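	/*
	 * Write the PTEs into the IB right behind the copy packets; the copy
	 * emitted above moves them into the GART table, so the mapping is in
	 * place before any later copy on the same ring uses it.
	 */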
	cpu_addr = &job->ibs[0].ptr[num_dw];

	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * svm_migrate_copy_memory_gart - use SDMA to copy data between RAM and VRAM
 *
 * @adev: amdgpu device the SDMA ring is running on
 * @sys: system memory DMA addresses to copy from/to
 * @vram: VRAM DMA addresses to copy to/from
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, SDMA fence to signal when the copy is done
 *
 * The RAM side is addressed through contiguous GART entries mapping the RAM
 * pages; the VRAM side uses the direct mapping of the VRAM pages, which must
 * be npages contiguous pages. GART updates and buffer copies share the same
 * SDMA ring, the copy is split into chunks of at most GTT_MAX_PAGES pages and
 * all SDMA operations are serialized, so waiting for the returned fence of
 * the last SDMA operation guarantees that the whole copy has finished.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */

static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);

		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, false);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}

/**
 * svm_migrate_copy_done - wait for the SDMA memory copy to complete
 *
 * @adev: amdgpu device the SDMA memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into multiple
 * SDMA operations, this is the fence of the last SDMA operation.
 *
 * Context: called after svm_migrate_copy_memory
 *
 * Return:
 * 0 - success
 * otherwise - error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
}

static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	zone_device_page_init(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.dev->pgmap.range.start);
}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
	unsigned long cpages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
			cpages++;
	}
	return cpages;
}

static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
	unsigned long upages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			upages++;
	}
	return upages;
}

static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch)
{
	uint64_t npages = migrate->npages;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	r = svm_range_vram_node_new(adev, prange, true);
	if (r) {
		dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
		goto out;
	}

	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
			 npages << PAGE_SHIFT, &cursor);
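	/*
	 * Walk the system pages and the VRAM cursor together, batching
	 * contiguous system pages into a single GART mapping and SDMA copy.
	 * A hole in the source (no page, or a page already in device memory)
	 * or the end of the current VRAM cursor chunk flushes the pending
	 * batch.
	 */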
	for (i = j = 0; i < npages; i++) {
		struct page *spage;

		dst[i] = cursor.start + (j << PAGE_SHIFT);
		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
		svm_migrate_get_vram_page(prange, migrate->dst[i]);
		migrate->dst[i] = migrate_pfn(migrate->dst[i]);

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(adev->dev, "%s: fail %d dma_map_page\n",
					__func__, r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i--) {
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif
out:
	return r;
}

static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end, uint32_t trigger)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	unsigned long cpages = 0;
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

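	/*
	 * One allocation backs migrate.src[npages], migrate.dst[npages] and
	 * the scratch area used by svm_migrate_copy_to_vram(): npages DMA
	 * addresses for the system pages plus npages VRAM destination
	 * addresses.
	 */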
	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      0, adev->kfd.dev->id, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
	migrate_vma_pages(&migrate);

	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    0, adev->kfd.dev->id, trigger);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
	svm_range_free_dma_mappings(prange);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);

		return cpages;
	}
	return r;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: the process mm structure
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			struct mm_struct *mm, uint32_t trigger)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	struct amdgpu_device *adev;
	unsigned long cpages = 0;
	long r = 0;

	if (prange->actual_loc == best_loc) {
		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
			 prange->svms, prange->start, prange->last, best_loc);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, best_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
		 prange->start, prange->last, best_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			cpages += r;
		}
		addr = next;
	}

	if (cpages)
		prange->actual_loc = best_loc;

	return r < 0 ? r : 0;
}

static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref_async(svm_bo);
	}
}

static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = prange->start << PAGE_SHIFT;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

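	/*
	 * Batch VRAM source pages whose device addresses are contiguous; a
	 * non-device page or a gap in the VRAM addresses flushes the pending
	 * batch with one GART mapping and SDMA copy to the newly allocated
	 * system pages.
	 */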
	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}

/**
 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
 *
 * @adev: amdgpu device to migrate from
 * @prange: svm range structure
 * @vma: vm_area_struct that range [start, end] belongs to
 * @start: range start virtual address
 * @end: range end virtual address (exclusive)
 * @trigger: reason of migration
 * @fault_page: the page that triggered the CPU page fault, or NULL
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - success with all pages migrated
 * negative values - indicate error
 * positive values - partial migration, number of pages not migrated
 */
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end,
		       uint32_t trigger, struct page *fault_page)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	unsigned long upages = npages;
	unsigned long cpages = 0;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
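	/*
	 * VRAM that is XGMI-connected to the CPU is registered as coherent
	 * device memory, otherwise as device-private memory (see
	 * svm_migrate_init()), so select the matching source pages here.
	 */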
	if (adev->gmc.xgmi.connected_to_cpu)
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
	else
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	migrate.fault_page = fault_page;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      adev->kfd.dev->id, 0, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		upages = svm_migrate_unsuccessful_pages(&migrate);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	upages = svm_migrate_unsuccessful_pages(&migrate);
	pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 upages, cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    adev->kfd.dev->id, 0, trigger);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
	}
	return r ? r : upages;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 * @fault_page: the page that triggered the CPU page fault, or NULL
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
			    uint32_t trigger, struct page *fault_page)
{
	struct amdgpu_device *adev;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long upages = 0;
	long r = 0;

	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n",
			 prange->actual_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, prange->start, prange->last,
		 prange->actual_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start) {
			pr_debug("failed to find vma for prange %p\n", prange);
			r = -EFAULT;
			break;
		}

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger,
					   fault_page);
		if (r < 0) {
			pr_debug("failed %ld to migrate prange %p\n", r, prange);
			break;
		} else {
			upages += r;
		}
		addr = next;
	}

	if (r >= 0 && !upages) {
		svm_range_vram_node_free(prange);
		prange->actual_loc = 0;
	}

	return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			 struct mm_struct *mm, uint32_t trigger)
{
	int r, retries = 3;

	/*
	 * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
	 * system memory as migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

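	/*
	 * VRAM-to-VRAM migration currently goes through system memory: evict
	 * the range to RAM first, retrying a few times if not all pages could
	 * be migrated, then migrate it from RAM to the destination GPU.
	 */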
	do {
		r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;

	return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
}

int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    struct mm_struct *mm, uint32_t trigger)
{
	if (!prange->actual_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, mm, trigger);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, mm, trigger);
}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault vma, address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application of a SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long addr = vmf->address;
	struct svm_range_bo *svm_bo;
	enum svm_work_list_ops op;
	struct svm_range *parent;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	svm_bo = vmf->page->zone_device_data;
	if (!svm_bo) {
		pr_debug("failed get device page at addr 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
		pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
		return VM_FAULT_SIGBUS;
	}

	mm = svm_bo->eviction_fence->mm;
	if (mm != vmf->vma->vm_mm)
		pr_debug("addr 0x%lx is COW mapping in child process\n", addr);

	p = kfd_lookup_process_by_mm(mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		r = VM_FAULT_SIGBUS;
		goto out_mmput;
	}
	if (READ_ONCE(p->svms.faulting_task) == current) {
		pr_debug("skipping ram migration\n");
		r = 0;
		goto out_unref_process;
	}

	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
	addr >>= PAGE_SHIFT;

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, &parent);
	if (!prange) {
		pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
		r = -EFAULT;
		goto out_unlock_svms;
	}

	mutex_lock(&parent->migrate_mutex);
	if (prange != parent)
		mutex_lock_nested(&prange->migrate_mutex, 1);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	svm_range_lock(parent);
	if (prange != parent)
		mutex_lock_nested(&prange->lock, 1);
	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
	if (prange != parent)
		mutex_unlock(&prange->lock);
	svm_range_unlock(parent);
	if (r) {
		pr_debug("failed %d to split range by granularity\n", r);
		goto out_unlock_prange;
	}

	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm,
				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
				    vmf->page);
	if (r)
		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
			 r, prange->svms, prange, prange->start, prange->last);

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && parent == prange)
		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
	else
		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
	svm_range_add_list_work(&p->svms, parent, mm, op);
	schedule_deferred_list_work(&p->svms);

out_unlock_prange:
	if (prange != parent)
		mutex_unlock(&prange->migrate_mutex);
	mutex_unlock(&parent->migrate_mutex);
out_unlock_svms:
	mutex_unlock(&p->svms.lock);
out_unref_process:
	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
	kfd_unref_process(p);
out_mmput:
	mmput(mm);
	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free = svm_migrate_page_free,
	.migrate_to_ram = svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) of system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

int svm_migrate_init(struct amdgpu_device *adev)
{
	struct kfd_dev *kfddev = adev->kfd.dev;
	struct dev_pagemap *pgmap;
	struct resource *res = NULL;
	unsigned long size;
	void *r;

	/* Page migration works on Vega10 or newer */
	if (!KFD_IS_SOC15(kfddev))
		return -EINVAL;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * should remove reserved size
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
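	/*
	 * XGMI-connected VRAM is visible to the CPU at its aperture and is
	 * registered as coherent device memory; otherwise a free region of
	 * the physical address space is requested for device-private pages.
	 */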
	if (adev->gmc.xgmi.connected_to_cpu) {
		pgmap->range.start = adev->gmc.aper_base;
		pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
		pgmap->type = MEMORY_DEVICE_COHERENT;
	} else {
		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
		if (IS_ERR(res))
			return -ENOMEM;
		pgmap->range.start = res->start;
		pgmap->range.end = res->end;
		pgmap->type = MEMORY_DEVICE_PRIVATE;
	}

	pgmap->nr_range = 1;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = 0;
	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");
		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start,
						res->end - res->start + 1);
		/* Disable SVM support capability */
		pgmap->type = 0;
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	svm_range_set_max_pages(adev);

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}