/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

/**
 * DOC: MMU Notifier
 *
 * For coherent userptr handling the driver registers an MMU notifier to be
 * informed about updates on the page tables of a process.
 *
 * When somebody tries to invalidate the page tables we block the update until
 * all operations on the pages in question are completed, then those pages are
 * marked as accessed and also dirty if it wasn't a read-only access.
 *
 * New command submissions using the userptrs in question are delayed until
 * all page table invalidations are completed and we once more see a coherent
 * process address space.
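 *
 * The typical flow is to register the BO with amdgpu_hmm_register() when the
 * userptr is created, fault in and snapshot the backing pages with
 * amdgpu_hmm_range_get_pages() at command submission time, and finally check
 * with amdgpu_hmm_range_get_pages_done() that no invalidation raced with the
 * snapshot before the work based on it is committed.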
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_hmm.h"

#define MAX_WALK_BYTE	(2UL << 30)

/**
 * amdgpu_hmm_invalidate_gfx - callback to notify about mm change
 *
 * @mni: the notifier of the range (mm) that is about to be updated
 * @range: details on the invalidation
 * @cur_seq: value to pass to mmu_interval_set_seq()
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * potentially dirty.
 */
static bool amdgpu_hmm_invalidate_gfx(struct mmu_interval_notifier *mni,
				      const struct mmu_notifier_range *range,
				      unsigned long cur_seq)
{
	struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	long r;

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&adev->notifier_lock);

	mmu_interval_set_seq(mni, cur_seq);

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				  false, MAX_SCHEDULE_TIMEOUT);
	mutex_unlock(&adev->notifier_lock);
	if (r <= 0)
		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
	return true;
}

static const struct mmu_interval_notifier_ops amdgpu_hmm_gfx_ops = {
	.invalidate = amdgpu_hmm_invalidate_gfx,
};

/**
 * amdgpu_hmm_invalidate_hsa - callback to notify about mm change
 *
 * @mni: the notifier of the range (mm) that is about to be updated
 * @range: details on the invalidation
 * @cur_seq: value to pass to mmu_interval_set_seq()
 *
 * We temporarily evict the BO attached to this range. This necessitates
 * evicting all user-mode queues of the process.
 */
static bool amdgpu_hmm_invalidate_hsa(struct mmu_interval_notifier *mni,
				      const struct mmu_notifier_range *range,
				      unsigned long cur_seq)
{
	struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);

	if (!mmu_notifier_range_blockable(range))
		return false;

	amdgpu_amdkfd_evict_userptr(mni, cur_seq, bo->kfd_bo);

	return true;
}

static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
	.invalidate = amdgpu_hmm_invalidate_hsa,
};

/**
 * amdgpu_hmm_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr address we should monitor
 *
 * Registers an mmu_interval_notifier for the given BO at the specified
 * address. Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
{
	if (bo->kfd_bo)
		return mmu_interval_notifier_insert(&bo->notifier, current->mm,
						    addr, amdgpu_bo_size(bo),
						    &amdgpu_hmm_hsa_ops);
	return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
					    amdgpu_bo_size(bo),
					    &amdgpu_hmm_gfx_ops);
}
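
/*
 * Example (an illustrative sketch, not compiled here): a userptr creation
 * path is expected to pair amdgpu_hmm_register() with a matching
 * amdgpu_hmm_unregister() on release. args->addr stands in for whatever
 * user address the caller wants monitored:
 *
 *	r = amdgpu_hmm_register(bo, args->addr);
 *	if (r)
 *		goto release_object;
 *	...
 *	amdgpu_hmm_unregister(bo);
 */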

/**
 * amdgpu_hmm_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of mmu notifier updates from the buffer object.
 */
void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
{
	if (!bo->notifier.mm)
		return;
	mmu_interval_notifier_remove(&bo->notifier);
	bo->notifier.mm = NULL;
}

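/**
 * amdgpu_hmm_range_get_pages - fault in and snapshot the pages of a range
 *
 * @notifier: notifier registered for the address range
 * @start: start of the range in the CPU address space
 * @npages: number of pages in the range
 * @readonly: true if the pages will only be read, so no write faulting is
 *	needed
 * @owner: opaque pointer identifying device private pages owned by the caller
 * @pages: optional array which is filled with the backing pages on success
 * @phmm_range: the allocated hmm_range is returned here
 *
 * Walks the range in chunks of at most MAX_WALK_BYTE and faults in all pages
 * with hmm_range_fault(). The resulting snapshot is only known to be valid
 * once amdgpu_hmm_range_get_pages_done() has confirmed that no invalidation
 * raced with the walk. Returns 0 on success, a negative error code otherwise.
 */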
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
			       uint64_t start, uint64_t npages, bool readonly,
			       void *owner, struct page **pages,
			       struct hmm_range **phmm_range)
{
	struct hmm_range *hmm_range;
	unsigned long end;
	unsigned long timeout;
	unsigned long i;
	unsigned long *pfns;
	int r = 0;

	hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
	if (unlikely(!hmm_range))
		return -ENOMEM;

	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
	if (unlikely(!pfns)) {
		r = -ENOMEM;
		goto out_free_range;
	}

	hmm_range->notifier = notifier;
	hmm_range->default_flags = HMM_PFN_REQ_FAULT;
	if (!readonly)
		hmm_range->default_flags |= HMM_PFN_REQ_WRITE;
	hmm_range->hmm_pfns = pfns;
	hmm_range->start = start;
	end = start + npages * PAGE_SIZE;
	hmm_range->dev_private_owner = owner;

	do {
		hmm_range->end = min(hmm_range->start + MAX_WALK_BYTE, end);

		pr_debug("hmm range: start = 0x%lx, end = 0x%lx\n",
			 hmm_range->start, hmm_range->end);

		/* Assume faulting in each 128MB of the range takes at most one second */
		timeout = max((hmm_range->end - hmm_range->start) >> 27, 1UL);
		timeout *= HMM_RANGE_DEFAULT_TIMEOUT;
		timeout = jiffies + msecs_to_jiffies(timeout);

retry:
		hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
		r = hmm_range_fault(hmm_range);
		if (unlikely(r)) {
			/*
			 * FIXME: This timeout should encompass the retry from
			 * mmu_interval_read_retry() as well.
			 */
			if (r == -EBUSY && !time_after(jiffies, timeout))
				goto retry;
			goto out_free_pfns;
		}

		if (hmm_range->end == end)
			break;
		hmm_range->hmm_pfns += MAX_WALK_BYTE >> PAGE_SHIFT;
		hmm_range->start = hmm_range->end;
		schedule();
	} while (hmm_range->end < end);

	hmm_range->start = start;
	hmm_range->hmm_pfns = pfns;

	/*
	 * Due to default_flags, all pages are HMM_PFN_VALID or
	 * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
	 * the notifier_lock, and mmu_interval_read_retry() must be done first.
	 */
	for (i = 0; pages && i < npages; i++)
		pages[i] = hmm_pfn_to_page(pfns[i]);

	*phmm_range = hmm_range;

	return 0;

out_free_pfns:
	kvfree(pfns);
out_free_range:
	kfree(hmm_range);

	return r;
}

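/**
 * amdgpu_hmm_range_get_pages_done - check the snapshot and free the hmm_range
 *
 * @hmm_range: the range allocated by amdgpu_hmm_range_get_pages()
 *
 * Frees the hmm_range and its pfn array. Returns true if an invalidation
 * raced with the page walk, meaning the snapshot is stale and the caller
 * must retry, false if the snapshot was still valid when checked.
 */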
bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
{
	bool r;

	r = mmu_interval_read_retry(hmm_range->notifier,
				    hmm_range->notifier_seq);
	kvfree(hmm_range->hmm_pfns);
	kfree(hmm_range);

	return r;
}
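
/*
 * Example (an illustrative sketch, not compiled here): a caller is expected
 * to use the two functions above as a retry loop, redoing the snapshot and
 * any work based on it whenever an invalidation raced with the walk. The
 * locking around the final check is elided; see the FIXME in
 * amdgpu_hmm_range_get_pages() about the notifier_lock.
 *
 *	struct hmm_range *range;
 *	int r;
 *
 *	do {
 *		r = amdgpu_hmm_range_get_pages(&bo->notifier, start, npages,
 *					       false, owner, pages, &range);
 *		if (r)
 *			return r;
 *
 *		... set up the GPU mapping from pages[] ...
 *
 *	} while (amdgpu_hmm_range_get_pages_done(range));
 */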