1 /*
2 * Copyright 2017 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22 #define NVKM_VMM_LEVELS_MAX 5
23 #include "vmm.h"
24
25 #include <subdev/fb.h>
26
27 static void
nvkm_vmm_pt_del(struct nvkm_vmm_pt **ppgt)
29 {
30 struct nvkm_vmm_pt *pgt = *ppgt;
31 if (pgt) {
32 kvfree(pgt->pde);
33 kfree(pgt);
34 *ppgt = NULL;
35 }
36 }
37
38
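/* Allocate the software shadow for a single page table.
 *
 * Levels with dual (large/small) page tables get a per-LPTE counter array
 * tacked onto the end of the allocation, tracking how many valid SPTEs sit
 * underneath each LPTE (plus state flags).  PGD-type levels instead get a
 * separately-allocated array of software PT pointers, one per PDE.
 */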
39 static struct nvkm_vmm_pt *
nvkm_vmm_pt_new(const struct nvkm_vmm_desc *desc, bool sparse,
	const struct nvkm_vmm_page *page)
42 {
43 const u32 pten = 1 << desc->bits;
44 struct nvkm_vmm_pt *pgt;
45 u32 lpte = 0;
46
47 if (desc->type > PGT) {
48 if (desc->type == SPT) {
49 const struct nvkm_vmm_desc *pair = page[-1].desc;
50 lpte = pten >> (desc->bits - pair->bits);
51 } else {
52 lpte = pten;
53 }
54 }
55
56 if (!(pgt = kzalloc(sizeof(*pgt) + lpte, GFP_KERNEL)))
57 return NULL;
58 pgt->page = page ? page->shift : 0;
59 pgt->sparse = sparse;
60
61 if (desc->type == PGD) {
62 pgt->pde = kvcalloc(pten, sizeof(*pgt->pde), GFP_KERNEL);
63 if (!pgt->pde) {
64 kfree(pgt);
65 return NULL;
66 }
67 }
68
69 return pgt;
70 }
71
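/* State for nvkm_vmm_iter()'s page-tree walk: the current PTE index and
 * software page-table at each level, the number of PTEs still to visit,
 * and how far up the tree changes have been made since the last flush.
 */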
72 struct nvkm_vmm_iter {
73 const struct nvkm_vmm_page *page;
74 const struct nvkm_vmm_desc *desc;
75 struct nvkm_vmm *vmm;
76 u64 cnt;
77 u16 max, lvl;
78 u32 pte[NVKM_VMM_LEVELS_MAX];
79 struct nvkm_vmm_pt *pt[NVKM_VMM_LEVELS_MAX];
80 int flush;
81 };
82
83 #ifdef CONFIG_NOUVEAU_DEBUG_MMU
84 static const char *
nvkm_vmm_desc_type(const struct nvkm_vmm_desc *desc)
86 {
87 switch (desc->type) {
88 case PGD: return "PGD";
89 case PGT: return "PGT";
90 case SPT: return "SPT";
91 case LPT: return "LPT";
92 default:
93 return "UNKNOWN";
94 }
95 }
96
97 static void
nvkm_vmm_trace(struct nvkm_vmm_iter *it, char *buf)
99 {
100 int lvl;
101 for (lvl = it->max; lvl >= 0; lvl--) {
102 if (lvl >= it->lvl)
103 buf += sprintf(buf, "%05x:", it->pte[lvl]);
104 else
105 buf += sprintf(buf, "xxxxx:");
106 }
107 }
108
109 #define TRA(i,f,a...) do { \
110 char _buf[NVKM_VMM_LEVELS_MAX * 7]; \
111 struct nvkm_vmm_iter *_it = (i); \
112 nvkm_vmm_trace(_it, _buf); \
113 VMM_TRACE(_it->vmm, "%s "f, _buf, ##a); \
} while (0)
115 #else
116 #define TRA(i,f,a...)
117 #endif
118
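/* Remember the shallowest tree level modified since the last flush;
 * nvkm_vmm_flush() hands this depth to the backend so a single flush
 * can cover every update made so far.
 */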
119 static inline void
nvkm_vmm_flush_mark(struct nvkm_vmm_iter *it)
121 {
122 it->flush = min(it->flush, it->max - it->lvl);
123 }
124
125 static inline void
nvkm_vmm_flush(struct nvkm_vmm_iter *it)
127 {
128 if (it->flush != NVKM_VMM_LEVELS_MAX) {
129 if (it->vmm->func->flush) {
130 TRA(it, "flush: %d", it->flush);
131 it->vmm->func->flush(it->vmm, it->flush);
132 }
133 it->flush = NVKM_VMM_LEVELS_MAX;
134 }
135 }
136
137 static void
nvkm_vmm_unref_pdes(struct nvkm_vmm_iter *it)
139 {
140 const struct nvkm_vmm_desc *desc = it->desc;
141 const int type = desc[it->lvl].type == SPT;
142 struct nvkm_vmm_pt *pgd = it->pt[it->lvl + 1];
143 struct nvkm_vmm_pt *pgt = it->pt[it->lvl];
144 struct nvkm_mmu_pt *pt = pgt->pt[type];
145 struct nvkm_vmm *vmm = it->vmm;
146 u32 pdei = it->pte[it->lvl + 1];
147
148 /* Recurse up the tree, unreferencing/destroying unneeded PDs. */
149 it->lvl++;
150 if (--pgd->refs[0]) {
151 const struct nvkm_vmm_desc_func *func = desc[it->lvl].func;
152 /* PD has other valid PDEs, so we need a proper update. */
153 TRA(it, "PDE unmap %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
154 pgt->pt[type] = NULL;
155 if (!pgt->refs[!type]) {
156 /* PDE no longer required. */
157 if (pgd->pt[0]) {
158 if (pgt->sparse) {
159 func->sparse(vmm, pgd->pt[0], pdei, 1);
160 pgd->pde[pdei] = NVKM_VMM_PDE_SPARSE;
161 } else {
162 func->unmap(vmm, pgd->pt[0], pdei, 1);
163 pgd->pde[pdei] = NULL;
164 }
165 } else {
166 /* Special handling for Tesla-class GPUs,
167 * where there's no central PD, but each
168 * instance has its own embedded PD.
169 */
170 func->pde(vmm, pgd, pdei);
171 pgd->pde[pdei] = NULL;
172 }
173 } else {
174 /* PDE was pointing at dual-PTs and we're removing
175 * one of them, leaving the other in place.
176 */
177 func->pde(vmm, pgd, pdei);
178 }
179
180 /* GPU may have cached the PTs, flush before freeing. */
181 nvkm_vmm_flush_mark(it);
182 nvkm_vmm_flush(it);
183 } else {
184 /* PD has no valid PDEs left, so we can just destroy it. */
185 nvkm_vmm_unref_pdes(it);
186 }
187
188 /* Destroy PD/PT. */
189 TRA(it, "PDE free %s", nvkm_vmm_desc_type(&desc[it->lvl - 1]));
190 nvkm_mmu_ptc_put(vmm->mmu, vmm->bootstrapped, &pt);
191 if (!pgt->refs[!type])
192 nvkm_vmm_pt_del(&pgt);
193 it->lvl--;
194 }
195
196 static void
nvkm_vmm_unref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
	const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
199 {
200 const struct nvkm_vmm_desc *pair = it->page[-1].desc;
201 const u32 sptb = desc->bits - pair->bits;
202 const u32 sptn = 1 << sptb;
203 struct nvkm_vmm *vmm = it->vmm;
204 u32 spti = ptei & (sptn - 1), lpti, pteb;
205
206 /* Determine how many SPTEs are being touched under each LPTE,
207 * and drop reference counts.
208 */
209 for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
210 const u32 pten = min(sptn - spti, ptes);
211 pgt->pte[lpti] -= pten;
212 ptes -= pten;
213 }
214
215 /* We're done here if there's no corresponding LPT. */
216 if (!pgt->refs[0])
217 return;
218
219 for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
220 /* Skip over any LPTEs that still have valid SPTEs. */
221 if (pgt->pte[pteb] & NVKM_VMM_PTE_SPTES) {
222 for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
223 if (!(pgt->pte[ptei] & NVKM_VMM_PTE_SPTES))
224 break;
225 }
226 continue;
227 }
228
/* As there are no more non-UNMAPPED SPTEs left in the range
 * covered by a number of LPTEs, the LPTEs once again take
 * control over their address range.
 *
 * Determine how many LPTEs need to transition state.
 */
235 pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
236 for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
237 if (pgt->pte[ptei] & NVKM_VMM_PTE_SPTES)
238 break;
239 pgt->pte[ptei] &= ~NVKM_VMM_PTE_VALID;
240 }
241
242 if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
243 TRA(it, "LPTE %05x: U -> S %d PTEs", pteb, ptes);
244 pair->func->sparse(vmm, pgt->pt[0], pteb, ptes);
245 } else
246 if (pair->func->invalid) {
247 /* If the MMU supports it, restore the LPTE to the
248 * INVALID state to tell the MMU there is no point
249 * trying to fetch the corresponding SPTEs.
250 */
251 TRA(it, "LPTE %05x: U -> I %d PTEs", pteb, ptes);
252 pair->func->invalid(vmm, pgt->pt[0], pteb, ptes);
253 }
254 }
255 }
256
257 static bool
nvkm_vmm_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
259 {
260 const struct nvkm_vmm_desc *desc = it->desc;
261 const int type = desc->type == SPT;
262 struct nvkm_vmm_pt *pgt = it->pt[0];
263 bool dma;
264
265 if (pfn) {
266 /* Need to clear PTE valid bits before we dma_unmap_page(). */
267 dma = desc->func->pfn_clear(it->vmm, pgt->pt[type], ptei, ptes);
268 if (dma) {
269 /* GPU may have cached the PT, flush before unmap. */
270 nvkm_vmm_flush_mark(it);
271 nvkm_vmm_flush(it);
272 desc->func->pfn_unmap(it->vmm, pgt->pt[type], ptei, ptes);
273 }
274 }
275
276 /* Drop PTE references. */
277 pgt->refs[type] -= ptes;
278
/* Dual-PTs need special handling, unless the PDE is becoming invalid. */
280 if (desc->type == SPT && (pgt->refs[0] || pgt->refs[1]))
281 nvkm_vmm_unref_sptes(it, pgt, desc, ptei, ptes);
282
283 /* PT no longer needed? Destroy it. */
284 if (!pgt->refs[type]) {
285 it->lvl++;
286 TRA(it, "%s empty", nvkm_vmm_desc_type(desc));
287 it->lvl--;
288 nvkm_vmm_unref_pdes(it);
289 return false; /* PTE writes for unmap() not necessary. */
290 }
291
292 return true;
293 }
294
295 static void
nvkm_vmm_ref_sptes(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgt,
	const struct nvkm_vmm_desc *desc, u32 ptei, u32 ptes)
298 {
299 const struct nvkm_vmm_desc *pair = it->page[-1].desc;
300 const u32 sptb = desc->bits - pair->bits;
301 const u32 sptn = 1 << sptb;
302 struct nvkm_vmm *vmm = it->vmm;
303 u32 spti = ptei & (sptn - 1), lpti, pteb;
304
305 /* Determine how many SPTEs are being touched under each LPTE,
306 * and increase reference counts.
307 */
308 for (lpti = ptei >> sptb; ptes; spti = 0, lpti++) {
309 const u32 pten = min(sptn - spti, ptes);
310 pgt->pte[lpti] += pten;
311 ptes -= pten;
312 }
313
314 /* We're done here if there's no corresponding LPT. */
315 if (!pgt->refs[0])
316 return;
317
318 for (ptei = pteb = ptei >> sptb; ptei < lpti; pteb = ptei) {
319 /* Skip over any LPTEs that already have valid SPTEs. */
320 if (pgt->pte[pteb] & NVKM_VMM_PTE_VALID) {
321 for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
322 if (!(pgt->pte[ptei] & NVKM_VMM_PTE_VALID))
323 break;
324 }
325 continue;
326 }
327
328 /* As there are now non-UNMAPPED SPTEs in the range covered
329 * by a number of LPTEs, we need to transfer control of the
330 * address range to the SPTEs.
331 *
332 * Determine how many LPTEs need to transition state.
333 */
334 pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
335 for (ptes = 1, ptei++; ptei < lpti; ptes++, ptei++) {
336 if (pgt->pte[ptei] & NVKM_VMM_PTE_VALID)
337 break;
338 pgt->pte[ptei] |= NVKM_VMM_PTE_VALID;
339 }
340
341 if (pgt->pte[pteb] & NVKM_VMM_PTE_SPARSE) {
342 const u32 spti = pteb * sptn;
343 const u32 sptc = ptes * sptn;
/* The entire LPTE is marked as sparse; we need
 * to make sure that the SPTEs are too.
 */
347 TRA(it, "SPTE %05x: U -> S %d PTEs", spti, sptc);
348 desc->func->sparse(vmm, pgt->pt[1], spti, sptc);
349 /* Sparse LPTEs prevent SPTEs from being accessed. */
350 TRA(it, "LPTE %05x: S -> U %d PTEs", pteb, ptes);
351 pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
352 } else
353 if (pair->func->invalid) {
354 /* MMU supports blocking SPTEs by marking an LPTE
355 * as INVALID. We need to reverse that here.
356 */
357 TRA(it, "LPTE %05x: I -> U %d PTEs", pteb, ptes);
358 pair->func->unmap(vmm, pgt->pt[0], pteb, ptes);
359 }
360 }
361 }
362
363 static bool
nvkm_vmm_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
365 {
366 const struct nvkm_vmm_desc *desc = it->desc;
367 const int type = desc->type == SPT;
368 struct nvkm_vmm_pt *pgt = it->pt[0];
369
370 /* Take PTE references. */
371 pgt->refs[type] += ptes;
372
373 /* Dual-PTs need special handling. */
374 if (desc->type == SPT)
375 nvkm_vmm_ref_sptes(it, pgt, desc, ptei, ptes);
376
377 return true;
378 }
379
380 static void
nvkm_vmm_sparse_ptes(const struct nvkm_vmm_desc *desc,
	struct nvkm_vmm_pt *pgt, u32 ptei, u32 ptes)
383 {
384 if (desc->type == PGD) {
385 while (ptes--)
386 pgt->pde[ptei++] = NVKM_VMM_PDE_SPARSE;
387 } else
388 if (desc->type == LPT) {
389 memset(&pgt->pte[ptei], NVKM_VMM_PTE_SPARSE, ptes);
390 }
391 }
392
393 static bool
nvkm_vmm_sparse_unref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
395 {
396 struct nvkm_vmm_pt *pt = it->pt[0];
397 if (it->desc->type == PGD)
398 memset(&pt->pde[ptei], 0x00, sizeof(pt->pde[0]) * ptes);
399 else
400 if (it->desc->type == LPT)
401 memset(&pt->pte[ptei], 0x00, sizeof(pt->pte[0]) * ptes);
402 return nvkm_vmm_unref_ptes(it, pfn, ptei, ptes);
403 }
404
405 static bool
nvkm_vmm_sparse_ref_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
407 {
408 nvkm_vmm_sparse_ptes(it->desc, it->pt[0], ptei, ptes);
409 return nvkm_vmm_ref_ptes(it, pfn, ptei, ptes);
410 }
411
412 static bool
nvkm_vmm_ref_hwpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
414 {
415 const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
416 const int type = desc->type == SPT;
417 struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
418 const bool zero = !pgt->sparse && !desc->func->invalid;
419 struct nvkm_vmm *vmm = it->vmm;
420 struct nvkm_mmu *mmu = vmm->mmu;
421 struct nvkm_mmu_pt *pt;
422 u32 pten = 1 << desc->bits;
423 u32 pteb, ptei, ptes;
424 u32 size = desc->size * pten;
425
426 pgd->refs[0]++;
427
428 pgt->pt[type] = nvkm_mmu_ptc_get(mmu, size, desc->align, zero);
429 if (!pgt->pt[type]) {
430 it->lvl--;
431 nvkm_vmm_unref_pdes(it);
432 return false;
433 }
434
435 if (zero)
436 goto done;
437
438 pt = pgt->pt[type];
439
440 if (desc->type == LPT && pgt->refs[1]) {
441 /* SPT already exists covering the same range as this LPT,
442 * which means we need to be careful that any LPTEs which
443 * overlap valid SPTEs are unmapped as opposed to invalid
444 * or sparse, which would prevent the MMU from looking at
445 * the SPTEs on some GPUs.
446 */
447 for (ptei = pteb = 0; ptei < pten; pteb = ptei) {
448 bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
449 for (ptes = 1, ptei++; ptei < pten; ptes++, ptei++) {
450 bool next = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;
451 if (spte != next)
452 break;
453 }
454
455 if (!spte) {
456 if (pgt->sparse)
457 desc->func->sparse(vmm, pt, pteb, ptes);
458 else
459 desc->func->invalid(vmm, pt, pteb, ptes);
460 memset(&pgt->pte[pteb], 0x00, ptes);
461 } else {
462 desc->func->unmap(vmm, pt, pteb, ptes);
463 while (ptes--)
464 pgt->pte[pteb++] |= NVKM_VMM_PTE_VALID;
465 }
466 }
467 } else {
468 if (pgt->sparse) {
469 nvkm_vmm_sparse_ptes(desc, pgt, 0, pten);
470 desc->func->sparse(vmm, pt, 0, pten);
471 } else {
472 desc->func->invalid(vmm, pt, 0, pten);
473 }
474 }
475
476 done:
477 TRA(it, "PDE write %s", nvkm_vmm_desc_type(desc));
478 it->desc[it->lvl].func->pde(it->vmm, pgd, pdei);
479 nvkm_vmm_flush_mark(it);
480 return true;
481 }
482
483 static bool
nvkm_vmm_ref_swpt(struct nvkm_vmm_iter *it, struct nvkm_vmm_pt *pgd, u32 pdei)
485 {
486 const struct nvkm_vmm_desc *desc = &it->desc[it->lvl - 1];
487 struct nvkm_vmm_pt *pgt = pgd->pde[pdei];
488
489 pgt = nvkm_vmm_pt_new(desc, NVKM_VMM_PDE_SPARSED(pgt), it->page);
490 if (!pgt) {
491 if (!pgd->refs[0])
492 nvkm_vmm_unref_pdes(it);
493 return false;
494 }
495
496 pgd->pde[pdei] = pgt;
497 return true;
498 }
499
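/* Core page-tree walker.  Visits every PTE backing the range starting at
 * addr for size bytes at the given page size, optionally creating (ref)
 * page tables on the way down, and invoking the REF_PTES, MAP_PTES or
 * CLR_PTES callbacks on each contiguous run of PTEs.
 *
 * Returns ~0ULL on success, or the address at which a page-table
 * allocation failed so the caller can unwind what was already done.
 */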
500 static inline u64
nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
	u64 addr, u64 size, const char *name, bool ref, bool pfn,
	bool (*REF_PTES)(struct nvkm_vmm_iter *, bool pfn, u32, u32),
	nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map,
	nvkm_vmm_pxe_func CLR_PTES)
506 {
507 const struct nvkm_vmm_desc *desc = page->desc;
508 struct nvkm_vmm_iter it;
509 u64 bits = addr >> page->shift;
510
511 it.page = page;
512 it.desc = desc;
513 it.vmm = vmm;
514 it.cnt = size >> page->shift;
515 it.flush = NVKM_VMM_LEVELS_MAX;
516
517 /* Deconstruct address into PTE indices for each mapping level. */
518 for (it.lvl = 0; desc[it.lvl].bits; it.lvl++) {
519 it.pte[it.lvl] = bits & ((1 << desc[it.lvl].bits) - 1);
520 bits >>= desc[it.lvl].bits;
521 }
522 it.max = --it.lvl;
523 it.pt[it.max] = vmm->pd;
524
525 it.lvl = 0;
526 TRA(&it, "%s: %016llx %016llx %d %lld PTEs", name,
527 addr, size, page->shift, it.cnt);
528 it.lvl = it.max;
529
530 /* Depth-first traversal of page tables. */
531 while (it.cnt) {
532 struct nvkm_vmm_pt *pgt = it.pt[it.lvl];
533 const int type = desc->type == SPT;
534 const u32 pten = 1 << desc->bits;
535 const u32 ptei = it.pte[0];
536 const u32 ptes = min_t(u64, it.cnt, pten - ptei);
537
538 /* Walk down the tree, finding page tables for each level. */
539 for (; it.lvl; it.lvl--) {
540 const u32 pdei = it.pte[it.lvl];
541 struct nvkm_vmm_pt *pgd = pgt;
542
543 /* Software PT. */
544 if (ref && NVKM_VMM_PDE_INVALID(pgd->pde[pdei])) {
545 if (!nvkm_vmm_ref_swpt(&it, pgd, pdei))
546 goto fail;
547 }
548 it.pt[it.lvl - 1] = pgt = pgd->pde[pdei];
549
550 /* Hardware PT.
551 *
552 * This is a separate step from above due to GF100 and
553 * newer having dual page tables at some levels, which
554 * are refcounted independently.
555 */
556 if (ref && !pgt->refs[desc[it.lvl - 1].type == SPT]) {
557 if (!nvkm_vmm_ref_hwpt(&it, pgd, pdei))
558 goto fail;
559 }
560 }
561
562 /* Handle PTE updates. */
563 if (!REF_PTES || REF_PTES(&it, pfn, ptei, ptes)) {
564 struct nvkm_mmu_pt *pt = pgt->pt[type];
565 if (MAP_PTES || CLR_PTES) {
566 if (MAP_PTES)
567 MAP_PTES(vmm, pt, ptei, ptes, map);
568 else
569 CLR_PTES(vmm, pt, ptei, ptes);
570 nvkm_vmm_flush_mark(&it);
571 }
572 }
573
574 /* Walk back up the tree to the next position. */
575 it.pte[it.lvl] += ptes;
576 it.cnt -= ptes;
577 if (it.cnt) {
578 while (it.pte[it.lvl] == (1 << desc[it.lvl].bits)) {
579 it.pte[it.lvl++] = 0;
580 it.pte[it.lvl]++;
581 }
582 }
583 }
584
585 nvkm_vmm_flush(&it);
586 return ~0ULL;
587
588 fail:
589 /* Reconstruct the failure address so the caller is able to
590 * reverse any partially completed operations.
591 */
592 addr = it.pte[it.max--];
593 do {
594 addr = addr << desc[it.max].bits;
595 addr |= it.pte[it.max];
596 } while (it.max--);
597
598 return addr << page->shift;
599 }
600
601 static void
nvkm_vmm_ptes_sparse_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
	u64 addr, u64 size)
604 {
605 nvkm_vmm_iter(vmm, page, addr, size, "sparse unref", false, false,
606 nvkm_vmm_sparse_unref_ptes, NULL, NULL,
607 page->desc->func->invalid ?
608 page->desc->func->invalid : page->desc->func->unmap);
609 }
610
611 static int
nvkm_vmm_ptes_sparse_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
	u64 addr, u64 size)
614 {
615 if ((page->type & NVKM_VMM_PAGE_SPARSE)) {
616 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "sparse ref",
617 true, false, nvkm_vmm_sparse_ref_ptes,
618 NULL, NULL, page->desc->func->sparse);
619 if (fail != ~0ULL) {
620 if ((size = fail - addr))
621 nvkm_vmm_ptes_sparse_put(vmm, page, addr, size);
622 return -ENOMEM;
623 }
624 return 0;
625 }
626 return -EINVAL;
627 }
628
629 static int
nvkm_vmm_ptes_sparse(struct nvkm_vmm *vmm, u64 addr, u64 size, bool ref)
631 {
632 const struct nvkm_vmm_page *page = vmm->func->page;
633 int m = 0, i;
634 u64 start = addr;
635 u64 block;
636
637 while (size) {
638 /* Limit maximum page size based on remaining size. */
639 while (size < (1ULL << page[m].shift))
640 m++;
641 i = m;
642
643 /* Find largest page size suitable for alignment. */
644 while (!IS_ALIGNED(addr, 1ULL << page[i].shift))
645 i++;
646
647 /* Determine number of PTEs at this page size. */
648 if (i != m) {
649 /* Limited to alignment boundary of next page size. */
650 u64 next = 1ULL << page[i - 1].shift;
651 u64 part = ALIGN(addr, next) - addr;
652 if (size - part >= next)
653 block = (part >> page[i].shift) << page[i].shift;
654 else
655 block = (size >> page[i].shift) << page[i].shift;
656 } else {
657 block = (size >> page[i].shift) << page[i].shift;
658 }
659
660 /* Perform operation. */
661 if (ref) {
662 int ret = nvkm_vmm_ptes_sparse_get(vmm, &page[i], addr, block);
663 if (ret) {
664 if ((size = addr - start))
665 nvkm_vmm_ptes_sparse(vmm, start, size, false);
666 return ret;
667 }
668 } else {
669 nvkm_vmm_ptes_sparse_put(vmm, &page[i], addr, block);
670 }
671
672 size -= block;
673 addr += block;
674 }
675
676 return 0;
677 }
678
679 static void
nvkm_vmm_ptes_unmap_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
	u64 addr, u64 size, bool sparse, bool pfn)
682 {
683 const struct nvkm_vmm_desc_func *func = page->desc->func;
684 nvkm_vmm_iter(vmm, page, addr, size, "unmap + unref",
685 false, pfn, nvkm_vmm_unref_ptes, NULL, NULL,
686 sparse ? func->sparse : func->invalid ? func->invalid :
687 func->unmap);
688 }
689
690 static int
nvkm_vmm_ptes_get_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
	u64 addr, u64 size, struct nvkm_vmm_map *map,
	nvkm_vmm_pte_func func)
694 {
695 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref + map", true,
696 false, nvkm_vmm_ref_ptes, func, map, NULL);
697 if (fail != ~0ULL) {
698 if ((size = fail - addr))
699 nvkm_vmm_ptes_unmap_put(vmm, page, addr, size, false, false);
700 return -ENOMEM;
701 }
702 return 0;
703 }
704
705 static void
nvkm_vmm_ptes_unmap(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
	u64 addr, u64 size, bool sparse, bool pfn)
708 {
709 const struct nvkm_vmm_desc_func *func = page->desc->func;
710 nvkm_vmm_iter(vmm, page, addr, size, "unmap", false, pfn,
711 NULL, NULL, NULL,
712 sparse ? func->sparse : func->invalid ? func->invalid :
713 func->unmap);
714 }
715
716 static void
nvkm_vmm_ptes_map(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
	u64 addr, u64 size, struct nvkm_vmm_map *map,
	nvkm_vmm_pte_func func)
720 {
721 nvkm_vmm_iter(vmm, page, addr, size, "map", false, false,
722 NULL, func, map, NULL);
723 }
724
725 static void
nvkm_vmm_ptes_put(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
	u64 addr, u64 size)
728 {
729 nvkm_vmm_iter(vmm, page, addr, size, "unref", false, false,
730 nvkm_vmm_unref_ptes, NULL, NULL, NULL);
731 }
732
733 static int
nvkm_vmm_ptes_get(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
	u64 addr, u64 size)
736 {
737 u64 fail = nvkm_vmm_iter(vmm, page, addr, size, "ref", true, false,
738 nvkm_vmm_ref_ptes, NULL, NULL, NULL);
739 if (fail != ~0ULL) {
740 if (fail != addr)
741 nvkm_vmm_ptes_put(vmm, page, addr, fail - addr);
742 return -ENOMEM;
743 }
744 return 0;
745 }
746
747 static inline struct nvkm_vma *
nvkm_vma_new(u64 addr, u64 size)
749 {
750 struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
751 if (vma) {
752 vma->addr = addr;
753 vma->size = size;
754 vma->page = NVKM_VMA_PAGE_NONE;
755 vma->refd = NVKM_VMA_PAGE_NONE;
756 }
757 return vma;
758 }
759
760 struct nvkm_vma *
nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
762 {
763 struct nvkm_vma *new;
764
765 BUG_ON(vma->size == tail);
766
767 if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail)))
768 return NULL;
769 vma->size -= tail;
770
771 new->mapref = vma->mapref;
772 new->sparse = vma->sparse;
773 new->page = vma->page;
774 new->refd = vma->refd;
775 new->used = vma->used;
776 new->part = vma->part;
777 new->busy = vma->busy;
778 new->mapped = vma->mapped;
779 list_add(&new->head, &vma->head);
780 return new;
781 }
782
783 static inline void
nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
785 {
786 rb_erase(&vma->tree, &vmm->free);
787 }
788
789 static inline void
nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
791 {
792 nvkm_vmm_free_remove(vmm, vma);
793 list_del(&vma->head);
794 kfree(vma);
795 }
796
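/* The free tree is keyed by block size first, then address, so that
 * nvkm_vmm_get_locked() can locate the smallest free block capable of
 * satisfying an allocation.
 */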
797 static void
nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
799 {
800 struct rb_node **ptr = &vmm->free.rb_node;
801 struct rb_node *parent = NULL;
802
803 while (*ptr) {
804 struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
805 parent = *ptr;
806 if (vma->size < this->size)
807 ptr = &parent->rb_left;
808 else
809 if (vma->size > this->size)
810 ptr = &parent->rb_right;
811 else
812 if (vma->addr < this->addr)
813 ptr = &parent->rb_left;
814 else
815 if (vma->addr > this->addr)
816 ptr = &parent->rb_right;
817 else
818 BUG();
819 }
820
821 rb_link_node(&vma->tree, parent, ptr);
822 rb_insert_color(&vma->tree, &vmm->free);
823 }
824
825 static inline void
nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
827 {
828 rb_erase(&vma->tree, &vmm->root);
829 }
830
831 static inline void
nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
833 {
834 nvkm_vmm_node_remove(vmm, vma);
835 list_del(&vma->head);
836 kfree(vma);
837 }
838
839 static void
nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
841 {
842 struct rb_node **ptr = &vmm->root.rb_node;
843 struct rb_node *parent = NULL;
844
845 while (*ptr) {
846 struct nvkm_vma *this = rb_entry(*ptr, typeof(*this), tree);
847 parent = *ptr;
848 if (vma->addr < this->addr)
849 ptr = &parent->rb_left;
850 else
851 if (vma->addr > this->addr)
852 ptr = &parent->rb_right;
853 else
854 BUG();
855 }
856
857 rb_link_node(&vma->tree, parent, ptr);
858 rb_insert_color(&vma->tree, &vmm->root);
859 }
860
861 struct nvkm_vma *
nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
863 {
864 struct rb_node *node = vmm->root.rb_node;
865 while (node) {
866 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
867 if (addr < vma->addr)
868 node = node->rb_left;
869 else
870 if (addr >= vma->addr + vma->size)
871 node = node->rb_right;
872 else
873 return vma;
874 }
875 return NULL;
876 }
877
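/* Fetch the VMA adjacent to 'root' in the address-ordered list, or NULL
 * when there is no previous/next node.
 */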
878 #define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL : \
879 list_entry((root)->head.dir, struct nvkm_vma, head))
880
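/* Merge all, or the given 'size' bytes, of 'vma' into the supplied prev/next
 * neighbours, returning the node that now covers the affected range.
 */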
881 static struct nvkm_vma *
nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
	struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
884 {
885 if (next) {
886 if (vma->size == size) {
887 vma->size += next->size;
888 nvkm_vmm_node_delete(vmm, next);
889 if (prev) {
890 prev->size += vma->size;
891 nvkm_vmm_node_delete(vmm, vma);
892 return prev;
893 }
894 return vma;
895 }
896 BUG_ON(prev);
897
898 nvkm_vmm_node_remove(vmm, next);
899 vma->size -= size;
900 next->addr -= size;
901 next->size += size;
902 nvkm_vmm_node_insert(vmm, next);
903 return next;
904 }
905
906 if (prev) {
907 if (vma->size != size) {
908 nvkm_vmm_node_remove(vmm, vma);
909 prev->size += size;
910 vma->addr += size;
911 vma->size -= size;
912 nvkm_vmm_node_insert(vmm, vma);
913 } else {
914 prev->size += vma->size;
915 nvkm_vmm_node_delete(vmm, vma);
916 }
917 return prev;
918 }
919
920 return vma;
921 }
922
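/* Split a VMA so that [addr, addr + size) becomes its own node; the pieces
 * created by the split are flagged as ->part so they can be merged back
 * together later.
 */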
923 struct nvkm_vma *
nvkm_vmm_node_split(struct nvkm_vmm *vmm,
	struct nvkm_vma *vma, u64 addr, u64 size)
926 {
927 struct nvkm_vma *prev = NULL;
928
929 if (vma->addr != addr) {
930 prev = vma;
931 if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
932 return NULL;
933 vma->part = true;
934 nvkm_vmm_node_insert(vmm, vma);
935 }
936
937 if (vma->size != size) {
938 struct nvkm_vma *tmp;
939 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
940 nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
941 return NULL;
942 }
943 tmp->part = true;
944 nvkm_vmm_node_insert(vmm, tmp);
945 }
946
947 return vma;
948 }
949
950 static void
nvkm_vma_dump(struct nvkm_vma *vma)
952 {
953 printk(KERN_ERR "%016llx %016llx %c%c%c%c%c%c%c%c %p\n",
954 vma->addr, (u64)vma->size,
955 vma->used ? '-' : 'F',
956 vma->mapref ? 'R' : '-',
957 vma->sparse ? 'S' : '-',
958 vma->page != NVKM_VMA_PAGE_NONE ? '0' + vma->page : '-',
959 vma->refd != NVKM_VMA_PAGE_NONE ? '0' + vma->refd : '-',
960 vma->part ? 'P' : '-',
961 vma->busy ? 'B' : '-',
962 vma->mapped ? 'M' : '-',
963 vma->memory);
964 }
965
966 static void
nvkm_vmm_dump(struct nvkm_vmm *vmm)
968 {
969 struct nvkm_vma *vma;
970 list_for_each_entry(vma, &vmm->list, head) {
971 nvkm_vma_dump(vma);
972 }
973 }
974
975 static void
nvkm_vmm_dtor(struct nvkm_vmm *vmm)
977 {
978 struct nvkm_vma *vma;
979 struct rb_node *node;
980
981 if (0)
982 nvkm_vmm_dump(vmm);
983
984 while ((node = rb_first(&vmm->root))) {
985 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);
986 nvkm_vmm_put(vmm, &vma);
987 }
988
989 if (vmm->bootstrapped) {
990 const struct nvkm_vmm_page *page = vmm->func->page;
991 const u64 limit = vmm->limit - vmm->start;
992
993 while (page[1].shift)
994 page++;
995
996 nvkm_mmu_ptc_dump(vmm->mmu);
997 nvkm_vmm_ptes_put(vmm, page, vmm->start, limit);
998 }
999
1000 vma = list_first_entry(&vmm->list, typeof(*vma), head);
1001 list_del(&vma->head);
1002 kfree(vma);
1003 WARN_ON(!list_empty(&vmm->list));
1004
1005 if (vmm->nullp) {
1006 dma_free_coherent(vmm->mmu->subdev.device->dev, 16 * 1024,
1007 vmm->nullp, vmm->null);
1008 }
1009
1010 if (vmm->pd) {
1011 nvkm_mmu_ptc_put(vmm->mmu, true, &vmm->pd->pt[0]);
1012 nvkm_vmm_pt_del(&vmm->pd);
1013 }
1014 }
1015
1016 static int
nvkm_vmm_ctor_managed(struct nvkm_vmm *vmm, u64 addr, u64 size)
1018 {
1019 struct nvkm_vma *vma;
1020 if (!(vma = nvkm_vma_new(addr, size)))
1021 return -ENOMEM;
1022 vma->mapref = true;
1023 vma->sparse = false;
1024 vma->used = true;
1025 nvkm_vmm_node_insert(vmm, vma);
1026 list_add_tail(&vma->head, &vmm->list);
1027 return 0;
1028 }
1029
1030 static int
nvkm_vmm_ctor(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
	u32 pd_header, bool managed, u64 addr, u64 size,
	struct lock_class_key *key, const char *name,
	struct nvkm_vmm *vmm)
1035 {
1036 static struct lock_class_key _key;
1037 const struct nvkm_vmm_page *page = func->page;
1038 const struct nvkm_vmm_desc *desc;
1039 struct nvkm_vma *vma;
1040 int levels, bits = 0, ret;
1041
1042 vmm->func = func;
1043 vmm->mmu = mmu;
1044 vmm->name = name;
1045 vmm->debug = mmu->subdev.debug;
1046 kref_init(&vmm->kref);
1047
1048 __mutex_init(&vmm->mutex, "&vmm->mutex", key ? key : &_key);
1049
1050 /* Locate the smallest page size supported by the backend, it will
1051 * have the deepest nesting of page tables.
1052 */
1053 while (page[1].shift)
1054 page++;
1055
1056 /* Locate the structure that describes the layout of the top-level
1057 * page table, and determine the number of valid bits in a virtual
1058 * address.
1059 */
1060 for (levels = 0, desc = page->desc; desc->bits; desc++, levels++)
1061 bits += desc->bits;
1062 bits += page->shift;
1063 desc--;
1064
1065 if (WARN_ON(levels > NVKM_VMM_LEVELS_MAX))
1066 return -EINVAL;
1067
1068 /* Allocate top-level page table. */
1069 vmm->pd = nvkm_vmm_pt_new(desc, false, NULL);
1070 if (!vmm->pd)
1071 return -ENOMEM;
1072 vmm->pd->refs[0] = 1;
1073 INIT_LIST_HEAD(&vmm->join);
1074
1075 /* ... and the GPU storage for it, except on Tesla-class GPUs that
1076 * have the PD embedded in the instance structure.
1077 */
1078 if (desc->size) {
1079 const u32 size = pd_header + desc->size * (1 << desc->bits);
1080 vmm->pd->pt[0] = nvkm_mmu_ptc_get(mmu, size, desc->align, true);
1081 if (!vmm->pd->pt[0])
1082 return -ENOMEM;
1083 }
1084
1085 /* Initialise address-space MM. */
1086 INIT_LIST_HEAD(&vmm->list);
1087 vmm->free = RB_ROOT;
1088 vmm->root = RB_ROOT;
1089
1090 if (managed) {
1091 /* Address-space will be managed by the client for the most
1092 * part, except for a specified area where NVKM allocations
1093 * are allowed to be placed.
1094 */
1095 vmm->start = 0;
1096 vmm->limit = 1ULL << bits;
1097 if (addr + size < addr || addr + size > vmm->limit)
1098 return -EINVAL;
1099
1100 /* Client-managed area before the NVKM-managed area. */
1101 if (addr && (ret = nvkm_vmm_ctor_managed(vmm, 0, addr)))
1102 return ret;
1103
1104 /* NVKM-managed area. */
1105 if (size) {
1106 if (!(vma = nvkm_vma_new(addr, size)))
1107 return -ENOMEM;
1108 nvkm_vmm_free_insert(vmm, vma);
1109 list_add_tail(&vma->head, &vmm->list);
1110 }
1111
1112 /* Client-managed area after the NVKM-managed area. */
1113 addr = addr + size;
1114 size = vmm->limit - addr;
1115 if (size && (ret = nvkm_vmm_ctor_managed(vmm, addr, size)))
1116 return ret;
1117 } else {
1118 /* Address-space fully managed by NVKM, requiring calls to
1119 * nvkm_vmm_get()/nvkm_vmm_put() to allocate address-space.
1120 */
1121 vmm->start = addr;
1122 vmm->limit = size ? (addr + size) : (1ULL << bits);
1123 if (vmm->start > vmm->limit || vmm->limit > (1ULL << bits))
1124 return -EINVAL;
1125
1126 if (!(vma = nvkm_vma_new(vmm->start, vmm->limit - vmm->start)))
1127 return -ENOMEM;
1128
1129 nvkm_vmm_free_insert(vmm, vma);
1130 list_add(&vma->head, &vmm->list);
1131 }
1132
1133 return 0;
1134 }
1135
1136 int
nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
	u32 hdr, bool managed, u64 addr, u64 size,
	struct lock_class_key *key, const char *name,
	struct nvkm_vmm **pvmm)
1141 {
1142 if (!(*pvmm = kzalloc(sizeof(**pvmm), GFP_KERNEL)))
1143 return -ENOMEM;
1144 return nvkm_vmm_ctor(func, mmu, hdr, managed, addr, size, key, name, *pvmm);
1145 }
1146
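/* For the PFN map/unmap paths: either fold the affected range into
 * compatible neighbouring VMAs, or split it out of 'vma' so that its
 * mapped/unmapped state can be tracked independently.
 */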
1147 static struct nvkm_vma *
nvkm_vmm_pfn_split_merge(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
	u64 addr, u64 size, u8 page, bool map)
1150 {
1151 struct nvkm_vma *prev = NULL;
1152 struct nvkm_vma *next = NULL;
1153
1154 if (vma->addr == addr && vma->part && (prev = node(vma, prev))) {
1155 if (prev->memory || prev->mapped != map)
1156 prev = NULL;
1157 }
1158
1159 if (vma->addr + vma->size == addr + size && (next = node(vma, next))) {
1160 if (!next->part ||
1161 next->memory || next->mapped != map)
1162 next = NULL;
1163 }
1164
1165 if (prev || next)
1166 return nvkm_vmm_node_merge(vmm, prev, vma, next, size);
1167 return nvkm_vmm_node_split(vmm, vma, addr, size);
1168 }
1169
1170 int
nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
1172 {
1173 struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);
1174 struct nvkm_vma *next;
1175 u64 limit = addr + size;
1176 u64 start = addr;
1177
1178 if (!vma)
1179 return -EINVAL;
1180
1181 do {
1182 if (!vma->mapped || vma->memory)
1183 continue;
1184
1185 size = min(limit - start, vma->size - (start - vma->addr));
1186
1187 nvkm_vmm_ptes_unmap_put(vmm, &vmm->func->page[vma->refd],
1188 start, size, false, true);
1189
1190 next = nvkm_vmm_pfn_split_merge(vmm, vma, start, size, 0, false);
1191 if (!WARN_ON(!next)) {
1192 vma = next;
1193 vma->refd = NVKM_VMA_PAGE_NONE;
1194 vma->mapped = false;
1195 }
1196 } while ((vma = node(vma, next)) && (start = vma->addr) < limit);
1197
1198 return 0;
1199 }
1200
1201 /*TODO:
1202 * - Avoid PT readback (for dma_unmap etc), this might end up being dealt
1203 * with inside HMM, which would be a lot nicer for us to deal with.
1204 * - Support for systems without a 4KiB page size.
1205 */
1206 int
nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
1208 {
1209 const struct nvkm_vmm_page *page = vmm->func->page;
1210 struct nvkm_vma *vma, *tmp;
1211 u64 limit = addr + size;
1212 u64 start = addr;
1213 int pm = size >> shift;
1214 int pi = 0;
1215
1216 /* Only support mapping where the page size of the incoming page
1217 * array matches a page size available for direct mapping.
1218 */
1219 while (page->shift && (page->shift != shift ||
1220 page->desc->func->pfn == NULL))
1221 page++;
1222
1223 if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
1224 !IS_ALIGNED(size, 1ULL << shift) ||
1225 addr + size < addr || addr + size > vmm->limit) {
1226 VMM_DEBUG(vmm, "paged map %d %d %016llx %016llx\n",
1227 shift, page->shift, addr, size);
1228 return -EINVAL;
1229 }
1230
1231 if (!(vma = nvkm_vmm_node_search(vmm, addr)))
1232 return -ENOENT;
1233
1234 do {
1235 bool map = !!(pfn[pi] & NVKM_VMM_PFN_V);
1236 bool mapped = vma->mapped;
1237 u64 size = limit - start;
1238 u64 addr = start;
1239 int pn, ret = 0;
1240
1241 /* Narrow the operation window to cover a single action (page
1242 * should be mapped or not) within a single VMA.
1243 */
1244 for (pn = 0; pi + pn < pm; pn++) {
1245 if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V))
1246 break;
1247 }
1248 size = min_t(u64, size, pn << page->shift);
1249 size = min_t(u64, size, vma->size + vma->addr - addr);
1250
1251 /* Reject any operation to unmanaged regions, and areas that
1252 * have nvkm_memory objects mapped in them already.
1253 */
1254 if (!vma->mapref || vma->memory) {
1255 ret = -EINVAL;
1256 goto next;
1257 }
1258
1259 /* In order to both properly refcount GPU page tables, and
1260 * prevent "normal" mappings and these direct mappings from
1261 * interfering with each other, we need to track contiguous
1262 * ranges that have been mapped with this interface.
1263 *
1264 * Here we attempt to either split an existing VMA so we're
1265 * able to flag the region as either unmapped/mapped, or to
1266 * merge with adjacent VMAs that are already compatible.
1267 *
1268 * If the region is already compatible, nothing is required.
1269 */
1270 if (map != mapped) {
1271 tmp = nvkm_vmm_pfn_split_merge(vmm, vma, addr, size,
1272 page -
1273 vmm->func->page, map);
1274 if (WARN_ON(!tmp)) {
1275 ret = -ENOMEM;
1276 goto next;
1277 }
1278
1279 if ((tmp->mapped = map))
1280 tmp->refd = page - vmm->func->page;
1281 else
1282 tmp->refd = NVKM_VMA_PAGE_NONE;
1283 vma = tmp;
1284 }
1285
1286 /* Update HW page tables. */
1287 if (map) {
1288 struct nvkm_vmm_map args;
1289 args.page = page;
1290 args.pfn = &pfn[pi];
1291
1292 if (!mapped) {
1293 ret = nvkm_vmm_ptes_get_map(vmm, page, addr,
1294 size, &args, page->
1295 desc->func->pfn);
1296 } else {
1297 nvkm_vmm_ptes_map(vmm, page, addr, size, &args,
1298 page->desc->func->pfn);
1299 }
1300 } else {
1301 if (mapped) {
1302 nvkm_vmm_ptes_unmap_put(vmm, page, addr, size,
1303 false, true);
1304 }
1305 }
1306
1307 next:
1308 /* Iterate to next operation. */
1309 if (vma->addr + vma->size == addr + size)
1310 vma = node(vma, next);
1311 start += size;
1312
1313 if (ret) {
1314 /* Failure is signalled by clearing the valid bit on
1315 * any PFN that couldn't be modified as requested.
1316 */
1317 while (size) {
1318 pfn[pi++] = NVKM_VMM_PFN_NONE;
1319 size -= 1 << page->shift;
1320 }
1321 } else {
1322 pi += size >> page->shift;
1323 }
1324 } while (vma && start < limit);
1325
1326 return 0;
1327 }
1328
1329 void
nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1331 {
1332 struct nvkm_vma *prev = NULL;
1333 struct nvkm_vma *next;
1334
1335 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1336 nvkm_memory_unref(&vma->memory);
1337 vma->mapped = false;
1338
1339 if (vma->part && (prev = node(vma, prev)) && prev->mapped)
1340 prev = NULL;
1341 if ((next = node(vma, next)) && (!next->part || next->mapped))
1342 next = NULL;
1343 nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
1344 }
1345
1346 void
nvkm_vmm_unmap_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma, bool pfn)
1348 {
1349 const struct nvkm_vmm_page *page = &vmm->func->page[vma->refd];
1350
1351 if (vma->mapref) {
1352 nvkm_vmm_ptes_unmap_put(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1353 vma->refd = NVKM_VMA_PAGE_NONE;
1354 } else {
1355 nvkm_vmm_ptes_unmap(vmm, page, vma->addr, vma->size, vma->sparse, pfn);
1356 }
1357
1358 nvkm_vmm_unmap_region(vmm, vma);
1359 }
1360
1361 void
nvkm_vmm_unmap(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1363 {
1364 if (vma->memory) {
1365 mutex_lock(&vmm->mutex);
1366 nvkm_vmm_unmap_locked(vmm, vma, false);
1367 mutex_unlock(&vmm->mutex);
1368 }
1369 }
1370
1371 static int
nvkm_vmm_map_valid(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
	void *argv, u32 argc, struct nvkm_vmm_map *map)
1374 {
1375 switch (nvkm_memory_target(map->memory)) {
1376 case NVKM_MEM_TARGET_VRAM:
1377 if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) {
1378 VMM_DEBUG(vmm, "%d !VRAM", map->page->shift);
1379 return -EINVAL;
1380 }
1381 break;
1382 case NVKM_MEM_TARGET_HOST:
1383 case NVKM_MEM_TARGET_NCOH:
1384 if (!(map->page->type & NVKM_VMM_PAGE_HOST)) {
1385 VMM_DEBUG(vmm, "%d !HOST", map->page->shift);
1386 return -EINVAL;
1387 }
1388 break;
1389 default:
1390 WARN_ON(1);
1391 return -ENOSYS;
1392 }
1393
1394 if (!IS_ALIGNED( vma->addr, 1ULL << map->page->shift) ||
1395 !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) ||
1396 !IS_ALIGNED( map->offset, 1ULL << map->page->shift) ||
1397 nvkm_memory_page(map->memory) < map->page->shift) {
1398 VMM_DEBUG(vmm, "alignment %016llx %016llx %016llx %d %d",
1399 vma->addr, (u64)vma->size, map->offset, map->page->shift,
1400 nvkm_memory_page(map->memory));
1401 return -EINVAL;
1402 }
1403
1404 return vmm->func->valid(vmm, argv, argc, map);
1405 }
1406
1407 static int
nvkm_vmm_map_choose(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
	void *argv, u32 argc, struct nvkm_vmm_map *map)
1410 {
1411 for (map->page = vmm->func->page; map->page->shift; map->page++) {
1412 VMM_DEBUG(vmm, "trying %d", map->page->shift);
1413 if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map))
1414 return 0;
1415 }
1416 return -EINVAL;
1417 }
1418
1419 static int
nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
	void *argv, u32 argc, struct nvkm_vmm_map *map)
1422 {
1423 nvkm_vmm_pte_func func;
1424 int ret;
1425
1426 /* Make sure we won't overrun the end of the memory object. */
1427 if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) {
1428 VMM_DEBUG(vmm, "overrun %016llx %016llx %016llx",
1429 nvkm_memory_size(map->memory),
1430 map->offset, (u64)vma->size);
1431 return -EINVAL;
1432 }
1433
1434 /* Check remaining arguments for validity. */
1435 if (vma->page == NVKM_VMA_PAGE_NONE &&
1436 vma->refd == NVKM_VMA_PAGE_NONE) {
1437 /* Find the largest page size we can perform the mapping at. */
1438 const u32 debug = vmm->debug;
1439 vmm->debug = 0;
1440 ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1441 vmm->debug = debug;
1442 if (ret) {
1443 VMM_DEBUG(vmm, "invalid at any page size");
1444 nvkm_vmm_map_choose(vmm, vma, argv, argc, map);
1445 return -EINVAL;
1446 }
1447 } else {
1448 /* Page size of the VMA is already pre-determined. */
1449 if (vma->refd != NVKM_VMA_PAGE_NONE)
1450 map->page = &vmm->func->page[vma->refd];
1451 else
1452 map->page = &vmm->func->page[vma->page];
1453
1454 ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map);
1455 if (ret) {
1456 VMM_DEBUG(vmm, "invalid %d\n", ret);
1457 return ret;
1458 }
1459 }
1460
1461 /* Deal with the 'offset' argument, and fetch the backend function. */
1462 map->off = map->offset;
1463 if (map->mem) {
1464 for (; map->off; map->mem = map->mem->next) {
1465 u64 size = (u64)map->mem->length << NVKM_RAM_MM_SHIFT;
1466 if (size > map->off)
1467 break;
1468 map->off -= size;
1469 }
1470 func = map->page->desc->func->mem;
1471 } else
1472 if (map->sgl) {
1473 for (; map->off; map->sgl = sg_next(map->sgl)) {
1474 u64 size = sg_dma_len(map->sgl);
1475 if (size > map->off)
1476 break;
1477 map->off -= size;
1478 }
1479 func = map->page->desc->func->sgl;
1480 } else {
1481 map->dma += map->offset >> PAGE_SHIFT;
1482 map->off = map->offset & PAGE_MASK;
1483 func = map->page->desc->func->dma;
1484 }
1485
1486 /* Perform the map. */
1487 if (vma->refd == NVKM_VMA_PAGE_NONE) {
1488 ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func);
1489 if (ret)
1490 return ret;
1491
1492 vma->refd = map->page - vmm->func->page;
1493 } else {
1494 nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func);
1495 }
1496
1497 nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
1498 nvkm_memory_unref(&vma->memory);
1499 vma->memory = nvkm_memory_ref(map->memory);
1500 vma->mapped = true;
1501 vma->tags = map->tags;
1502 return 0;
1503 }
1504
1505 int
nvkm_vmm_map(struct nvkm_vmm *vmm, struct nvkm_vma *vma, void *argv, u32 argc,
	struct nvkm_vmm_map *map)
1508 {
1509 int ret;
1510 mutex_lock(&vmm->mutex);
1511 ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map);
1512 vma->busy = false;
1513 mutex_unlock(&vmm->mutex);
1514 return ret;
1515 }
1516
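/* Return a VMA to the free tree, first coalescing it with any adjacent
 * unused regions.
 */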
1517 static void
nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1519 {
1520 struct nvkm_vma *prev, *next;
1521
1522 if ((prev = node(vma, prev)) && !prev->used) {
1523 vma->addr = prev->addr;
1524 vma->size += prev->size;
1525 nvkm_vmm_free_delete(vmm, prev);
1526 }
1527
1528 if ((next = node(vma, next)) && !next->used) {
1529 vma->size += next->size;
1530 nvkm_vmm_free_delete(vmm, next);
1531 }
1532
1533 nvkm_vmm_free_insert(vmm, vma);
1534 }
1535
1536 void
nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
1538 {
1539 const struct nvkm_vmm_page *page = vmm->func->page;
1540 struct nvkm_vma *next = vma;
1541
1542 BUG_ON(vma->part);
1543
1544 if (vma->mapref || !vma->sparse) {
1545 do {
1546 const bool mem = next->memory != NULL;
1547 const bool map = next->mapped;
1548 const u8 refd = next->refd;
1549 const u64 addr = next->addr;
1550 u64 size = next->size;
1551
1552 /* Merge regions that are in the same state. */
1553 while ((next = node(next, next)) && next->part &&
1554 (next->mapped == map) &&
1555 (next->memory != NULL) == mem &&
1556 (next->refd == refd))
1557 size += next->size;
1558
1559 if (map) {
1560 /* Region(s) are mapped, merge the unmap
1561 * and dereference into a single walk of
1562 * the page tree.
1563 */
1564 nvkm_vmm_ptes_unmap_put(vmm, &page[refd], addr,
1565 size, vma->sparse,
1566 !mem);
1567 } else
1568 if (refd != NVKM_VMA_PAGE_NONE) {
1569 /* Drop allocation-time PTE references. */
1570 nvkm_vmm_ptes_put(vmm, &page[refd], addr, size);
1571 }
1572 } while (next && next->part);
1573 }
1574
1575 /* Merge any mapped regions that were split from the initial
1576 * address-space allocation back into the allocated VMA, and
1577 * release memory/compression resources.
1578 */
1579 next = vma;
1580 do {
1581 if (next->mapped)
1582 nvkm_vmm_unmap_region(vmm, next);
1583 } while ((next = node(vma, next)) && next->part);
1584
1585 if (vma->sparse && !vma->mapref) {
1586 /* Sparse region that was allocated with a fixed page size,
1587 * meaning all relevant PTEs were referenced once when the
1588 * region was allocated, and remained that way, regardless
1589 * of whether memory was mapped into it afterwards.
1590 *
1591 * The process of unmapping, unsparsing, and dereferencing
1592 * PTEs can be done in a single page tree walk.
1593 */
1594 nvkm_vmm_ptes_sparse_put(vmm, &page[vma->refd], vma->addr, vma->size);
1595 } else
1596 if (vma->sparse) {
1597 /* Sparse region that wasn't allocated with a fixed page size,
1598 * PTE references were taken both at allocation time (to make
1599 * the GPU see the region as sparse), and when mapping memory
1600 * into the region.
1601 *
1602 * The latter was handled above, and the remaining references
1603 * are dealt with here.
1604 */
1605 nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, false);
1606 }
1607
1608 /* Remove VMA from the list of allocated nodes. */
1609 nvkm_vmm_node_remove(vmm, vma);
1610
1611 /* Merge VMA back into the free list. */
1612 vma->page = NVKM_VMA_PAGE_NONE;
1613 vma->refd = NVKM_VMA_PAGE_NONE;
1614 vma->used = false;
1615 nvkm_vmm_put_region(vmm, vma);
1616 }
1617
1618 void
nvkm_vmm_put(struct nvkm_vmm *vmm, struct nvkm_vma **pvma)
1620 {
1621 struct nvkm_vma *vma = *pvma;
1622 if (vma) {
1623 mutex_lock(&vmm->mutex);
1624 nvkm_vmm_put_locked(vmm, vma);
1625 mutex_unlock(&vmm->mutex);
1626 *pvma = NULL;
1627 }
1628 }
1629
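/* Allocate a region of address-space, optionally referencing its page
 * tables up-front (getref) and/or marking it sparse.  Uses a best-fit
 * search of the free tree (smallest block that fits), then applies
 * alignment and page_block constraints while scanning larger blocks.
 */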
1630 int
nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
	u8 shift, u8 align, u64 size, struct nvkm_vma **pvma)
1633 {
1634 const struct nvkm_vmm_page *page = &vmm->func->page[NVKM_VMA_PAGE_NONE];
1635 struct rb_node *node = NULL, *temp;
1636 struct nvkm_vma *vma = NULL, *tmp;
1637 u64 addr, tail;
1638 int ret;
1639
1640 VMM_TRACE(vmm, "getref %d mapref %d sparse %d "
1641 "shift: %d align: %d size: %016llx",
1642 getref, mapref, sparse, shift, align, size);
1643
1644 /* Zero-sized, or lazily-allocated sparse VMAs, make no sense. */
1645 if (unlikely(!size || (!getref && !mapref && sparse))) {
1646 VMM_DEBUG(vmm, "args %016llx %d %d %d",
1647 size, getref, mapref, sparse);
1648 return -EINVAL;
1649 }
1650
1651 /* Tesla-class GPUs can only select page size per-PDE, which means
1652 * we're required to know the mapping granularity up-front to find
1653 * a suitable region of address-space.
1654 *
1655 * The same goes if we're requesting up-front allocation of PTES.
1656 */
1657 if (unlikely((getref || vmm->func->page_block) && !shift)) {
1658 VMM_DEBUG(vmm, "page size required: %d %016llx",
1659 getref, vmm->func->page_block);
1660 return -EINVAL;
1661 }
1662
1663 /* If a specific page size was requested, determine its index and
1664 * make sure the requested size is a multiple of the page size.
1665 */
1666 if (shift) {
1667 for (page = vmm->func->page; page->shift; page++) {
1668 if (shift == page->shift)
1669 break;
1670 }
1671
1672 if (!page->shift || !IS_ALIGNED(size, 1ULL << page->shift)) {
1673 VMM_DEBUG(vmm, "page %d %016llx", shift, size);
1674 return -EINVAL;
1675 }
1676 align = max_t(u8, align, shift);
1677 } else {
1678 align = max_t(u8, align, 12);
1679 }
1680
1681 /* Locate smallest block that can possibly satisfy the allocation. */
1682 temp = vmm->free.rb_node;
1683 while (temp) {
1684 struct nvkm_vma *this = rb_entry(temp, typeof(*this), tree);
1685 if (this->size < size) {
1686 temp = temp->rb_right;
1687 } else {
1688 node = temp;
1689 temp = temp->rb_left;
1690 }
1691 }
1692
1693 if (unlikely(!node))
1694 return -ENOSPC;
1695
1696 /* Take into account alignment restrictions, trying larger blocks
1697 * in turn until we find a suitable free block.
1698 */
1699 do {
1700 struct nvkm_vma *this = rb_entry(node, typeof(*this), tree);
1701 struct nvkm_vma *prev = node(this, prev);
1702 struct nvkm_vma *next = node(this, next);
1703 const int p = page - vmm->func->page;
1704
1705 addr = this->addr;
1706 if (vmm->func->page_block && prev && prev->page != p)
1707 addr = ALIGN(addr, vmm->func->page_block);
1708 addr = ALIGN(addr, 1ULL << align);
1709
1710 tail = this->addr + this->size;
1711 if (vmm->func->page_block && next && next->page != p)
1712 tail = ALIGN_DOWN(tail, vmm->func->page_block);
1713
1714 if (addr <= tail && tail - addr >= size) {
1715 nvkm_vmm_free_remove(vmm, this);
1716 vma = this;
1717 break;
1718 }
1719 } while ((node = rb_next(node)));
1720
1721 if (unlikely(!vma))
1722 return -ENOSPC;
1723
1724 /* If the VMA we found isn't already exactly the requested size,
1725 * it needs to be split, and the remaining free blocks returned.
1726 */
1727 if (addr != vma->addr) {
1728 if (!(tmp = nvkm_vma_tail(vma, vma->size + vma->addr - addr))) {
1729 nvkm_vmm_put_region(vmm, vma);
1730 return -ENOMEM;
1731 }
1732 nvkm_vmm_free_insert(vmm, vma);
1733 vma = tmp;
1734 }
1735
1736 if (size != vma->size) {
1737 if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
1738 nvkm_vmm_put_region(vmm, vma);
1739 return -ENOMEM;
1740 }
1741 nvkm_vmm_free_insert(vmm, tmp);
1742 }
1743
1744 /* Pre-allocate page tables and/or setup sparse mappings. */
1745 if (sparse && getref)
1746 ret = nvkm_vmm_ptes_sparse_get(vmm, page, vma->addr, vma->size);
1747 else if (sparse)
1748 ret = nvkm_vmm_ptes_sparse(vmm, vma->addr, vma->size, true);
1749 else if (getref)
1750 ret = nvkm_vmm_ptes_get(vmm, page, vma->addr, vma->size);
1751 else
1752 ret = 0;
1753 if (ret) {
1754 nvkm_vmm_put_region(vmm, vma);
1755 return ret;
1756 }
1757
1758 vma->mapref = mapref && !getref;
1759 vma->sparse = sparse;
1760 vma->page = page - vmm->func->page;
1761 vma->refd = getref ? vma->page : NVKM_VMA_PAGE_NONE;
1762 vma->used = true;
1763 nvkm_vmm_node_insert(vmm, vma);
1764 *pvma = vma;
1765 return 0;
1766 }
1767
1768 int
nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
1770 {
1771 int ret;
1772 mutex_lock(&vmm->mutex);
1773 ret = nvkm_vmm_get_locked(vmm, false, true, false, page, 0, size, pvma);
1774 mutex_unlock(&vmm->mutex);
1775 return ret;
1776 }
1777
1778 void
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
1780 {
1781 if (inst && vmm && vmm->func->part) {
1782 mutex_lock(&vmm->mutex);
1783 vmm->func->part(vmm, inst);
1784 mutex_unlock(&vmm->mutex);
1785 }
1786 }
1787
1788 int
nvkm_vmm_join(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
1790 {
1791 int ret = 0;
1792 if (vmm->func->join) {
1793 mutex_lock(&vmm->mutex);
1794 ret = vmm->func->join(vmm, inst);
1795 mutex_unlock(&vmm->mutex);
1796 }
1797 return ret;
1798 }
1799
1800 static bool
nvkm_vmm_boot_ptes(struct nvkm_vmm_iter *it, bool pfn, u32 ptei, u32 ptes)
1802 {
1803 const struct nvkm_vmm_desc *desc = it->desc;
1804 const int type = desc->type == SPT;
1805 nvkm_memory_boot(it->pt[0]->pt[type]->memory, it->vmm);
1806 return false;
1807 }
1808
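/* Bootstrap a VMM: pre-reference page tables covering the whole
 * address-space and call nvkm_memory_boot() on each one's backing memory,
 * so the VMM (e.g. one used for BAR mappings) is usable before the normal
 * allocation paths are available.
 */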
1809 int
nvkm_vmm_boot(struct nvkm_vmm *vmm)
1811 {
1812 const struct nvkm_vmm_page *page = vmm->func->page;
1813 const u64 limit = vmm->limit - vmm->start;
1814 int ret;
1815
1816 while (page[1].shift)
1817 page++;
1818
1819 ret = nvkm_vmm_ptes_get(vmm, page, vmm->start, limit);
1820 if (ret)
1821 return ret;
1822
1823 nvkm_vmm_iter(vmm, page, vmm->start, limit, "bootstrap", false, false,
1824 nvkm_vmm_boot_ptes, NULL, NULL, NULL);
1825 vmm->bootstrapped = true;
1826 return 0;
1827 }
1828
1829 static void
nvkm_vmm_del(struct kref *kref)
1831 {
1832 struct nvkm_vmm *vmm = container_of(kref, typeof(*vmm), kref);
1833 nvkm_vmm_dtor(vmm);
1834 kfree(vmm);
1835 }
1836
1837 void
nvkm_vmm_unref(struct nvkm_vmm **pvmm)
1839 {
1840 struct nvkm_vmm *vmm = *pvmm;
1841 if (vmm) {
1842 kref_put(&vmm->kref, nvkm_vmm_del);
1843 *pvmm = NULL;
1844 }
1845 }
1846
1847 struct nvkm_vmm *
nvkm_vmm_ref(struct nvkm_vmm *vmm)
1849 {
1850 if (vmm)
1851 kref_get(&vmm->kref);
1852 return vmm;
1853 }
1854
1855 int
nvkm_vmm_new(struct nvkm_device *device, u64 addr, u64 size, void *argv,
	u32 argc, struct lock_class_key *key, const char *name,
	struct nvkm_vmm **pvmm)
1859 {
1860 struct nvkm_mmu *mmu = device->mmu;
1861 struct nvkm_vmm *vmm = NULL;
1862 int ret;
1863 ret = mmu->func->vmm.ctor(mmu, false, addr, size, argv, argc,
1864 key, name, &vmm);
1865 if (ret)
1866 nvkm_vmm_unref(&vmm);
1867 *pvmm = vmm;
1868 return ret;
1869 }
1870