/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

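/* Core GPU virtual-memory management: reference-counted page table
 * allocation per PDE, and mapping/unmapping of VRAM regions and
 * scatter/gather page lists into address-space allocations (VMAs).
 *
 * Illustrative lifecycle, a sketch only (the surrounding object names
 * and flag values are assumptions, error handling omitted):
 *
 *	struct nouveau_vm *vm = NULL;
 *	struct nouveau_vma vma = {};
 *
 *	nouveau_vm_new(dev, 0, 1ULL << 40, 0x1000, &vm);
 *	nouveau_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &vma);
 *	nouveau_vm_map(&vma, node);
 *	...
 *	nouveau_vm_unmap(&vma);
 *	nouveau_vm_put(&vma);
 *	nouveau_vm_ref(NULL, &vm, NULL);
 */

/* Map all regions backing 'node' into 'vma', starting 'delta' bytes into
 * the VMA.  The VMA must have been allocated, with page tables, via
 * nouveau_vm_get().
 */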
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_mm_node *r;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			/* clamp the run to the end of this page table */
			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vm->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				/* filled this page table; advance past the
				 * pages just mapped and continue from the
				 * first PTE of the next table
				 */
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	vm->flush(vm);
}

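/* Map an entire nouveau_mem object at the start of the VMA. */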
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	nouveau_vm_map_at(vma, 0, node);
}

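/* Map 'length' bytes of the DMA page list in 'mem' into 'vma', starting
 * 'delta' bytes into the VMA.
 */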
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  struct nouveau_mem *mem)
{
	struct nouveau_vm *vm = vma->vm;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->map_sg(vma, pgt, mem, pte, len, list);

		num  -= len;
		pte  += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

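/* Invalidate the PTEs covering 'length' bytes at byte offset 'delta'
 * into 'vma'.
 */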
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	int big = vma->node->type != vm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vm->flush(vm);
}

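/* Unmap the entire VMA. */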
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

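/* Drop one reference on each page table between fpde and lpde.  Tables
 * whose refcount reaches zero are released and their PDEs cleared in
 * every page directory using this VM.  Called with vm->mm.mutex held.
 */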
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		/* drop the list mutex around the final unref, object
		 * teardown must not happen while it's held
		 */
		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
	}
}

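/* Allocate a page table for 'pde' at page size 'type' and point the PDE
 * at it in every page directory.  Called with vm->mm.mutex held; the
 * mutex is dropped around the allocation, so a racing caller may fill
 * the PDE first.
 */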
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vm->spg_shift);
	u32 pgt_size;
	int ret;

	/* one 8-byte PTE for each page the PDE covers at this page size */
	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&vm->mm.mutex);
	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&vm->mm.mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
		return 0;
	}

	vpgt->obj[big] = pgt;
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}

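/* Allocate 'size' bytes of address space from the VM at 'page_shift'
 * granularity, taking references on (and allocating, where needed) the
 * page tables covering the range.
 */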
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mm.mutex);
	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mm.mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_put(&vm->mm, vma->node);
			mutex_unlock(&vm->mm.mutex);
			vma->node = NULL;
			return ret;
		}
	}
	mutex_unlock(&vm->mm.mutex);

	vma->vm     = vm;
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

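/* Release the address space held by 'vma' and drop its page table
 * references.
 */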
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;

	mutex_lock(&vm->mm.mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
	nouveau_mm_put(&vm->mm, vma->node);
	vma->node = NULL;
	mutex_unlock(&vm->mm.mutex);
}

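/* Create a VM covering ['offset', 'offset' + 'length'), with addresses
 * from 'mm_offset' upwards available to nouveau_vm_get().
 */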
int
nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
	       struct nouveau_vm **pvm)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	u32 block, pgt_bits;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	if (dev_priv->card_type == NV_50) {
		vm->map_pgt = nv50_vm_map_pgt;
		vm->map = nv50_vm_map;
		vm->map_sg = nv50_vm_map_sg;
		vm->unmap = nv50_vm_unmap;
		vm->flush = nv50_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 16;

		pgt_bits = 29;
		block = (1 << pgt_bits);
		if (length < block)
			block = length;

	} else
	if (dev_priv->card_type >= NV_C0) {
		vm->map_pgt = nvc0_vm_map_pgt;
		vm->map = nvc0_vm_map;
		vm->map_sg = nvc0_vm_map_sg;
		vm->unmap = nvc0_vm_unmap;
		vm->flush = nvc0_vm_flush;
		vm->spg_shift = 12;
		vm->lpg_shift = 17;
		pgt_bits = 27;
		block = 4096;
	} else {
		kfree(vm);
		return -ENOSYS;
	}

	vm->fpde   = offset >> pgt_bits;
	vm->lpde   = (offset + length - 1) >> pgt_bits;
	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->dev = dev;
	vm->refcount = 1;
	vm->pgt_bits = pgt_bits - 12;

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		kfree(vm->pgt);
		kfree(vm);
		return ret;
	}

	*pvm = vm;
	return 0;
}

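/* Attach a page directory to the VM and fill its PDEs with the current
 * page tables.
 */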
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&vm->mm.mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mm.mutex);
	return 0;
}

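/* Detach a previously linked page directory from the VM. */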
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
	struct nouveau_vm_pgd *vpgd, *tmp;
	struct nouveau_gpuobj *pgd = NULL;

	if (!mpgd)
		return;

	mutex_lock(&vm->mm.mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj == mpgd) {
			pgd = vpgd->obj;
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
	}
	mutex_unlock(&vm->mm.mutex);

	nouveau_gpuobj_ref(NULL, &pgd);
}

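/* Final teardown, called once the last reference is dropped. */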
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}

	nouveau_mm_fini(&vm->mm);
	kfree(vm->pgt);
	kfree(vm);
}

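/* Update '*ptr' to reference 'ref' (either may be NULL), adjusting VM
 * refcounts and linking/unlinking 'pgd' as appropriate.  The old VM is
 * destroyed when its last reference goes away.
 */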
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *vm;
	int ret;

	vm = ref;
	if (vm) {
		ret = nouveau_vm_link(vm, pgd);
		if (ret)
			return ret;

		vm->refcount++;
	}

	vm = *ptr;
	*ptr = ref;

	if (vm) {
		nouveau_vm_unlink(vm, pgd);

		if (--vm->refcount == 0)
			nouveau_vm_del(vm);
	}

	return 0;
}