#include "drmP.h"
#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>

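/* PTEs in the NV04-style ctxdma always describe 4KiB pages, independent of
 * the CPU PAGE_SIZE; nv04_sgdma_bind() below splits each CPU page into
 * NV_CTXDMA_PAGE_SIZE-sized chunks accordingly.
 */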
#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

struct nouveau_sgdma_be {
	/* this has to be the first field so populate/unpopulate in
	 * nouveau_bo.c works properly, otherwise have to move them here
	 */
	struct ttm_dma_tt ttm;
	struct drm_device *dev;
	u64 offset;
};

static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		NV_DEBUG(nvbe->dev, "\n");
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}

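/* NV04-style GART: one 32-bit PTE per 4KiB page, stored in the sg ctxdma
 * object.  PTEs start at word 2 because words 0 and 1 hold the DMA object
 * header written in nouveau_sgdma_init().  The low bits OR'd into each
 * entry (| 3) presumably mark it present and writable.
 */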
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);

	nvbe->offset = mem->start << PAGE_SHIFT;
	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
		uint32_t offset_l = lower_32_bits(dma_offset);

		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
			offset_l += NV_CTXDMA_PAGE_SIZE;
		}
	}

	return 0;
}

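/* Clear the PTEs written by nv04_sgdma_bind(); nothing to do if this ttm
 * was never bound.
 */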
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_device *dev = nvbe->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	unsigned i, j, pte;

	NV_DEBUG(dev, "\n");

	if (ttm->state != tt_bound)
		return 0;

	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
	for (i = 0; i < ttm->num_pages; i++) {
		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
	}

	return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
	.bind			= nv04_sgdma_bind,
	.unbind			= nv04_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

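/* NV41-style GART: the page table is a flat array of 32-bit PTEs, each
 * holding the page's bus address shifted right by 7 with bit 0 set,
 * presumably as a valid bit.  The writes to 0x100810 below appear to
 * trigger and poll a VM/TLB flush; the exact register semantics are
 * inferred from this code only.
 */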
static void
nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
{
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100810, 0x00000022);
	if (!nv_wait(dev, 0x100810, 0x00000100, 0x00000100))
		NV_ERROR(dev, "vm flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100810));
	nv_wr32(dev, 0x100810, 0x00000000);
}

static int
nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->ttm.dma_address;
	u32 pte = mem->start << 2;
	u32 cnt = ttm->num_pages;

	nvbe->offset = mem->start << PAGE_SHIFT;

	while (cnt--) {
		nv_wo32(pgt, pte, (*list++ >> 7) | 1);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	return 0;
}

static int
nv41_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = ttm->num_pages;

	while (cnt--) {
		nv_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}

	nv41_sgdma_flush(nvbe);
	return 0;
}

static struct ttm_backend_func nv41_sgdma_backend = {
	.bind			= nv41_sgdma_bind,
	.unbind			= nv41_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

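/* NV44-style GART: these chips have no present bit in the PTE, so unmapped
 * entries are instead pointed at the DMA-mapped dummy page allocated in
 * nouveau_sgdma_init().  The flush below appears to take the bound range
 * (size written to 0x100814, base | 0x20 to 0x100808) and poll for
 * completion; again, the register semantics are inferred from this code only.
 */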
static void
nv44_sgdma_flush(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_device *dev = nvbe->dev;

	nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
	nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
	if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
		NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
			 nv_rd32(dev, 0x100808));
	nv_wr32(dev, 0x100808, 0x00000000);
}

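/* NV44 PTEs are 27 bits wide (bus address >> 12) and are packed four to a
 * 16-byte-aligned group of four 32-bit words.  This helper read-modify-writes
 * one partial group: entries for which no address is supplied (list == NULL)
 * are pointed at the dummy page, and bit 30 of the last word is set,
 * presumably marking the group valid.
 */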
static void
nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
{
	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
	dma_addr_t dummy = dev_priv->gart_info.dummy.addr;
	u32 pte, tmp[4];

	pte   = base >> 2;
	base &= ~0x0000000f;

	tmp[0] = nv_ro32(pgt, base + 0x0);
	tmp[1] = nv_ro32(pgt, base + 0x4);
	tmp[2] = nv_ro32(pgt, base + 0x8);
	tmp[3] = nv_ro32(pgt, base + 0xc);
	while (cnt--) {
		u32 addr = list ? (*list++ >> 12) : (dummy >> 12);
		switch (pte++ & 0x3) {
		case 0:
			tmp[0] &= ~0x07ffffff;
			tmp[0] |= addr;
			break;
		case 1:
			tmp[0] &= ~0xf8000000;
			tmp[0] |= addr << 27;
			tmp[1] &= ~0x003fffff;
			tmp[1] |= addr >> 5;
			break;
		case 2:
			tmp[1] &= ~0xffc00000;
			tmp[1] |= addr << 22;
			tmp[2] &= ~0x0001ffff;
			tmp[2] |= addr >> 10;
			break;
		case 3:
			tmp[2] &= ~0xfffe0000;
			tmp[2] |= addr << 17;
			tmp[3] &= ~0x00000fff;
			tmp[3] |= addr >> 15;
			break;
		}
	}

	tmp[3] |= 0x40000000;

	nv_wo32(pgt, base + 0x0, tmp[0]);
	nv_wo32(pgt, base + 0x4, tmp[1]);
	nv_wo32(pgt, base + 0x8, tmp[2]);
	nv_wo32(pgt, base + 0xc, tmp[3]);
}

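/* Bind for NV44: handle an unaligned head of the range with
 * nv44_sgdma_fill(), write whole 4-PTE groups directly, handle the
 * unaligned tail the same way as the head, then flush the GART TLB.
 */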
static int
nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	dma_addr_t *list = nvbe->ttm.dma_address;
	u32 pte = mem->start << 2, tmp[4];
	u32 cnt = ttm->num_pages;
	int i;

	nvbe->offset = mem->start << PAGE_SHIFT;

	if (pte & 0x0000000c) {
		u32  max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, list, pte, part);
		pte  += (part << 2);
		list += part;
		cnt  -= part;
	}

	while (cnt >= 4) {
		for (i = 0; i < 4; i++)
			tmp[i] = *list++ >> 12;
		nv_wo32(pgt, pte + 0x0, tmp[0] >>  0 | tmp[1] << 27);
		nv_wo32(pgt, pte + 0x4, tmp[1] >>  5 | tmp[2] << 22);
		nv_wo32(pgt, pte + 0x8, tmp[2] >> 10 | tmp[3] << 17);
		nv_wo32(pgt, pte + 0xc, tmp[3] >> 15 | 0x40000000);
		pte  += 0x10;
		cnt  -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, list, pte, cnt);

	nv44_sgdma_flush(ttm);
	return 0;
}

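/* Unbind for NV44: partial groups at either end are re-pointed at the dummy
 * page via nv44_sgdma_fill(NULL), while whole groups are simply zeroed.
 */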
static int
nv44_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
	u32 pte = (nvbe->offset >> 12) << 2;
	u32 cnt = ttm->num_pages;

	if (pte & 0x0000000c) {
		u32  max = 4 - ((pte >> 2) & 0x3);
		u32 part = (cnt > max) ? max : cnt;
		nv44_sgdma_fill(pgt, NULL, pte, part);
		pte  += (part << 2);
		cnt  -= part;
	}

	while (cnt >= 4) {
		nv_wo32(pgt, pte + 0x0, 0x00000000);
		nv_wo32(pgt, pte + 0x4, 0x00000000);
		nv_wo32(pgt, pte + 0x8, 0x00000000);
		nv_wo32(pgt, pte + 0xc, 0x00000000);
		pte  += 0x10;
		cnt  -= 4;
	}

	if (cnt)
		nv44_sgdma_fill(pgt, NULL, pte, cnt);

	nv44_sgdma_flush(ttm);
	return 0;
}

static struct ttm_backend_func nv44_sgdma_backend = {
	.bind			= nv44_sgdma_bind,
	.unbind			= nv44_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

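/* On NV50 and later the VM code rewrites the page tables from move_notify(),
 * so bind/unbind here are effectively no-ops; bind only publishes the DMA
 * address list through the memory node.
 */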
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	/* noop: bound in move_notify() */
	node->pages = nvbe->ttm.dma_address;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
	.bind			= nv50_sgdma_bind,
	.unbind			= nv50_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

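/* Allocate a backend object, point it at the per-chipset function table
 * chosen in nouveau_sgdma_init(), and let ttm_dma_tt_init() allocate the
 * page and DMA address arrays.
 */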
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;
	nvbe->ttm.ttm.func = dev_priv->gart_info.func;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(nvbe);
		return NULL;
	}
	return &nvbe->ttm.ttm;
}

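/* Pick the GART aperture size and backend for this chipset and allocate the
 * DMA-mapped dummy page the NV44 path relies on.  Note that the PCIe
 * nv41/nv44 branch is currently compiled out by the "0 &&" in its condition,
 * so those chips fall back to the NV04-style ctxdma backend.
 */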
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	u32 aper_size, align;
	int ret;

	if (dev_priv->card_type >= NV_40 && pci_is_pcie(dev->pdev))
		aper_size = 512 * 1024 * 1024;
	else
		aper_size = 64 * 1024 * 1024;

	/* Dear NVIDIA, NV44+ would like proper present bits in PTEs for
	 * Christmas.  The cards before it have them, the cards after
	 * it have them, why is NV44 so unloved?
	 */
	dev_priv->gart_info.dummy.page = alloc_page(GFP_DMA32 | GFP_KERNEL);
	if (!dev_priv->gart_info.dummy.page)
		return -ENOMEM;

	dev_priv->gart_info.dummy.addr =
		pci_map_page(dev->pdev, dev_priv->gart_info.dummy.page,
			     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dev_priv->gart_info.dummy.addr)) {
		NV_ERROR(dev, "error mapping dummy page\n");
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
		return -ENOMEM;
	}

	if (dev_priv->card_type >= NV_50) {
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
		dev_priv->gart_info.func = &nv50_sgdma_backend;
	} else
	if (0 && pci_is_pcie(dev->pdev) &&
	    dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
		if (nv44_graph_class(dev)) {
			dev_priv->gart_info.func = &nv44_sgdma_backend;
			align = 512 * 1024;
		} else {
			dev_priv->gart_info.func = &nv41_sgdma_backend;
			align = 16;
		}

		ret = nouveau_gpuobj_new(dev, NULL, aper_size / 1024, align,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_HW;
	} else {
		ret = nouveau_gpuobj_new(dev, NULL, (aper_size / 1024) + 8, 16,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
		if (ret) {
			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
			return ret;
		}

		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				   (1 << 12) /* PT present */ |
				   (0 << 13) /* PT *not* linear */ |
				   (0 << 14) /* RW */ |
				   (2 << 16) /* PCI */);
		nv_wo32(gpuobj, 4, aper_size - 1);

		dev_priv->gart_info.sg_ctxdma = gpuobj;
		dev_priv->gart_info.aper_base = 0;
		dev_priv->gart_info.aper_size = aper_size;
		dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
		dev_priv->gart_info.func = &nv04_sgdma_backend;
	}

	return 0;
}

void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);

	if (dev_priv->gart_info.dummy.page) {
		pci_unmap_page(dev->pdev, dev_priv->gart_info.dummy.addr,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		__free_page(dev_priv->gart_info.dummy.page);
		dev_priv->gart_info.dummy.page = NULL;
	}
}

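/* Translate a GART offset into the bus address stored in the NV04-style
 * ctxdma PTE plus the sub-page offset; only meaningful on pre-NV50 cards,
 * hence the BUG_ON.
 */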
uint32_t
nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;

	BUG_ON(dev_priv->card_type >= NV_50);

	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
		(offset & NV_CTXDMA_PAGE_MASK);
}