/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"

#include "nouveau_drv.h"
#include "nouveau_vm.h"

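/*
 * Fill in the page-directory entry for "pde".  A populated pgt[0] maps
 * with small (4KiB) pages and pgt[1] with large (64KiB, given the <<16
 * coverage shift below) pages; if neither table exists, the PDE is
 * written with a recognisable poison pattern rather than a present
 * entry.
 */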
void
nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
		struct nouveau_gpuobj *pgt[2])
{
	u64 phys = 0xdeadcafe00000000ULL;
	u32 coverage = 0;

	if (pgt[0]) {
		phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
		coverage = (pgt[0]->size >> 3) << 12;
	} else
	if (pgt[1]) {
		phys = 0x00000001 | pgt[1]->vinst; /* present */
		coverage = (pgt[1]->size >> 3) << 16;
	}

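	/*
	 * Bits 5:6 of a present PDE appear to encode the page table's
	 * size class: tables spanning less virtual address space get a
	 * larger value, per the coverage thresholds below.
	 */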
	if (phys & 1) {
		if (coverage <= 32 * 1024 * 1024)
			phys |= 0x60;
		else if (coverage <= 64 * 1024 * 1024)
			phys |= 0x40;
		else if (coverage <= 128 * 1024 * 1024)
			phys |= 0x20;
	}

	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
}

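/*
 * Assemble a raw PTE: bit 0 marks the entry present, bits 4:5 select
 * the target memory (VRAM vs system RAM), and the memtype lands at
 * bit 40.  Bit 6 appears to request system coherence, and bit 3 to
 * make the mapping read-only when write access wasn't asked for.
 */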
static inline u64
vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
	phys |= 1; /* present */
	phys |= (u64)memtype << 40;
	phys |= target << 4;
	if (vma->access & NV_MEM_ACCESS_SYS)
		phys |= (1 << 6);
	if (!(vma->access & NV_MEM_ACCESS_WO))
		phys |= (1 << 3);
	return phys;
}

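/*
 * Map "cnt" pages of physically contiguous VRAM at "phys" into the page
 * table beginning at PTE index "pte".  "delta" is the offset into the
 * memory object, used below to derive compression tag addresses.
 */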
void
nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	    struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
	struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
	u32 comp = (mem->memtype & 0x180) >> 7;
	u32 block, target;
	int i;

	/* IGPs don't have real VRAM, re-target to stolen system memory */
	target = 0;
	if (dev_priv->vram_sys_base) {
		phys += dev_priv->vram_sys_base;
		target = 3;
	}

	phys  = vm_addr(vma, phys, mem->memtype, target);
	pte <<= 3;
	cnt <<= 3;

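	/*
	 * Note: pte and cnt are in bytes (8 bytes per PTE) from here on.
	 * PTEs are written in the largest naturally-aligned power-of-two
	 * runs possible; bits 7:9 of the low word appear to record the
	 * run size so the MMU can treat the run as one contiguous block.
	 */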
	while (cnt) {
		u32 offset_h = upper_32_bits(phys);
		u32 offset_l = lower_32_bits(phys);

		for (i = 7; i >= 0; i--) {
			block = 1 << (i + 3);
			if (cnt >= block && !(pte & (block - 1)))
				break;
		}
		offset_l |= (i << 7);

		phys += block << (vma->node->type - 3);
		cnt  -= block;
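		/*
		 * For compressed memtypes, stash the tag-line address in
		 * the high PTE word; one tag seems to cover 64KiB of the
		 * object, hence the delta >> 16.
		 */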
		if (comp) {
			u32 tag = mem->tag->start + ((delta >> 16) * comp);
			offset_h |= (tag << 17);
			delta    += block << (vma->node->type - 3);
		}

		while (block) {
			nv_wo32(pgt, pte + 0, offset_l);
			nv_wo32(pgt, pte + 4, offset_h);
			pte += 8;
			block -= 8;
		}
	}
}

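/*
 * Map a scatter/gather list of system-memory pages, one PTE per entry.
 * Target 2 is presumably snooped (coherent) system memory and target 3
 * non-snooped, selected by the vma's NOSNOOP access flag.
 */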
void
nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
	       struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
	pte <<= 3;
	while (cnt--) {
		u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
		pte += 8;
	}
}

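/*
 * Invalidate "cnt" PTEs starting at index "pte" by zeroing both words
 * of each entry, clearing the present bit.
 */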
void
nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
	pte <<= 3;
	while (cnt--) {
		nv_wo32(pgt, pte + 0, 0x00000000);
		nv_wo32(pgt, pte + 4, 0x00000000);
		pte += 8;
	}
}

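/*
 * After page-table updates: flush pending writes out of instmem, then
 * invalidate the TLBs of every engine that currently references this
 * address space.  The BAR VMs only need the BAR engine (6) flushed.
 */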
void
nv50_vm_flush(struct nouveau_vm *vm)
{
	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i;

	pinstmem->flush(vm->dev);

	/* BAR */
	if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
		nv50_vm_flush_engine(vm->dev, 6);
		return;
	}

	pfifo->tlb_flush(vm->dev);
	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
		if (atomic_read(&vm->engref[i]))
			dev_priv->eng[i]->tlb_flush(vm->dev, i);
	}
}

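/*
 * Trigger a TLB flush for a single engine by writing its ID to register
 * 0x100c80 and polling until the busy bit (bit 0) clears.  vm_lock
 * serialises the write/wait pair against flushes from other VMs.
 */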
void
nv50_vm_flush_engine(struct drm_device *dev, int engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->vm_lock, flags);
	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
	spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
}