/*
 * Copyright 2021 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#define ga102_fifo(p) container_of((p), struct ga102_fifo, base.engine)
#define ga102_chan(p) container_of((p), struct ga102_chan, object)
#include <engine/fifo.h>
#include "user.h"

#include <core/memory.h>
#include <subdev/mmu.h>
#include <subdev/timer.h>
#include <subdev/top.h>

#include <nvif/cl0080.h>
#include <nvif/clc36f.h>
#include <nvif/class.h>

struct ga102_fifo {
	struct nvkm_fifo base;
};

struct ga102_chan {
	struct nvkm_object object;

	struct {
		u32 runl; /* runlist control register base, from device topology */
		u32 chan; /* channel control register, read from runl + 0x004 */
	} ctrl;

	struct nvkm_memory *mthd;
	struct nvkm_memory *inst;
	struct nvkm_memory *user;
	struct nvkm_memory *runl;

	struct nvkm_vmm *vmm;
};

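/* The only class exposed on this channel is the Ampere copy engine
 * (AMPERE_DMA_COPY_B); no other engines are reachable through this
 * minimal FIFO implementation.
 */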
static int
ga102_chan_sclass(struct nvkm_object *object, int index, struct nvkm_oclass *oclass)
{
	if (index == 0) {
		oclass->ctor = nvkm_object_new;
		oclass->base = (struct nvkm_sclass) { -1, -1, AMPERE_DMA_COPY_B };
		return 0;
	}

	return -EINVAL;
}

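/* Map the channel's USERD page for the client by returning its address
 * within the instance-memory BAR (PCI resource 3).  Presumably this is
 * where the client updates the GPFIFO indices for this channel.
 */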
static int
ga102_chan_map(struct nvkm_object *object, void *argv, u32 argc,
	       enum nvkm_object_map *type, u64 *addr, u64 *size)
{
	struct ga102_chan *chan = ga102_chan(object);
	struct nvkm_device *device = chan->object.engine->subdev.device;
	u64 bar2 = nvkm_memory_bar2(chan->user);

	if (bar2 == ~0ULL)
		return -EFAULT;

	*type = NVKM_OBJECT_MAP_IO;
	*addr = device->func->resource_addr(device, 3) + bar2;
	*size = 0x1000;
	return 0;
}

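/* Tear the channel down.  The writes below appear to disable the channel,
 * request a runlist preempt (+0x098) and poll for its completion, submit a
 * zero-length runlist (+0x088), then write all-ones to the channel control
 * word.  The register semantics are undocumented here, so this is an
 * interpretation of the programming sequence.
 */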
static int
ga102_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct ga102_chan *chan = ga102_chan(object);
	struct nvkm_device *device = chan->object.engine->subdev.device;

	nvkm_wr32(device, chan->ctrl.chan, 0x00000003);

	nvkm_wr32(device, chan->ctrl.runl + 0x098, 0x01000000);
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, chan->ctrl.runl + 0x098) & 0x00100000))
			break;
	);

	nvkm_wr32(device, chan->ctrl.runl + 0x088, 0);

	nvkm_wr32(device, chan->ctrl.chan, 0xffffffff);
	return 0;
}

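/* Bring the channel up.  This appears to enable runlist scheduling (+0x300
 * bit 31), point the hardware at the runlist buffer (+0x080/+0x084), submit
 * a two-entry runlist (+0x088), enable the channel, and finally write +0x090,
 * which looks like a doorbell ring for channel 0.  As with fini, this is a
 * best-effort reading of undocumented registers.
 */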
static int
ga102_chan_init(struct nvkm_object *object)
{
	struct ga102_chan *chan = ga102_chan(object);
	struct nvkm_device *device = chan->object.engine->subdev.device;

	nvkm_mask(device, chan->ctrl.runl + 0x300, 0x80000000, 0x80000000);

	nvkm_wr32(device, chan->ctrl.runl + 0x080, lower_32_bits(nvkm_memory_addr(chan->runl)));
	nvkm_wr32(device, chan->ctrl.runl + 0x084, upper_32_bits(nvkm_memory_addr(chan->runl)));
	nvkm_wr32(device, chan->ctrl.runl + 0x088, 2);

	nvkm_wr32(device, chan->ctrl.chan, 0x00000002);
	nvkm_wr32(device, chan->ctrl.runl + 0x0090, 0);
	return 0;
}

static void *
ga102_chan_dtor(struct nvkm_object *object)
{
	struct ga102_chan *chan = ga102_chan(object);

	if (chan->vmm) {
		nvkm_vmm_part(chan->vmm, chan->inst);
		nvkm_vmm_unref(&chan->vmm);
	}

	nvkm_memory_unref(&chan->runl);
	nvkm_memory_unref(&chan->user);
	nvkm_memory_unref(&chan->inst);
	nvkm_memory_unref(&chan->mthd);
	return chan;
}

static const struct nvkm_object_func
ga102_chan = {
	.dtor = ga102_chan_dtor,
	.init = ga102_chan_init,
	.fini = ga102_chan_fini,
	.map = ga102_chan_map,
	.sclass = ga102_chan_sclass,
};

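/* Construct the single GPFIFO channel: look up the client's VMM, locate a
 * copy-engine runlist from the device topology, allocate the method buffer,
 * instance block, USERD and runlist, fill in the instance block and runlist,
 * then join the VMM.
 */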
static int
ga102_chan_new(struct nvkm_device *device,
	       const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
{
	struct volta_channel_gpfifo_a_v0 *args = argv;
	struct nvkm_top_device *tdev;
	struct nvkm_vmm *vmm;
	struct ga102_chan *chan;
	int ret;

	if (argc != sizeof(*args))
		return -ENOSYS;

	vmm = nvkm_uvmm_search(oclass->client, args->vmm);
	if (IS_ERR(vmm))
		return PTR_ERR(vmm);

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;

	nvkm_object_ctor(&ga102_chan, oclass, &chan->object);
	*pobject = &chan->object;

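	/* Use the runlist of the first copy engine found in the device
	 * topology; its registers drive all channel control above.
	 */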
	list_for_each_entry(tdev, &device->top->device, head) {
		if (tdev->type == NVKM_ENGINE_CE) {
			chan->ctrl.runl = tdev->runlist;
			break;
		}
	}

	if (!chan->ctrl.runl)
		return -ENODEV;

	chan->ctrl.chan = nvkm_rd32(device, chan->ctrl.runl + 0x004) & 0xfffffff0;

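	/* Only a single channel (chid 0) is exposed.  The doorbell token
	 * handed back to the client appears to be read from the runlist
	 * register block at +0x008.
	 */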
	args->chid = 0;
	args->inst = 0;
	args->token = nvkm_rd32(device, chan->ctrl.runl + 0x008) & 0xffff0000;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->mthd);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->inst);
	if (ret)
		return ret;

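	/* Fill in the instance block: the GPFIFO base comes from
	 * args->ioffset with its log2 entry count at +0x04c, and the method
	 * buffer's BAR2 address goes at +0x220/+0x224.  The remaining magic
	 * values are taken as-is; their meaning is not documented here.
	 */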
	nvkm_kmap(chan->inst);
	nvkm_wo32(chan->inst, 0x010, 0x0000face);
	nvkm_wo32(chan->inst, 0x030, 0x7ffff902);
	nvkm_wo32(chan->inst, 0x048, lower_32_bits(args->ioffset));
	nvkm_wo32(chan->inst, 0x04c, upper_32_bits(args->ioffset) |
				     (order_base_2(args->ilength / 8) << 16));
	nvkm_wo32(chan->inst, 0x084, 0x20400000);
	nvkm_wo32(chan->inst, 0x094, 0x30000001);
	nvkm_wo32(chan->inst, 0x0ac, 0x00020000);
	nvkm_wo32(chan->inst, 0x0e4, 0x00000000);
	nvkm_wo32(chan->inst, 0x0e8, 0);
	nvkm_wo32(chan->inst, 0x0f4, 0x00001000);
	nvkm_wo32(chan->inst, 0x0f8, 0x10003080);
	nvkm_mo32(chan->inst, 0x218, 0x00000000, 0x00000000);
	nvkm_wo32(chan->inst, 0x220, lower_32_bits(nvkm_memory_bar2(chan->mthd)));
	nvkm_wo32(chan->inst, 0x224, upper_32_bits(nvkm_memory_bar2(chan->mthd)));
	nvkm_done(chan->inst);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->user);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x1000, 0x1000, true, &chan->runl);
	if (ret)
		return ret;

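	/* Build the runlist: the first entry looks like a TSG header covering
	 * one channel, the second is the channel entry carrying the USERD and
	 * instance-block addresses.
	 */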
	nvkm_kmap(chan->runl);
	nvkm_wo32(chan->runl, 0x00, 0x80030001);
	nvkm_wo32(chan->runl, 0x04, 1);
	nvkm_wo32(chan->runl, 0x08, 0);
	nvkm_wo32(chan->runl, 0x0c, 0x00000000);
	nvkm_wo32(chan->runl, 0x10, lower_32_bits(nvkm_memory_addr(chan->user)));
	nvkm_wo32(chan->runl, 0x14, upper_32_bits(nvkm_memory_addr(chan->user)));
	nvkm_wo32(chan->runl, 0x18, lower_32_bits(nvkm_memory_addr(chan->inst)));
	nvkm_wo32(chan->runl, 0x1c, upper_32_bits(nvkm_memory_addr(chan->inst)));
	nvkm_done(chan->runl);

	ret = nvkm_vmm_join(vmm, chan->inst);
	if (ret)
		return ret;

	chan->vmm = nvkm_vmm_ref(vmm);
	return 0;
}

static const struct nvkm_device_oclass
ga102_chan_oclass = {
	.ctor = ga102_chan_new,
};

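/* The usermode (VOLTA_USERMODE_A) object is unchanged from Turing, so the
 * TU102 constructor is reused directly.
 */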
static int
ga102_user_new(struct nvkm_device *device,
	       const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nvkm_object **pobject)
{
	return tu102_fifo_user_new(oclass, argv, argc, pobject);
}

static const struct nvkm_device_oclass
ga102_user_oclass = {
	.ctor = ga102_user_new,
};

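/* Two classes are exposed at the device level: the usermode region and an
 * AMPERE_CHANNEL_GPFIFO_B channel.  Returning 2 for out-of-range indices
 * appears to report the total class count to the caller.
 */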
static int
ga102_fifo_sclass(struct nvkm_oclass *oclass, int index, const struct nvkm_device_oclass **class)
{
	if (index == 0) {
		oclass->base = (struct nvkm_sclass) { -1, -1, VOLTA_USERMODE_A };
		*class = &ga102_user_oclass;
		return 0;
	} else
	if (index == 1) {
		oclass->base = (struct nvkm_sclass) { 0, 0, AMPERE_CHANNEL_GPFIFO_B };
		*class = &ga102_chan_oclass;
		return 0;
	}

	return 2;
}

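/* Only NV_DEVICE_HOST_CHANNELS is answered, and with a count of one: this
 * FIFO exposes exactly one channel.
 */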
static int
ga102_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
{
	switch (mthd) {
	case NV_DEVICE_HOST_CHANNELS: *data = 1; return 0;
	default:
		break;
	}

	return -ENOSYS;
}

static void *
ga102_fifo_dtor(struct nvkm_engine *engine)
{
	return ga102_fifo(engine);
}

static const struct nvkm_engine_func
ga102_fifo = {
	.dtor = ga102_fifo_dtor,
	.info = ga102_fifo_info,
	.base.sclass = ga102_fifo_sclass,
};

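/* Engine constructor.  This is a deliberately minimal FIFO: a bare
 * nvkm_fifo wrapper whose only job is to expose the usermode region and a
 * single copy-engine channel via the classes above.
 */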
int
ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_fifo **pfifo)
{
	struct ga102_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;

	nvkm_engine_ctor(&ga102_fifo, device, type, inst, true, &fifo->base.engine);
	*pfifo = &fifo->base;
	return 0;
}