/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "changk104.h"
#include "cgrp.h"

#include <core/client.h>
#include <core/gpuobj.h>

#include <nvif/clc36f.h>
#include <nvif/unpack.h>

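/* Volta moved work submission to a doorbell register in the usermode
 * region: after updating GP_PUT, userspace writes this token there to
 * have HOST schedule the channel.  On GV100 the token is simply the
 * channel ID.
 */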
static u32
gv100_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *chan)
{
	return chan->chid;
}

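/* Toggle the engine-context-valid bit in the channel's instance block;
 * bit 17 covers the copy engines, bit 16 everything else.  The channel
 * must be preempted and its runlist blocked while the bit is flipped.
 */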
static int
gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid)
{
	struct nvkm_subdev *subdev = &chan->base.fifo->engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 mask = ce ? 0x00020000 : 0x00010000;
	const u32 data = valid ? mask : 0x00000000;
	int ret;

	/* Block runlist to prevent the channel from being rescheduled. */
	mutex_lock(&chan->fifo->base.mutex);
	nvkm_mask(device, 0x002630, BIT(chan->runl), BIT(chan->runl));

	/* Preempt the channel. */
	ret = gk104_fifo_gpfifo_kick_locked(chan);
	if (ret == 0) {
		/* Update engine context validity. */
		nvkm_kmap(chan->base.inst);
		nvkm_mo32(chan->base.inst, 0x0ac, mask, data);
		nvkm_done(chan->base.inst);
	}

	/* Resume runlist. */
	nvkm_mask(device, 0x002630, BIT(chan->runl), 0);
	mutex_unlock(&chan->fifo->base.mutex);
	return ret;
}

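/* Invalidate the engine context and clear its pointer from the instance
 * block.  For copy engines a preempt (kick) alone is sufficient, as no
 * separate context pointer is maintained for them here.
 */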
int
gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine, bool suspend)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;
	int ret;

	if (engine->subdev.type == NVKM_ENGINE_CE)
		return gk104_fifo_gpfifo_kick(chan);

	ret = gv100_fifo_gpfifo_engine_valid(chan, false, false);
	if (ret && suspend)
		return ret;

	nvkm_kmap(inst);
	nvkm_wo32(inst, 0x0210, 0x00000000);
	nvkm_wo32(inst, 0x0214, 0x00000000);
	nvkm_done(inst);
	return ret;
}

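/* Write the engine context address into the instance block and mark the
 * context valid.  The low bits of the address word carry flag bits (0x4
 * here).
 */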
int
gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
			      struct nvkm_engine *engine)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct gk104_fifo_engn *engn = gk104_fifo_gpfifo_engine(chan, engine);
	struct nvkm_gpuobj *inst = chan->base.inst;

	if (engine->subdev.type == NVKM_ENGINE_CE)
		return 0;

	nvkm_kmap(inst);
	nvkm_wo32(inst, 0x210, lower_32_bits(engn->vma->addr) | 0x00000004);
	nvkm_wo32(inst, 0x214, upper_32_bits(engn->vma->addr));
	nvkm_done(inst);

	return gv100_fifo_gpfifo_engine_valid(chan, false, true);
}

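/* Channel implementation for VOLTA_CHANNEL_GPFIFO_A; everything except
 * engine context handling and the submission token is inherited from
 * the GF100/GK104 code.
 */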
static const struct nvkm_fifo_chan_func
gv100_fifo_gpfifo = {
	.dtor = gk104_fifo_gpfifo_dtor,
	.init = gk104_fifo_gpfifo_init,
	.fini = gk104_fifo_gpfifo_fini,
	.ntfy = gf100_fifo_chan_ntfy,
	.engine_ctor = gk104_fifo_gpfifo_engine_ctor,
	.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
	.engine_init = gv100_fifo_gpfifo_engine_init,
	.engine_fini = gv100_fifo_gpfifo_engine_fini,
	.submit_token = gv100_fifo_gpfifo_submit_token,
};

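/* Common constructor: selects a runlist, allocates the channel along
 * with its USERD and fault method buffer, and fills in the RAMFC.  The
 * function table is taken as a parameter so other implementations can
 * reuse this path.  Returns the channel ID, instance address and submit
 * token to the caller.
 */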
int
gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
		       struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
		       u64 vmm, u64 ioffset, u64 ilength, u64 *inst, bool priv,
		       u32 *token, const struct nvkm_oclass *oclass,
		       struct nvkm_object **pobject)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct gk104_fifo_chan *chan;
	int runlist = ffs(*runlists) - 1, ret, i;
	u64 usermem, mthd;
	u32 size;

	if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
		return -EINVAL;
	*runlists = BIT_ULL(runlist);

	/* Allocate the channel. */
	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->base.object;
	chan->fifo = fifo;
	chan->runl = runlist;
	INIT_LIST_HEAD(&chan->head);

	ret = nvkm_fifo_chan_ctor(func, &fifo->base, 0x1000, 0x1000, true, vmm,
				  0, fifo->runlist[runlist].engm, 1,
				  fifo->user.bar->addr, 0x200,
				  oclass, &chan->base);
	if (ret)
		return ret;

	*chid = chan->base.chid;
	*inst = chan->base.inst->addr;
	*token = chan->base.func->submit_token(&chan->base);

	/* Hack to support GPUs where even individual channels should be
	 * part of a channel group.
	 */
	if (fifo->func->cgrp_force) {
		if (!(chan->cgrp = kmalloc(sizeof(*chan->cgrp), GFP_KERNEL)))
			return -ENOMEM;
		chan->cgrp->id = chan->base.chid;
		INIT_LIST_HEAD(&chan->cgrp->head);
		INIT_LIST_HEAD(&chan->cgrp->chan);
		chan->cgrp->chan_nr = 0;
	}

	/* Clear channel control registers (this channel's USERD). */
	usermem = chan->base.chid * 0x200;
	ilength = order_base_2(ilength / 8); /* GPFIFO entries are 8 bytes */

	nvkm_kmap(fifo->user.mem);
	for (i = 0; i < 0x200; i += 4)
		nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
	nvkm_done(fifo->user.mem);
	usermem = nvkm_memory_addr(fifo->user.mem) + usermem;

	/* Allocate fault method buffer (magics come from nvgpu). */
	size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
	/* hweight32(PCE_MAP) == number of enabled copy engines. */
	size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
	size = roundup(size, PAGE_SIZE);

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, true,
			      &chan->mthd);
	if (ret)
		return ret;

	mthd = nvkm_memory_bar2(chan->mthd);
	if (mthd == ~0ULL)
		return -EFAULT;

	/* RAMFC - channel state, written directly into the instance block. */
	nvkm_kmap(chan->base.inst);
	nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem)); /* USERD */
	nvkm_wo32(chan->base.inst, 0x00c, upper_32_bits(usermem));
	nvkm_wo32(chan->base.inst, 0x010, 0x0000face);
	nvkm_wo32(chan->base.inst, 0x030, 0x7ffff902);
	nvkm_wo32(chan->base.inst, 0x048, lower_32_bits(ioffset)); /* GPFIFO base */
	nvkm_wo32(chan->base.inst, 0x04c, upper_32_bits(ioffset) |
					  (ilength << 16)); /* log2(entries) */
	nvkm_wo32(chan->base.inst, 0x084, 0x20400000);
	nvkm_wo32(chan->base.inst, 0x094, 0x30000001);
	nvkm_wo32(chan->base.inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
	nvkm_wo32(chan->base.inst, 0x0e8, chan->base.chid);
	nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
	nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
	nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
	nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(mthd)); /* fault method buffer */
	nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(mthd));
	nvkm_done(chan->base.inst);
	return gv100_fifo_gpfifo_engine_valid(chan, true, true);
}

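/* Object constructor: unpack the VOLTA_CHANNEL_GPFIFO_A arguments from
 * userspace and hand them to gv100_fifo_gpfifo_new_() above.
 */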
int
gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
		      void *data, u32 size, struct nvkm_object **pobject)
{
	struct nvkm_object *parent = oclass->parent;
	union {
		struct volta_channel_gpfifo_a_v0 v0;
	} *args = data;
	int ret = -ENOSYS;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
				   "ioffset %016llx ilength %08x "
				   "runlist %016llx priv %d\n",
			   args->v0.version, args->v0.vmm, args->v0.ioffset,
			   args->v0.ilength, args->v0.runlist, args->v0.priv);
		return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo,
					      &args->v0.runlist,
					      &args->v0.chid,
					       args->v0.vmm,
					       args->v0.ioffset,
					       args->v0.ilength,
					      &args->v0.inst,
					       args->v0.priv,
					      &args->v0.token,
					      oclass, pobject);
	}

	return ret;
}