/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/oproxy.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

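/*
 * Proxy for an object instantiated on a channel.  Remembers which channel
 * the object belongs to, and the hash handle returned by the channel's
 * object_ctor hook so it can be passed back to object_dtor on destruction.
 */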
struct nvkm_fifo_chan_object {
	struct nvkm_oproxy oproxy;
	struct nvkm_fifo_chan *chan;
	int hash;
};

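/*
 * Look up the channel's per-engine context slot for the given engine via
 * the fifo's engine_id mapping.  Returns NULL if this fifo does not expose
 * the engine.
 */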
static struct nvkm_fifo_engn *
nvkm_fifo_chan_engn(struct nvkm_fifo_chan *chan, struct nvkm_engine *engine)
{
	int engi = chan->fifo->func->engine_id(chan->fifo, engine);
	if (engi >= 0)
		return &chan->engn[engi];
	return NULL;
}

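/*
 * Halt an engine's context on this channel once the last object using it
 * has been suspended.  The chip-specific engine_fini hook detaches the
 * context from the channel; a detach failure aborts the operation.
 */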
static int
nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	const char *name = engine->subdev.name;
	int ret = 0;

	if (--engn->usecount)
		return 0;

	if (chan->func->engine_fini) {
		ret = chan->func->engine_fini(chan, engine, suspend);
		if (ret) {
			nvif_error(&chan->object,
				   "detach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	if (engn->object) {
		ret = nvkm_object_fini(engn->object, suspend);
		if (ret && suspend)
			return ret;
	}

	nvif_trace(&chan->object, "detached %s\n", name);
	return ret;
}

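/*
 * Counterpart to the above: (re)initialise an engine's context when the
 * first object using it on this channel is initialised, then attach it
 * via the chip-specific engine_init hook.
 */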
static int
nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	const char *name = engine->subdev.name;
	int ret;

	if (engn->usecount++)
		return 0;

	if (engn->object) {
		ret = nvkm_object_init(engn->object);
		if (ret)
			return ret;
	}

	if (chan->func->engine_init) {
		ret = chan->func->engine_init(chan, engine);
		if (ret) {
			nvif_error(&chan->object,
				   "attach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	nvif_trace(&chan->object, "attached %s\n", name);
	return 0;
}

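/*
 * Destructor for a proxied channel object: remove it from the channel's
 * object hash, and tear down the engine context entirely once the last
 * object referencing it is gone.
 */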
static void
nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.base.engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);

	if (chan->func->object_dtor)
		chan->func->object_dtor(chan, object->hash);

	if (!--engn->refcount) {
		if (chan->func->engine_dtor)
			chan->func->engine_dtor(chan, engine);
		nvkm_object_del(&engn->object);
		if (chan->vmm)
			atomic_dec(&chan->vmm->engref[engine->subdev.type]);
	}
}

static const struct nvkm_oproxy_func
nvkm_fifo_chan_child_func = {
	.dtor[0] = nvkm_fifo_chan_child_del,
	.init[0] = nvkm_fifo_chan_child_init,
	.fini[0] = nvkm_fifo_chan_child_fini,
};

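/*
 * Constructor for objects created on a channel.  Wraps the real object in
 * an oproxy so init/fini/dtor can be intercepted, creates the engine's
 * channel context (cclass) on first use of that engine, and registers the
 * new object with the channel via object_ctor.
 */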
static int
nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
			 struct nvkm_object **pobject)
{
	struct nvkm_engine *engine = oclass->engine;
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	struct nvkm_fifo_chan_object *object;
	int ret = 0;

	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
	object->chan = chan;
	*pobject = &object->oproxy.base;

	if (!engn->refcount++) {
		struct nvkm_oclass cclass = {
			.client = oclass->client,
			.engine = oclass->engine,
		};

		if (chan->vmm)
			atomic_inc(&chan->vmm->engref[engine->subdev.type]);

		if (engine->func->fifo.cclass) {
			ret = engine->func->fifo.cclass(chan, &cclass,
							&engn->object);
		} else
		if (engine->func->cclass) {
			ret = nvkm_object_new_(engine->func->cclass, &cclass,
					       NULL, 0, &engn->object);
		}
		if (ret)
			return ret;

		if (chan->func->engine_ctor) {
			ret = chan->func->engine_ctor(chan, oclass->engine,
						      engn->object);
			if (ret)
				return ret;
		}
	}

	ret = oclass->base.ctor(&(const struct nvkm_oclass) {
					.base = oclass->base,
					.engn = oclass->engn,
					.handle = oclass->handle,
					.object = oclass->object,
					.client = oclass->client,
					.parent = engn->object ?
						  engn->object :
						  oclass->parent,
					.engine = engine,
				}, data, size, &object->oproxy.object);
	if (ret)
		return ret;

	if (chan->func->object_ctor) {
		object->hash =
			chan->func->object_ctor(chan, object->oproxy.object);
		if (object->hash < 0)
			return object->hash;
	}

	return 0;
}

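/*
 * Enumerate the classes available on this channel by walking the sclass
 * lists of every engine in the channel's engine mask, with 'index'
 * counting across all of them.
 */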
static int
nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
			 struct nvkm_oclass *oclass)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	struct nvkm_engine *engine;
	u32 engm = chan->engm;
	int engi, ret, c;

	for (; c = 0, engi = __ffs(engm), engm; engm &= ~(1ULL << engi)) {
		if (!(engine = fifo->func->id_engine(fifo, engi)))
			continue;
		oclass->engine = engine;
		oclass->base.oclass = 0;

		if (engine->func->fifo.sclass) {
			ret = engine->func->fifo.sclass(oclass, index);
			if (oclass->base.oclass) {
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_fifo_chan_child_new;
				return 0;
			}

			index -= ret;
			continue;
		}

		while (engine->func->sclass[c].oclass) {
			if (c++ == index) {
				oclass->base = engine->func->sclass[index];
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_fifo_chan_child_new;
				return 0;
			}
		}
		index -= c;
	}

	return -EINVAL;
}

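/* event notifier lookup, forwarded to the chip-specific channel hook */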
static int
nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
		    struct nvkm_event **pevent)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (chan->func->ntfy)
		return chan->func->ntfy(chan, type, pevent);
	return -ENODEV;
}

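/* describe the mapping of the channel's user registers to the client */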
static int
nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
		   enum nvkm_object_map *type, u64 *addr, u64 *size)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	*type = NVKM_OBJECT_MAP_IO;
	*addr = chan->addr;
	*size = chan->size;
	return 0;
}

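/*
 * Register accessors for the channel's user area; the kernel mapping is
 * created lazily on first access and torn down in the destructor.
 */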
static int
nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (!chan->user)
			return -ENOMEM;
	}
	if (unlikely(addr + 4 > chan->size))
		return -EINVAL;
	*data = ioread32_native(chan->user + addr);
	return 0;
}

static int
nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (!chan->user)
			return -ENOMEM;
	}
	if (unlikely(addr + 4 > chan->size))
		return -EINVAL;
	iowrite32_native(data, chan->user + addr);
	return 0;
}

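/* channel start/stop is delegated entirely to the chip-specific hooks */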
static int
nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->fini(chan);
	return 0;
}

static int
nvkm_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->init(chan);
	return 0;
}

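/*
 * Tear down a channel: release its channel id, unmap the user registers,
 * detach from the VMM, and free the push buffer and instance memory.
 */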
static void *
nvkm_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	void *data = chan->func->dtor(chan);
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	if (!list_empty(&chan->head)) {
		__clear_bit(chan->chid, fifo->mask);
		list_del(&chan->head);
	}
	spin_unlock_irqrestore(&fifo->lock, flags);

	if (chan->user)
		iounmap(chan->user);

	if (chan->vmm) {
		nvkm_vmm_part(chan->vmm, chan->inst->memory);
		nvkm_vmm_unref(&chan->vmm);
	}

	nvkm_gpuobj_del(&chan->push);
	nvkm_gpuobj_del(&chan->inst);
	return data;
}

static const struct nvkm_object_func
nvkm_fifo_chan_func = {
	.dtor = nvkm_fifo_chan_dtor,
	.init = nvkm_fifo_chan_init,
	.fini = nvkm_fifo_chan_fini,
	.ntfy = nvkm_fifo_chan_ntfy,
	.map = nvkm_fifo_chan_map,
	.rd32 = nvkm_fifo_chan_rd32,
	.wr32 = nvkm_fifo_chan_wr32,
	.sclass = nvkm_fifo_chan_child_get,
};

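/*
 * Common channel constructor.  Allocates instance memory, looks up the
 * push buffer ctxdma and the channel's address space, reserves a channel
 * id, and computes where the channel's user registers live within the
 * given BAR.
 */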
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
		    u64 hvmm, u64 push, u32 engm, int bar, u32 base,
		    u32 user, const struct nvkm_oclass *oclass,
		    struct nvkm_fifo_chan *chan)
{
	struct nvkm_client *client = oclass->client;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_dmaobj *dmaobj;
	unsigned long flags;
	int ret;

	nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
	chan->func = func;
	chan->fifo = fifo;
	chan->engm = engm;
	INIT_LIST_HEAD(&chan->head);

	/* instance memory */
	ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
	if (ret)
		return ret;

	/* allocate push buffer ctxdma instance */
	if (push) {
		dmaobj = nvkm_dmaobj_search(client, push);
		if (IS_ERR(dmaobj))
			return PTR_ERR(dmaobj);

		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
				       &chan->push);
		if (ret)
			return ret;
	}

	/* channel address space */
	if (hvmm) {
		struct nvkm_vmm *vmm = nvkm_uvmm_search(client, hvmm);
		if (IS_ERR(vmm))
			return PTR_ERR(vmm);

		if (vmm->mmu != device->mmu)
			return -EINVAL;

		ret = nvkm_vmm_join(vmm, chan->inst->memory);
		if (ret)
			return ret;

		chan->vmm = nvkm_vmm_ref(vmm);
	}

	/* allocate channel id */
	spin_lock_irqsave(&fifo->lock, flags);
	chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
	if (chan->chid >= NVKM_FIFO_CHID_NR) {
		spin_unlock_irqrestore(&fifo->lock, flags);
		return -ENOSPC;
	}
	list_add(&chan->head, &fifo->chan);
	__set_bit(chan->chid, fifo->mask);
	spin_unlock_irqrestore(&fifo->lock, flags);

	/* determine address of this channel's user registers */
	chan->addr = device->func->resource_addr(device, bar) +
		     base + user * chan->chid;
	chan->size = user;

	nvkm_fifo_cevent(fifo);
	return 0;
}