/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/oproxy.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

struct nvkm_fifo_chan_object {
	struct nvkm_oproxy oproxy;
	struct nvkm_fifo_chan *chan;
	int hash;
};

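/* Map an engine to this channel's per-engine context slot, or return
 * NULL if the engine isn't handled by this FIFO.
 */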
static struct nvkm_fifo_engn *
nvkm_fifo_chan_engn(struct nvkm_fifo_chan *chan, struct nvkm_engine *engine)
{
	int engi = chan->fifo->func->engine_id(chan->fifo, engine);
	if (engi >= 0)
		return &chan->engn[engi];
	return NULL;
}

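/* Detach the channel from an engine once the last active object using
 * that engine has been finalised.
 */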
static int
nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	const char *name = engine->subdev.name;
	int ret = 0;

	if (--engn->usecount)
		return 0;

	if (chan->func->engine_fini) {
		ret = chan->func->engine_fini(chan, engine, suspend);
		if (ret) {
			nvif_error(&chan->object,
				   "detach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	if (engn->object) {
		ret = nvkm_object_fini(engn->object, suspend);
		if (ret && suspend)
			return ret;
	}

	nvif_trace(&chan->object, "detached %s\n", name);
	return ret;
}

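/* Attach the channel to an engine the first time an object using that
 * engine is initialised; later objects only bump the use count.
 */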
static int
nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	const char *name = engine->subdev.name;
	int ret;

	if (engn->usecount++)
		return 0;

	if (engn->object) {
		ret = nvkm_object_init(engn->object);
		if (ret)
			return ret;
	}

	if (chan->func->engine_init) {
		ret = chan->func->engine_init(chan, engine);
		if (ret) {
			nvif_error(&chan->object,
				   "attach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	nvif_trace(&chan->object, "attached %s\n", name);
	return 0;
}

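/* Remove a child object from the channel's hash, and tear down the
 * engine context once its last reference is dropped.
 */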
static void
nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.base.engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);

	if (chan->func->object_dtor)
		chan->func->object_dtor(chan, object->hash);

	if (!--engn->refcount) {
		if (chan->func->engine_dtor)
			chan->func->engine_dtor(chan, engine);
		nvkm_object_del(&engn->object);
		if (chan->vmm)
			atomic_dec(&chan->vmm->engref[engine->subdev.type]);
	}
}

static const struct nvkm_oproxy_func
nvkm_fifo_chan_child_func = {
	.dtor[0] = nvkm_fifo_chan_child_del,
	.init[0] = nvkm_fifo_chan_child_init,
	.fini[0] = nvkm_fifo_chan_child_fini,
};

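/* Create an object on the channel, wrapped in an oproxy so that its
 * init/fini/dtor can be intercepted to manage the engine context.
 * The context itself is created on first use of each engine.
 */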
static int
nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
			 struct nvkm_object **pobject)
{
	struct nvkm_engine *engine = oclass->engine;
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
	struct nvkm_fifo_engn *engn = nvkm_fifo_chan_engn(chan, engine);
	struct nvkm_fifo_chan_object *object;
	int ret = 0;

	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
	object->chan = chan;
	*pobject = &object->oproxy.base;

	if (!engn->refcount++) {
		struct nvkm_oclass cclass = {
			.client = oclass->client,
			.engine = oclass->engine,
		};

		if (chan->vmm)
			atomic_inc(&chan->vmm->engref[engine->subdev.type]);

		if (engine->func->fifo.cclass) {
			ret = engine->func->fifo.cclass(chan, &cclass,
							&engn->object);
		} else
		if (engine->func->cclass) {
			ret = nvkm_object_new_(engine->func->cclass, &cclass,
					       NULL, 0, &engn->object);
		}
		if (ret)
			return ret;

		if (chan->func->engine_ctor) {
			ret = chan->func->engine_ctor(chan, oclass->engine,
						      engn->object);
			if (ret)
				return ret;
		}
	}

	ret = oclass->base.ctor(&(const struct nvkm_oclass) {
					.base = oclass->base,
					.engn = oclass->engn,
					.handle = oclass->handle,
					.object = oclass->object,
					.client = oclass->client,
					.parent = engn->object ?
						  engn->object :
						  oclass->parent,
					.engine = engine,
				}, data, size, &object->oproxy.object);
	if (ret)
		return ret;

	if (chan->func->object_ctor) {
		object->hash =
			chan->func->object_ctor(chan, object->oproxy.object);
		if (object->hash < 0)
			return object->hash;
	}

	return 0;
}

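/* Enumerate the classes available on the channel by walking each
 * engine in the channel's engine mask and indexing into that engine's
 * sclass list.
 */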
static int
nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
			 struct nvkm_oclass *oclass)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	struct nvkm_engine *engine;
	u32 engm = chan->engm;
	int engi, ret, c;

	for (; c = 0, engi = __ffs(engm), engm; engm &= ~(1ULL << engi)) {
		if (!(engine = fifo->func->id_engine(fifo, engi)))
			continue;
		oclass->engine = engine;
		oclass->base.oclass = 0;

		if (engine->func->fifo.sclass) {
			ret = engine->func->fifo.sclass(oclass, index);
			if (oclass->base.oclass) {
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_fifo_chan_child_new;
				return 0;
			}

			index -= ret;
			continue;
		}

		while (engine->func->sclass[c].oclass) {
			if (c++ == index) {
				oclass->base = engine->func->sclass[index];
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_fifo_chan_child_new;
				return 0;
			}
		}
		index -= c;
	}

	return -EINVAL;
}

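/* Forward event subscription requests to the channel implementation. */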
static int
nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
		    struct nvkm_event **pevent)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (chan->func->ntfy)
		return chan->func->ntfy(chan, type, pevent);
	return -ENODEV;
}

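/* Expose the channel's user registers for mapping by the client. */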
static int
nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
		   enum nvkm_object_map *type, u64 *addr, u64 *size)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	*type = NVKM_OBJECT_MAP_IO;
	*addr = chan->addr;
	*size = chan->size;
	return 0;
}

static int
nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->fini(chan);
	return 0;
}

static int
nvkm_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->init(chan);
	return 0;
}

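/* Tear down the channel: release its channel id, split its VMM from
 * the instance memory, and free the push buffer and instance objects.
 */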
static void *
nvkm_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	void *data = chan->func->dtor(chan);
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	if (!list_empty(&chan->head)) {
		__clear_bit(chan->chid, fifo->mask);
		list_del(&chan->head);
	}
	spin_unlock_irqrestore(&fifo->lock, flags);

	if (chan->vmm) {
		nvkm_vmm_part(chan->vmm, chan->inst->memory);
		nvkm_vmm_unref(&chan->vmm);
	}

	nvkm_gpuobj_del(&chan->push);
	nvkm_gpuobj_del(&chan->inst);
	return data;
}

static const struct nvkm_object_func
nvkm_fifo_chan_func = {
	.dtor = nvkm_fifo_chan_dtor,
	.init = nvkm_fifo_chan_init,
	.fini = nvkm_fifo_chan_fini,
	.ntfy = nvkm_fifo_chan_ntfy,
	.map = nvkm_fifo_chan_map,
	.sclass = nvkm_fifo_chan_child_get,
};

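/* Common channel constructor: allocate instance memory, bind the push
 * buffer ctxdma, join the channel's address space, allocate a channel
 * id, and compute the address of the channel's user registers.
 */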
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
		    u64 hvmm, u64 push, u32 engm, int bar, u32 base,
		    u32 user, const struct nvkm_oclass *oclass,
		    struct nvkm_fifo_chan *chan)
{
	struct nvkm_client *client = oclass->client;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_dmaobj *dmaobj;
	unsigned long flags;
	int ret;

	nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
	chan->func = func;
	chan->fifo = fifo;
	chan->engm = engm;
	INIT_LIST_HEAD(&chan->head);

	/* instance memory */
	ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
	if (ret)
		return ret;

	/* allocate push buffer ctxdma instance */
	if (push) {
		dmaobj = nvkm_dmaobj_search(client, push);
		if (IS_ERR(dmaobj))
			return PTR_ERR(dmaobj);

		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
				       &chan->push);
		if (ret)
			return ret;
	}

	/* channel address space */
	if (hvmm) {
		struct nvkm_vmm *vmm = nvkm_uvmm_search(client, hvmm);
		if (IS_ERR(vmm))
			return PTR_ERR(vmm);

		if (vmm->mmu != device->mmu)
			return -EINVAL;

		ret = nvkm_vmm_join(vmm, chan->inst->memory);
		if (ret)
			return ret;

		chan->vmm = nvkm_vmm_ref(vmm);
	}

	/* allocate channel id */
	spin_lock_irqsave(&fifo->lock, flags);
	chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
	if (chan->chid >= NVKM_FIFO_CHID_NR) {
		spin_unlock_irqrestore(&fifo->lock, flags);
		return -ENOSPC;
	}
	list_add(&chan->head, &fifo->chan);
	__set_bit(chan->chid, fifo->mask);
	spin_unlock_irqrestore(&fifo->lock, flags);

	/* determine address of this channel's user registers */
	chan->addr = device->func->resource_addr(device, bar) +
		     base + user * chan->chid;
	chan->size = user;
	return 0;
}