/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"
#include "chid.h"
#include "cgrp.h"
#include "runl.h"
#include "priv.h"

#include <core/ramht.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

#include <nvif/if0020.h>

const struct nvkm_event_func
nvkm_chan_event = {
};

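/* Bind (or unbind, when cctx is NULL) an engine context to a channel.  The channel,
 * or its whole channel group on HW with TSG support, is blocked and preempted around
 * the update so the context pointer is never switched while the channel is in flight.
 */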
void
nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx *cctx)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_runl *runl = cgrp->runl;
	struct nvkm_engine *engine = engn->engine;

	if (!engn->func->bind)
		return;

	CHAN_TRACE(chan, "%sbind cctx %d[%s]", cctx ? "" : "un", engn->id, engine->subdev.name);

	/* Prevent any channel in the channel group from being rescheduled, and kick
	 * them off the host and any engine(s) they're loaded on.
	 */
	if (cgrp->hw)
		nvkm_runl_block(runl);
	else
		nvkm_chan_block(chan);
	nvkm_chan_preempt(chan, true);

	/* Update context pointer. */
	engn->func->bind(engn, cctx, chan);

	/* Resume normal operation. */
	if (cgrp->hw)
		nvkm_runl_allow(runl);
	else
		nvkm_chan_allow(chan);
}

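/* Drop a reference to a channel context, destroying it (and releasing its
 * sub-context) once the last reference is gone.  cgrp->mutex is only taken
 * on the final put, via refcount_dec_and_mutex_lock().
 */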
void
nvkm_chan_cctx_put(struct nvkm_chan *chan, struct nvkm_cctx **pcctx)
{
	struct nvkm_cctx *cctx = *pcctx;

	if (cctx) {
		struct nvkm_engn *engn = cctx->vctx->ectx->engn;

		if (refcount_dec_and_mutex_lock(&cctx->refs, &chan->cgrp->mutex)) {
			CHAN_TRACE(chan, "dtor cctx %d[%s]", engn->id, engn->engine->subdev.name);
			nvkm_cgrp_vctx_put(chan->cgrp, &cctx->vctx);
			list_del(&cctx->head);
			kfree(cctx);
			mutex_unlock(&chan->cgrp->mutex);
		}

		*pcctx = NULL;
	}
}

int
nvkm_chan_cctx_get(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_cctx **pcctx,
		   struct nvkm_client *client)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_vctx *vctx;
	struct nvkm_cctx *cctx;
	int ret;

	/* Look for an existing channel context for this engine+VEID. */
	mutex_lock(&cgrp->mutex);
	cctx = nvkm_list_find(cctx, &chan->cctxs, head,
			      cctx->vctx->ectx->engn == engn && cctx->vctx->vmm == chan->vmm);
	if (cctx) {
		refcount_inc(&cctx->refs);
		*pcctx = cctx;
		mutex_unlock(&chan->cgrp->mutex);
		return 0;
	}

	/* Nope - create a fresh one.  But, sub-context first. */
	ret = nvkm_cgrp_vctx_get(cgrp, engn, chan, &vctx, client);
	if (ret) {
		CHAN_ERROR(chan, "vctx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret);
		goto done;
	}

	/* Now, create the channel context - to track engine binding. */
	CHAN_TRACE(chan, "ctor cctx %d[%s]", engn->id, engn->engine->subdev.name);
	if (!(cctx = *pcctx = kzalloc(sizeof(*cctx), GFP_KERNEL))) {
		nvkm_cgrp_vctx_put(cgrp, &vctx);
		ret = -ENOMEM;
		goto done;
	}

	cctx->vctx = vctx;
	refcount_set(&cctx->refs, 1);
	refcount_set(&cctx->uses, 0);
	list_add_tail(&cctx->head, &chan->cctxs);
done:
	mutex_unlock(&cgrp->mutex);
	return ret;
}

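/* Preempt a channel off the GPU, optionally waiting for the preempt to complete.
 * Caller is expected to hold the runlist mutex (see nvkm_chan_preempt() below).
 */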
int
nvkm_chan_preempt_locked(struct nvkm_chan *chan, bool wait)
{
	struct nvkm_runl *runl = chan->cgrp->runl;

	CHAN_TRACE(chan, "preempt");
	chan->func->preempt(chan);
	if (!wait)
		return 0;

	return nvkm_runl_preempt_wait(runl);
}

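/* As nvkm_chan_preempt_locked(), but takes the runlist mutex itself and is a
 * no-op for classes without a preempt method.
 */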
int
nvkm_chan_preempt(struct nvkm_chan *chan, bool wait)
{
	int ret;

	if (!chan->func->preempt)
		return 0;

	mutex_lock(&chan->cgrp->runl->mutex);
	ret = nvkm_chan_preempt_locked(chan, wait);
	mutex_unlock(&chan->cgrp->runl->mutex);
	return ret;
}

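/* Unlink a channel from its channel group and runlist bookkeeping, dropping the
 * group from the runlist when its last channel goes, and flag the runlist as
 * changed.  Caller is expected to hold the runlist mutex.
 */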
void
nvkm_chan_remove_locked(struct nvkm_chan *chan)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_runl *runl = cgrp->runl;

	if (list_empty(&chan->head))
		return;

	CHAN_TRACE(chan, "remove");
	if (!--cgrp->chan_nr) {
		runl->cgrp_nr--;
		list_del(&cgrp->head);
	}
	runl->chan_nr--;
	list_del_init(&chan->head);
	atomic_set(&runl->changed, 1);
}

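/* Remove a channel from its runlist, optionally preempting it first, and
 * trigger a runlist update.
 */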
void
nvkm_chan_remove(struct nvkm_chan *chan, bool preempt)
{
	struct nvkm_runl *runl = chan->cgrp->runl;

	mutex_lock(&runl->mutex);
	if (preempt && chan->func->preempt)
		nvkm_chan_preempt_locked(chan, true);
	nvkm_chan_remove_locked(chan);
	nvkm_runl_update_locked(runl, true);
	mutex_unlock(&runl->mutex);
}

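/* Add a channel to its channel group's runlist, registering the group with the
 * runlist if this is its first channel, and trigger a runlist update.
 */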
void
nvkm_chan_insert(struct nvkm_chan *chan)
{
	struct nvkm_cgrp *cgrp = chan->cgrp;
	struct nvkm_runl *runl = cgrp->runl;

	mutex_lock(&runl->mutex);
	if (WARN_ON(!list_empty(&chan->head))) {
		mutex_unlock(&runl->mutex);
		return;
	}

	CHAN_TRACE(chan, "insert");
	list_add_tail(&chan->head, &cgrp->chans);
	runl->chan_nr++;
	if (!cgrp->chan_nr++) {
		list_add_tail(&cgrp->head, &cgrp->runl->cgrps);
		runl->cgrp_nr++;
	}
	atomic_set(&runl->changed, 1);
	nvkm_runl_update_locked(runl, true);
	mutex_unlock(&runl->mutex);
}

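/* Stop the channel the first time it's blocked; further calls only bump the
 * blocked count.  Callers hold chan->lock.
 */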
static void
nvkm_chan_block_locked(struct nvkm_chan *chan)
{
	CHAN_TRACE(chan, "block %d", atomic_read(&chan->blocked));
	if (atomic_inc_return(&chan->blocked) == 1)
		chan->func->stop(chan);
}

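/* Mark a channel as errored (once), disabling and optionally preempting it, and
 * notify listeners waiting on NVKM_CHAN_EVENT_ERRORED.
 */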
void
nvkm_chan_error(struct nvkm_chan *chan, bool preempt)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (atomic_inc_return(&chan->errored) == 1) {
		CHAN_ERROR(chan, "errored - disabling channel");
		nvkm_chan_block_locked(chan);
		if (preempt)
			chan->func->preempt(chan);
		nvkm_event_ntfy(&chan->cgrp->runl->chid->event, chan->id, NVKM_CHAN_EVENT_ERRORED);
	}
	spin_unlock_irqrestore(&chan->lock, flags);
}

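/* Block (stop) a channel; nestable, paired with nvkm_chan_allow(). */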
void
nvkm_chan_block(struct nvkm_chan *chan)
{
	spin_lock_irq(&chan->lock);
	nvkm_chan_block_locked(chan);
	spin_unlock_irq(&chan->lock);
}

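/* Drop one block reference, restarting the channel when the count hits zero. */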
void
nvkm_chan_allow(struct nvkm_chan *chan)
{
	spin_lock_irq(&chan->lock);
	CHAN_TRACE(chan, "allow %d", atomic_read(&chan->blocked));
	if (atomic_dec_and_test(&chan->blocked))
		chan->func->start(chan);
	spin_unlock_irq(&chan->lock);
}

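/* Destructor - tears down RAMFC, instance-block objects, USERD, the channel ID,
 * channel group and VMM references, roughly in the reverse order of construction.
 */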
void
nvkm_chan_del(struct nvkm_chan **pchan)
{
	struct nvkm_chan *chan = *pchan;

	if (!chan)
		return;

	if (chan->func->ramfc->clear)
		chan->func->ramfc->clear(chan);

	nvkm_ramht_del(&chan->ramht);
	nvkm_gpuobj_del(&chan->pgd);
	nvkm_gpuobj_del(&chan->eng);
	nvkm_gpuobj_del(&chan->cache);
	nvkm_gpuobj_del(&chan->ramfc);

	nvkm_memory_unref(&chan->userd.mem);

	if (chan->cgrp) {
		nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
		nvkm_cgrp_unref(&chan->cgrp);
	}

	if (chan->vmm) {
		nvkm_vmm_part(chan->vmm, chan->inst->memory);
		nvkm_vmm_unref(&chan->vmm);
	}

	nvkm_gpuobj_del(&chan->push);
	nvkm_gpuobj_del(&chan->inst);
	kfree(chan);
}

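/* Release a channel looked up via nvkm_chan_get_inst()/nvkm_chan_get_chid(),
 * dropping the cgrp lock with the IRQ flags saved by the lookup.
 */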
void
nvkm_chan_put(struct nvkm_chan **pchan, unsigned long irqflags)
{
	struct nvkm_chan *chan = *pchan;

	if (!chan)
		return;

	*pchan = NULL;
	spin_unlock_irqrestore(&chan->cgrp->lock, irqflags);
}

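/* Look up a channel by instance block address, searching every runlist when the
 * lookup comes from the FIFO itself, or only runlists containing the given engine
 * otherwise.  The result must be released with nvkm_chan_put().
 */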
struct nvkm_chan *
nvkm_chan_get_inst(struct nvkm_engine *engine, u64 inst, unsigned long *pirqflags)
{
	struct nvkm_fifo *fifo = engine->subdev.device->fifo;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;
	struct nvkm_chan *chan;

	nvkm_runl_foreach(runl, fifo) {
		nvkm_runl_foreach_engn(engn, runl) {
			if (engine == &fifo->engine || engn->engine == engine) {
				chan = nvkm_runl_chan_get_inst(runl, inst, pirqflags);
				if (chan || engn->engine == engine)
					return chan;
			}
		}
	}

	return NULL;
}

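/* As nvkm_chan_get_inst(), but looks the channel up by channel ID instead. */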
struct nvkm_chan *
nvkm_chan_get_chid(struct nvkm_engine *engine, int id, unsigned long *pirqflags)
{
	struct nvkm_fifo *fifo = engine->subdev.device->fifo;
	struct nvkm_runl *runl;
	struct nvkm_engn *engn;

	nvkm_runl_foreach(runl, fifo) {
		nvkm_runl_foreach_engn(engn, runl) {
			if (fifo->chid || engn->engine == engine)
				return nvkm_runl_chan_get_chid(runl, id, pirqflags);
		}
	}

	return NULL;
}

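/* Common channel constructor: validates the request against the class limits,
 * joins (or creates) a channel group, then sets up the instance block, VMM,
 * push buffer ctxdma, channel ID, USERD and RAMFC.
 */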
int
nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int runq,
	       struct nvkm_cgrp *cgrp, const char *name, bool priv, u32 devm, struct nvkm_vmm *vmm,
	       struct nvkm_dmaobj *dmaobj, u64 offset, u64 length,
	       struct nvkm_memory *userd, u64 ouserd, struct nvkm_chan **pchan)
{
	struct nvkm_fifo *fifo = runl->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_chan *chan;
	int ret;

	/* Validate arguments against class requirements. */
	if ((runq && runq >= runl->func->runqs) ||
	    (!func->inst->vmm != !vmm) ||
	    ((func->userd->bar < 0) == !userd) ||
	    (!func->ramfc->ctxdma != !dmaobj) ||
	    ((func->ramfc->devm < devm) && devm != BIT(0)) ||
	    (!func->ramfc->priv && priv)) {
		RUNL_DEBUG(runl, "args runq:%d:%d vmm:%d:%p userd:%d:%p "
				 "push:%d:%p devm:%08x:%08x priv:%d:%d",
			   runl->func->runqs, runq, func->inst->vmm, vmm,
			   func->userd->bar < 0, userd, func->ramfc->ctxdma, dmaobj,
			   func->ramfc->devm, devm, func->ramfc->priv, priv);
		return -EINVAL;
	}

	if (!(chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;

	chan->func = func;
	strscpy(chan->name, name, sizeof(chan->name));
	chan->runq = runq;
	chan->id = -1;
	spin_lock_init(&chan->lock);
	atomic_set(&chan->blocked, 1);
	atomic_set(&chan->errored, 0);
	INIT_LIST_HEAD(&chan->cctxs);
	INIT_LIST_HEAD(&chan->head);

	/* Join channel group.
	 *
	 * GK110 and newer support channel groups (aka TSGs), where individual channels
	 * share a timeslice and engine context(s).
	 *
	 * As such, engine contexts are tracked in nvkm_cgrp and we need them even when
	 * channels aren't in an API channel group, and on HW that doesn't support TSGs.
	 */
	if (!cgrp) {
		ret = nvkm_cgrp_new(runl, chan->name, vmm, fifo->func->cgrp.force, &chan->cgrp);
		if (ret) {
			RUNL_DEBUG(runl, "cgrp %d", ret);
			return ret;
		}

		cgrp = chan->cgrp;
	} else {
		if (cgrp->runl != runl || cgrp->vmm != vmm) {
			RUNL_DEBUG(runl, "cgrp %d %d", cgrp->runl != runl, cgrp->vmm != vmm);
			return -EINVAL;
		}

		chan->cgrp = nvkm_cgrp_ref(cgrp);
	}

	/* Allocate instance block. */
	ret = nvkm_gpuobj_new(device, func->inst->size, 0x1000, func->inst->zero, NULL,
			      &chan->inst);
	if (ret) {
		RUNL_DEBUG(runl, "inst %d", ret);
		return ret;
	}

	/* Initialise virtual address-space. */
	if (func->inst->vmm) {
		if (WARN_ON(vmm->mmu != device->mmu))
			return -EINVAL;

		ret = nvkm_vmm_join(vmm, chan->inst->memory);
		if (ret) {
			RUNL_DEBUG(runl, "vmm %d", ret);
			return ret;
		}

		chan->vmm = nvkm_vmm_ref(vmm);
	}

	/* Allocate HW ctxdma for push buffer. */
	if (func->ramfc->ctxdma) {
		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16, &chan->push);
		if (ret) {
			RUNL_DEBUG(runl, "bind %d", ret);
			return ret;
		}
	}

	/* Allocate channel ID. */
	chan->id = nvkm_chid_get(runl->chid, chan);
	if (chan->id < 0) {
		RUNL_ERROR(runl, "!chids");
		return -ENOSPC;
	}

	if (cgrp->id < 0)
		cgrp->id = chan->id;

	/* Initialise USERD. */
	if (func->userd->bar < 0) {
		if (ouserd + chan->func->userd->size >= nvkm_memory_size(userd)) {
			RUNL_DEBUG(runl, "ouserd %llx", ouserd);
			return -EINVAL;
		}

		ret = nvkm_memory_kmap(userd, &chan->userd.mem);
		if (ret) {
			RUNL_DEBUG(runl, "userd %d", ret);
			return ret;
		}

		chan->userd.base = ouserd;
	} else {
		chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
		chan->userd.base = chan->id * chan->func->userd->size;
	}

	if (chan->func->userd->clear)
		chan->func->userd->clear(chan);

	/* Initialise RAMFC. */
	ret = chan->func->ramfc->write(chan, offset, length, devm, priv);
	if (ret) {
		RUNL_DEBUG(runl, "ramfc %d", ret);
		return ret;
	}

	return 0;
}