// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra Video decoder driver
 *
 * Copyright (C) 2016-2017 Dmitry Osipenko <digetx@gmail.com>
 *
 */

#include <linux/clk.h>
#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <soc/tegra/common.h>
#include <soc/tegra/pmc.h>

#include "vde.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

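/*
 * All MMIO accesses go through these helpers so that every read and
 * write shows up in the vde_readl/vde_writel tracepoints (see trace.h).
 */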
void tegra_vde_writel(struct tegra_vde *vde, u32 value,
		      void __iomem *base, u32 offset)
{
	trace_vde_writel(vde, base, offset, value);

	writel_relaxed(value, base + offset);
}

u32 tegra_vde_readl(struct tegra_vde *vde, void __iomem *base, u32 offset)
{
	u32 value = readl_relaxed(base + offset);

	trace_vde_readl(vde, base, offset, value);

	return value;
}

void tegra_vde_set_bits(struct tegra_vde *vde, u32 mask,
			void __iomem *base, u32 offset)
{
	u32 value = tegra_vde_readl(vde, base, offset);

	tegra_vde_writel(vde, value | mask, base, offset);
}

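/*
 * Allocate a device-private buffer object: a DMA allocation plus its
 * scatter-gather table, mapped for DMA and, when the driver manages an
 * IOMMU domain itself, mapped to an IOVA as well.
 */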
int tegra_vde_alloc_bo(struct tegra_vde *vde,
		       struct tegra_vde_bo **ret_bo,
		       enum dma_data_direction dma_dir,
		       size_t size)
{
	struct device *dev = vde->dev;
	struct tegra_vde_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return -ENOMEM;

	bo->vde = vde;
	bo->size = size;
	bo->dma_dir = dma_dir;
	bo->dma_attrs = DMA_ATTR_WRITE_COMBINE |
			DMA_ATTR_NO_KERNEL_MAPPING;

	if (!vde->domain)
		bo->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

	bo->dma_cookie = dma_alloc_attrs(dev, bo->size, &bo->dma_handle,
					 GFP_KERNEL, bo->dma_attrs);
	if (!bo->dma_cookie) {
		dev_err(dev, "Failed to allocate DMA buffer of size: %zu\n",
			bo->size);
		err = -ENOMEM;
		goto free_bo;
	}

	err = dma_get_sgtable_attrs(dev, &bo->sgt, bo->dma_cookie,
				    bo->dma_handle, bo->size, bo->dma_attrs);
	if (err) {
		dev_err(dev, "Failed to get DMA buffer SG table: %d\n", err);
		goto free_attrs;
	}

	err = dma_map_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);
	if (err) {
		dev_err(dev, "Failed to map DMA buffer SG table: %d\n", err);
		goto free_table;
	}

	if (vde->domain) {
		err = tegra_vde_iommu_map(vde, &bo->sgt, &bo->iova, bo->size);
		if (err) {
			dev_err(dev, "Failed to map DMA buffer IOVA: %d\n", err);
			goto unmap_sgtable;
		}

		bo->dma_addr = iova_dma_addr(&vde->iova, bo->iova);
	} else {
		bo->dma_addr = sg_dma_address(bo->sgt.sgl);
	}

	*ret_bo = bo;

	return 0;

unmap_sgtable:
	dma_unmap_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);
free_table:
	sg_free_table(&bo->sgt);
free_attrs:
	dma_free_attrs(dev, bo->size, bo->dma_cookie, bo->dma_handle,
		       bo->dma_attrs);
free_bo:
	kfree(bo);

	return err;
}

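/* Release a buffer object in the reverse order of tegra_vde_alloc_bo() */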
void tegra_vde_free_bo(struct tegra_vde_bo *bo)
{
	struct tegra_vde *vde = bo->vde;
	struct device *dev = vde->dev;

	if (vde->domain)
		tegra_vde_iommu_unmap(vde, bo->iova);

	dma_unmap_sgtable(dev, &bo->sgt, bo->dma_dir, bo->dma_attrs);

	sg_free_table(&bo->sgt);

	dma_free_attrs(dev, bo->size, bo->dma_cookie, bo->dma_handle,
		       bo->dma_attrs);
	kfree(bo);
}

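/*
 * The "sync-token" interrupt signals completion of a decode run.
 * Acknowledge the FRAMEID interrupt status and wake up the waiter;
 * return IRQ_NONE when no decode is in flight so a spurious interrupt
 * isn't claimed by mistake.
 */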
static irqreturn_t tegra_vde_isr(int irq, void *data)
{
	struct tegra_vde *vde = data;

	if (completion_done(&vde->decode_completion))
		return IRQ_NONE;

	tegra_vde_set_bits(vde, 0, vde->frameid, 0x208);
	complete(&vde->decode_completion);

	return IRQ_HANDLED;
}

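/*
 * Runtime PM callbacks. When the device has no generic power domain
 * attached, the legacy Tegra powergate API is used to turn the VDEC
 * partition off and on; with GENPD the domain handles that, and only
 * the clock and resets are managed here.
 */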
static __maybe_unused int tegra_vde_runtime_suspend(struct device *dev)
{
	struct tegra_vde *vde = dev_get_drvdata(dev);
	int err;

	if (!dev->pm_domain) {
		err = tegra_powergate_power_off(TEGRA_POWERGATE_VDEC);
		if (err) {
			dev_err(dev, "Failed to power down HW: %d\n", err);
			return err;
		}
	}

	clk_disable_unprepare(vde->clk);
	reset_control_release(vde->rst);
	reset_control_release(vde->rst_mc);

	return 0;
}

static __maybe_unused int tegra_vde_runtime_resume(struct device *dev)
{
	struct tegra_vde *vde = dev_get_drvdata(dev);
	int err;

	err = reset_control_acquire(vde->rst_mc);
	if (err) {
		dev_err(dev, "Failed to acquire mc reset: %d\n", err);
		return err;
	}

	err = reset_control_acquire(vde->rst);
	if (err) {
		dev_err(dev, "Failed to acquire reset: %d\n", err);
		goto release_mc_reset;
	}

	if (!dev->pm_domain) {
		err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_VDEC,
							vde->clk, vde->rst);
		if (err) {
			dev_err(dev, "Failed to power up HW: %d\n", err);
			goto release_reset;
		}
	} else {
		/*
		 * tegra_powergate_sequence_power_up() leaves the clock
		 * enabled, while GENPD doesn't, hence enable it explicitly.
		 */
		err = clk_prepare_enable(vde->clk);
		if (err) {
			dev_err(dev, "Failed to enable clock: %d\n", err);
			goto release_reset;
		}
	}

	return 0;

release_reset:
	reset_control_release(vde->rst);
release_mc_reset:
	reset_control_release(vde->rst_mc);

	return err;
}

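/*
 * Probe maps the decoder's register apertures, acquires the clock,
 * resets and "sync-token" IRQ, reserves IRAM for the decoder lists and
 * finally registers the V4L2 device.
 */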
static int tegra_vde_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tegra_vde *vde;
	int irq, err;

	vde = devm_kzalloc(dev, sizeof(*vde), GFP_KERNEL);
	if (!vde)
		return -ENOMEM;

	platform_set_drvdata(pdev, vde);

	vde->soc = of_device_get_match_data(&pdev->dev);
	vde->dev = dev;

	vde->sxe = devm_platform_ioremap_resource_byname(pdev, "sxe");
	if (IS_ERR(vde->sxe))
		return PTR_ERR(vde->sxe);

	vde->bsev = devm_platform_ioremap_resource_byname(pdev, "bsev");
	if (IS_ERR(vde->bsev))
		return PTR_ERR(vde->bsev);

	vde->mbe = devm_platform_ioremap_resource_byname(pdev, "mbe");
	if (IS_ERR(vde->mbe))
		return PTR_ERR(vde->mbe);

	vde->ppe = devm_platform_ioremap_resource_byname(pdev, "ppe");
	if (IS_ERR(vde->ppe))
		return PTR_ERR(vde->ppe);

	vde->mce = devm_platform_ioremap_resource_byname(pdev, "mce");
	if (IS_ERR(vde->mce))
		return PTR_ERR(vde->mce);

	vde->tfe = devm_platform_ioremap_resource_byname(pdev, "tfe");
	if (IS_ERR(vde->tfe))
		return PTR_ERR(vde->tfe);

	vde->ppb = devm_platform_ioremap_resource_byname(pdev, "ppb");
	if (IS_ERR(vde->ppb))
		return PTR_ERR(vde->ppb);

	vde->vdma = devm_platform_ioremap_resource_byname(pdev, "vdma");
	if (IS_ERR(vde->vdma))
		return PTR_ERR(vde->vdma);

	vde->frameid = devm_platform_ioremap_resource_byname(pdev, "frameid");
	if (IS_ERR(vde->frameid))
		return PTR_ERR(vde->frameid);

	vde->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(vde->clk)) {
		err = PTR_ERR(vde->clk);
		dev_err(dev, "Could not get VDE clk: %d\n", err);
		return err;
	}

	vde->rst = devm_reset_control_get_exclusive_released(dev, NULL);
	if (IS_ERR(vde->rst)) {
		err = PTR_ERR(vde->rst);
		dev_err(dev, "Could not get VDE reset: %d\n", err);
		return err;
	}

	vde->rst_mc = devm_reset_control_get_optional_exclusive_released(dev, "mc");
	if (IS_ERR(vde->rst_mc)) {
		err = PTR_ERR(vde->rst_mc);
		dev_err(dev, "Could not get MC reset: %d\n", err);
		return err;
	}

	irq = platform_get_irq_byname(pdev, "sync-token");
	if (irq < 0)
		return irq;

	err = devm_request_irq(dev, irq, tegra_vde_isr, 0,
			       dev_name(dev), vde);
	if (err) {
		dev_err(dev, "Could not request IRQ: %d\n", err);
		return err;
	}

	err = devm_tegra_core_dev_init_opp_table_common(dev);
	if (err) {
		dev_err(dev, "Could not initialize OPP table: %d\n", err);
		return err;
	}

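	/*
	 * The decoder keeps its frame lists in IRAM. Defer probing if the
	 * IRAM pool provider hasn't come up yet.
	 */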
	vde->iram_pool = of_gen_pool_get(dev->of_node, "iram", 0);
	if (!vde->iram_pool) {
		dev_err(dev, "Could not get IRAM pool\n");
		return -EPROBE_DEFER;
	}

	vde->iram = gen_pool_dma_alloc(vde->iram_pool,
				       gen_pool_size(vde->iram_pool),
				       &vde->iram_lists_addr);
	if (!vde->iram) {
		dev_err(dev, "Could not reserve IRAM\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&vde->map_list);
	mutex_init(&vde->map_lock);
	mutex_init(&vde->lock);
	init_completion(&vde->decode_completion);

	err = tegra_vde_iommu_init(vde);
	if (err) {
		dev_err(dev, "Failed to initialize IOMMU: %d\n", err);
		goto err_gen_free;
	}

	pm_runtime_enable(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 300);

	/*
	 * The bootloader may leave the VDE partition ON, hence power-cycle
	 * it in order to put the hardware into a predictable lower-power
	 * state.
	 */
	err = pm_runtime_resume_and_get(dev);
	if (err)
		goto err_pm_runtime;

	pm_runtime_put(dev);

	err = tegra_vde_alloc_bo(vde, &vde->secure_bo, DMA_FROM_DEVICE, 4096);
	if (err) {
		dev_err(dev, "Failed to allocate secure BO: %d\n", err);
		goto err_pm_runtime;
	}

	err = tegra_vde_v4l2_init(vde);
	if (err) {
		dev_err(dev, "Failed to initialize V4L2: %d\n", err);
		goto err_free_secure_bo;
	}

	return 0;

err_free_secure_bo:
	tegra_vde_free_bo(vde->secure_bo);
err_pm_runtime:
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);

	tegra_vde_iommu_deinit(vde);

err_gen_free:
	gen_pool_free(vde->iram_pool, (unsigned long)vde->iram,
		      gen_pool_size(vde->iram_pool));

	return err;
}

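/*
 * Undo probe in reverse order: unregister V4L2, free the secure BO,
 * quiesce runtime PM and release the IRAM reservation.
 */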
static int tegra_vde_remove(struct platform_device *pdev)
{
	struct tegra_vde *vde = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;

	tegra_vde_v4l2_deinit(vde);
	tegra_vde_free_bo(vde->secure_bo);

	/*
	 * pm_runtime_get_sync() increments the RPM usage count even on
	 * failure, so there is no need to check its return code here.
	 */
	pm_runtime_get_sync(dev);

	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_disable(dev);

	/*
	 * Balance the RPM state: the VDE power domain is left ON and the
	 * hardware is clock-gated. It's safe to reboot the machine now.
	 */
	pm_runtime_put_noidle(dev);
	clk_disable_unprepare(vde->clk);

	tegra_vde_dmabuf_cache_unmap_all(vde);
	tegra_vde_iommu_deinit(vde);

	gen_pool_free(vde->iram_pool, (unsigned long)vde->iram,
		      gen_pool_size(vde->iram_pool));

	return 0;
}

static void tegra_vde_shutdown(struct platform_device *pdev)
{
	/*
	 * On some devices the bootloader can't cope with a power-gated VDE
	 * across a warm reboot; the machine would hang in that case, hence
	 * keep the hardware powered on shutdown.
	 */
	pm_runtime_get_sync(&pdev->dev);
}

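/*
 * System sleep: vde->lock is taken in the suspend callback and released
 * in the resume callback, so that no new decode job can be started
 * while the hardware is down.
 */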
static __maybe_unused int tegra_vde_pm_suspend(struct device *dev)
{
	struct tegra_vde *vde = dev_get_drvdata(dev);
	int err;

	mutex_lock(&vde->lock);

	err = pm_runtime_force_suspend(dev);
	if (err < 0)
		return err;

	return 0;
}

static __maybe_unused int tegra_vde_pm_resume(struct device *dev)
{
	struct tegra_vde *vde = dev_get_drvdata(dev);
	int err;

	err = pm_runtime_force_resume(dev);
	if (err < 0)
		return err;

	mutex_unlock(&vde->lock);

	return 0;
}

static const struct dev_pm_ops tegra_vde_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_vde_runtime_suspend,
			   tegra_vde_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(tegra_vde_pm_suspend,
				tegra_vde_pm_resume)
};

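/*
 * Per-SoC tables of the supported coded formats and the decoded
 * (capture) pixel formats they produce.
 */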
static const u32 tegra124_decoded_fmts[] = {
	/* TBD: T124 supports only a non-standard Tegra tiled format */
};

static const struct tegra_coded_fmt_desc tegra124_coded_fmts[] = {
	{
		.fourcc = V4L2_PIX_FMT_H264_SLICE,
		.frmsize = {
			.min_width = 16,
			.max_width = 1920,
			.step_width = 16,
			.min_height = 16,
			.max_height = 2032,
			.step_height = 16,
		},
		.num_decoded_fmts = ARRAY_SIZE(tegra124_decoded_fmts),
		.decoded_fmts = tegra124_decoded_fmts,
		.decode_run = tegra_vde_h264_decode_run,
		.decode_wait = tegra_vde_h264_decode_wait,
	},
};

static const u32 tegra20_decoded_fmts[] = {
	V4L2_PIX_FMT_YUV420M,
	V4L2_PIX_FMT_YVU420M,
};

static const struct tegra_coded_fmt_desc tegra20_coded_fmts[] = {
	{
		.fourcc = V4L2_PIX_FMT_H264_SLICE,
		.frmsize = {
			.min_width = 16,
			.max_width = 1920,
			.step_width = 16,
			.min_height = 16,
			.max_height = 2032,
			.step_height = 16,
		},
		.num_decoded_fmts = ARRAY_SIZE(tegra20_decoded_fmts),
		.decoded_fmts = tegra20_decoded_fmts,
		.decode_run = tegra_vde_h264_decode_run,
		.decode_wait = tegra_vde_h264_decode_wait,
	},
};

static const struct tegra_vde_soc tegra124_vde_soc = {
	.supports_ref_pic_marking = true,
	.coded_fmts = tegra124_coded_fmts,
	.num_coded_fmts = ARRAY_SIZE(tegra124_coded_fmts),
};

static const struct tegra_vde_soc tegra114_vde_soc = {
	.supports_ref_pic_marking = true,
	.coded_fmts = tegra20_coded_fmts,
	.num_coded_fmts = ARRAY_SIZE(tegra20_coded_fmts),
};

static const struct tegra_vde_soc tegra30_vde_soc = {
	.supports_ref_pic_marking = false,
	.coded_fmts = tegra20_coded_fmts,
	.num_coded_fmts = ARRAY_SIZE(tegra20_coded_fmts),
};

static const struct tegra_vde_soc tegra20_vde_soc = {
	.supports_ref_pic_marking = false,
	.coded_fmts = tegra20_coded_fmts,
	.num_coded_fmts = ARRAY_SIZE(tegra20_coded_fmts),
};

static const struct of_device_id tegra_vde_of_match[] = {
	{ .compatible = "nvidia,tegra124-vde", .data = &tegra124_vde_soc },
	{ .compatible = "nvidia,tegra114-vde", .data = &tegra114_vde_soc },
	{ .compatible = "nvidia,tegra30-vde", .data = &tegra30_vde_soc },
	{ .compatible = "nvidia,tegra20-vde", .data = &tegra20_vde_soc },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_vde_of_match);

static struct platform_driver tegra_vde_driver = {
	.probe = tegra_vde_probe,
	.remove = tegra_vde_remove,
	.shutdown = tegra_vde_shutdown,
	.driver = {
		.name = "tegra-vde",
		.of_match_table = tegra_vde_of_match,
		.pm = &tegra_vde_pm_ops,
	},
};
module_platform_driver(tegra_vde_driver);

MODULE_DESCRIPTION("NVIDIA Tegra Video Decoder driver");
MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
MODULE_LICENSE("GPL");