// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <soc/tegra/pmc.h>

#include "drm.h"
#include "falcon.h"
#include "vic.h"

struct nvdec_config {
	const char *firmware;
	unsigned int version;
	bool supports_sid;
};

struct nvdec {
	struct falcon falcon;

	void __iomem *regs;
	struct tegra_drm_client client;
	struct host1x_channel *channel;
	struct device *dev;
	struct clk *clk;

	/* Platform configuration */
	const struct nvdec_config *config;
};

static inline struct nvdec *to_nvdec(struct tegra_drm_client *client)
{
	return container_of(client, struct nvdec, client);
}

static inline void nvdec_writel(struct nvdec *nvdec, u32 value,
				unsigned int offset)
{
	writel(value, nvdec->regs + offset);
}

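/*
 * Boot the Falcon microcontroller: when the configuration supports stream
 * IDs and an IOMMU fwspec is present, program the TFBIF/THI stream ID
 * registers first, then start the Falcon and wait for it to go idle.
 */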
static int nvdec_boot(struct nvdec *nvdec)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(nvdec->dev);
#endif
	int err;

#ifdef CONFIG_IOMMU_API
	if (nvdec->config->supports_sid && spec) {
		u32 value;

		value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW);
		nvdec_writel(nvdec, value, VIC_TFBIF_TRANSCFG);

		if (spec->num_ids > 0) {
			value = spec->ids[0] & 0xffff;

			nvdec_writel(nvdec, value, VIC_THI_STREAMID0);
			nvdec_writel(nvdec, value, VIC_THI_STREAMID1);
		}
	}
#endif

	err = falcon_boot(&nvdec->falcon);
	if (err < 0)
		return err;

	err = falcon_wait_idle(&nvdec->falcon);
	if (err < 0) {
		dev_err(nvdec->dev, "falcon boot timed out\n");
		return err;
	}

	return 0;
}

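/*
 * host1x client initialization: attach to the IOMMU (if available), request
 * a channel and a syncpoint, enable runtime PM with autosuspend and register
 * the engine with the Tegra DRM core.
 */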
static int nvdec_init(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvdec *nvdec = to_nvdec(drm);
	int err;

	err = host1x_client_iommu_attach(client);
	if (err < 0 && err != -ENODEV) {
		dev_err(nvdec->dev, "failed to attach to domain: %d\n", err);
		return err;
	}

	nvdec->channel = host1x_channel_request(client);
	if (!nvdec->channel) {
		err = -ENOMEM;
		goto detach;
	}

	client->syncpts[0] = host1x_syncpt_request(client, 0);
	if (!client->syncpts[0]) {
		err = -ENOMEM;
		goto free_channel;
	}

	pm_runtime_enable(client->dev);
	pm_runtime_use_autosuspend(client->dev);
	pm_runtime_set_autosuspend_delay(client->dev, 500);

	err = tegra_drm_register_client(tegra, drm);
	if (err < 0)
		goto disable_rpm;

	/*
	 * Inherit the DMA parameters (such as maximum segment size) from the
	 * parent host1x device.
	 */
	client->dev->dma_parms = client->host->dma_parms;

	return 0;

disable_rpm:
	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
free_channel:
	host1x_channel_put(nvdec->channel);
detach:
	host1x_client_iommu_detach(client);

	return err;
}

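/*
 * host1x client teardown: undo everything done in nvdec_init() and release
 * the firmware memory set up by nvdec_load_firmware().
 */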
static int nvdec_exit(struct host1x_client *client)
{
	struct tegra_drm_client *drm = host1x_to_drm_client(client);
	struct drm_device *dev = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = dev->dev_private;
	struct nvdec *nvdec = to_nvdec(drm);
	int err;

	/* avoid a dangling pointer just in case this disappears */
	client->dev->dma_parms = NULL;

	err = tegra_drm_unregister_client(tegra, drm);
	if (err < 0)
		return err;

	pm_runtime_dont_use_autosuspend(client->dev);
	pm_runtime_force_suspend(client->dev);

	host1x_syncpt_put(client->syncpts[0]);
	host1x_channel_put(nvdec->channel);
	host1x_client_iommu_detach(client);

	nvdec->channel = NULL;

	if (client->group) {
		dma_unmap_single(nvdec->dev, nvdec->falcon.firmware.phys,
				 nvdec->falcon.firmware.size, DMA_TO_DEVICE);
		tegra_drm_free(tegra, nvdec->falcon.firmware.size,
			       nvdec->falcon.firmware.virt,
			       nvdec->falcon.firmware.iova);
	} else {
		dma_free_coherent(nvdec->dev, nvdec->falcon.firmware.size,
				  nvdec->falcon.firmware.virt,
				  nvdec->falcon.firmware.iova);
	}

	return 0;
}

static const struct host1x_client_ops nvdec_client_ops = {
	.init = nvdec_init,
	.exit = nvdec_exit,
};

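/*
 * Read the firmware image and place it in DMA-addressable memory: from the
 * shared IOMMU domain when the client is part of a group, otherwise from
 * coherent DMA memory.
 */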
static int nvdec_load_firmware(struct nvdec *nvdec)
{
	struct host1x_client *client = &nvdec->client.base;
	struct tegra_drm *tegra = nvdec->client.drm;
	dma_addr_t iova;
	size_t size;
	void *virt;
	int err;

	if (nvdec->falcon.firmware.virt)
		return 0;

	err = falcon_read_firmware(&nvdec->falcon, nvdec->config->firmware);
	if (err < 0)
		return err;

	size = nvdec->falcon.firmware.size;

	if (!client->group) {
		virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);

		err = dma_mapping_error(nvdec->dev, iova);
		if (err < 0)
			return err;
	} else {
		virt = tegra_drm_alloc(tegra, size, &iova);
	}

	nvdec->falcon.firmware.virt = virt;
	nvdec->falcon.firmware.iova = iova;

	err = falcon_load_firmware(&nvdec->falcon);
	if (err < 0)
		goto cleanup;

	/*
	 * In this case we have received an IOVA from the shared domain, so we
	 * need to make sure to get the physical address so that the DMA API
	 * knows what memory pages to flush the cache for.
	 */
	if (client->group) {
		dma_addr_t phys;

		phys = dma_map_single(nvdec->dev, virt, size, DMA_TO_DEVICE);

		err = dma_mapping_error(nvdec->dev, phys);
		if (err < 0)
			goto cleanup;

		nvdec->falcon.firmware.phys = phys;
	}

	return 0;

cleanup:
	if (!client->group)
		dma_free_coherent(nvdec->dev, size, virt, iova);
	else
		tegra_drm_free(tegra, size, virt, iova);

	return err;
}

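/*
 * Runtime PM resume: enable the clock, then (re)load the firmware and boot
 * the Falcon.
 */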
static __maybe_unused int nvdec_runtime_resume(struct device *dev)
{
	struct nvdec *nvdec = dev_get_drvdata(dev);
	int err;

	err = clk_prepare_enable(nvdec->clk);
	if (err < 0)
		return err;

	usleep_range(10, 20);

	err = nvdec_load_firmware(nvdec);
	if (err < 0)
		goto disable;

	err = nvdec_boot(nvdec);
	if (err < 0)
		goto disable;

	return 0;

disable:
	clk_disable_unprepare(nvdec->clk);
	return err;
}

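/* Runtime PM suspend: stop the channel and gate the clock. */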
static __maybe_unused int nvdec_runtime_suspend(struct device *dev)
{
	struct nvdec *nvdec = dev_get_drvdata(dev);

	host1x_channel_stop(nvdec->channel);

	clk_disable_unprepare(nvdec->clk);

	return 0;
}

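/* Grab a channel reference for a new userspace context. */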
static int nvdec_open_channel(struct tegra_drm_client *client,
			      struct tegra_drm_context *context)
{
	struct nvdec *nvdec = to_nvdec(client);

	context->channel = host1x_channel_get(nvdec->channel);
	if (!context->channel)
		return -ENOMEM;

	return 0;
}

static void nvdec_close_channel(struct tegra_drm_context *context)
{
	host1x_channel_put(context->channel);
}

static const struct tegra_drm_client_ops nvdec_ops = {
	.open_channel = nvdec_open_channel,
	.close_channel = nvdec_close_channel,
	.submit = tegra_drm_submit,
};

#define NVIDIA_TEGRA_210_NVDEC_FIRMWARE "nvidia/tegra210/nvdec.bin"

static const struct nvdec_config nvdec_t210_config = {
	.firmware = NVIDIA_TEGRA_210_NVDEC_FIRMWARE,
	.version = 0x21,
	.supports_sid = false,
};

#define NVIDIA_TEGRA_186_NVDEC_FIRMWARE "nvidia/tegra186/nvdec.bin"

static const struct nvdec_config nvdec_t186_config = {
	.firmware = NVIDIA_TEGRA_186_NVDEC_FIRMWARE,
	.version = 0x18,
	.supports_sid = true,
};

#define NVIDIA_TEGRA_194_NVDEC_FIRMWARE "nvidia/tegra194/nvdec.bin"

static const struct nvdec_config nvdec_t194_config = {
	.firmware = NVIDIA_TEGRA_194_NVDEC_FIRMWARE,
	.version = 0x19,
	.supports_sid = true,
};

static const struct of_device_id tegra_nvdec_of_match[] = {
	{ .compatible = "nvidia,tegra210-nvdec", .data = &nvdec_t210_config },
	{ .compatible = "nvidia,tegra186-nvdec", .data = &nvdec_t186_config },
	{ .compatible = "nvidia,tegra194-nvdec", .data = &nvdec_t194_config },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_nvdec_of_match);

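/*
 * Probe: inherit the DMA mask from the host1x parent, map registers, acquire
 * the clock and set it to its maximum rate, initialize the Falcon helper and
 * register the host1x client.
 */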
static int nvdec_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct host1x_syncpt **syncpts;
	struct nvdec *nvdec;
	u32 host_class;
	int err;

	/* inherit DMA mask from host1x parent */
	err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
		return err;
	}

	nvdec = devm_kzalloc(dev, sizeof(*nvdec), GFP_KERNEL);
	if (!nvdec)
		return -ENOMEM;

	nvdec->config = of_device_get_match_data(dev);

	syncpts = devm_kzalloc(dev, sizeof(*syncpts), GFP_KERNEL);
	if (!syncpts)
		return -ENOMEM;

	nvdec->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(nvdec->regs))
		return PTR_ERR(nvdec->regs);

	nvdec->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(nvdec->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(nvdec->clk);
	}

	err = clk_set_rate(nvdec->clk, ULONG_MAX);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to set clock rate\n");
		return err;
	}

	err = of_property_read_u32(dev->of_node, "nvidia,host1x-class", &host_class);
	if (err < 0)
		host_class = HOST1X_CLASS_NVDEC;

	nvdec->falcon.dev = dev;
	nvdec->falcon.regs = nvdec->regs;

	err = falcon_init(&nvdec->falcon);
	if (err < 0)
		return err;

	platform_set_drvdata(pdev, nvdec);

	INIT_LIST_HEAD(&nvdec->client.base.list);
	nvdec->client.base.ops = &nvdec_client_ops;
	nvdec->client.base.dev = dev;
	nvdec->client.base.class = host_class;
	nvdec->client.base.syncpts = syncpts;
	nvdec->client.base.num_syncpts = 1;
	nvdec->dev = dev;

	INIT_LIST_HEAD(&nvdec->client.list);
	nvdec->client.version = nvdec->config->version;
	nvdec->client.ops = &nvdec_ops;

	err = host1x_client_register(&nvdec->client.base);
	if (err < 0) {
		dev_err(dev, "failed to register host1x client: %d\n", err);
		goto exit_falcon;
	}

	return 0;

exit_falcon:
	falcon_exit(&nvdec->falcon);

	return err;
}

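/* Unregister the host1x client and tear down the Falcon helper. */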
static int nvdec_remove(struct platform_device *pdev)
{
	struct nvdec *nvdec = platform_get_drvdata(pdev);
	int err;

	err = host1x_client_unregister(&nvdec->client.base);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
		return err;
	}

	falcon_exit(&nvdec->falcon);

	return 0;
}

static const struct dev_pm_ops nvdec_pm_ops = {
	SET_RUNTIME_PM_OPS(nvdec_runtime_suspend, nvdec_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

struct platform_driver tegra_nvdec_driver = {
	.driver = {
		.name = "tegra-nvdec",
		.of_match_table = tegra_nvdec_of_match,
		.pm = &nvdec_pm_ops
	},
	.probe = nvdec_probe,
	.remove = nvdec_remove,
};

#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVDEC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_NVDEC_FIRMWARE);
#endif
#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_194_NVDEC_FIRMWARE);
#endif