1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
5 */
6 #include <linux/slab.h>
7 #include <linux/err.h>
8 #include <linux/init.h>
9 #include <linux/list.h>
10 #include <linux/io.h>
11 #include <linux/of_address.h>
12 #include <linux/of_device.h>
13 #include <linux/of_dma.h>
14
/* Crossbar flavor identifiers; ti_dma_xbar_match's .data points into this */
#define TI_XBAR_DRA7 0
#define TI_XBAR_AM335X 1
static const u32 ti_xbar_type[] = {
	[TI_XBAR_DRA7] = TI_XBAR_DRA7,
	[TI_XBAR_AM335X] = TI_XBAR_AM335X,
};
21
/* Top-level OF match table: .data selects which flavor probe to run */
static const struct of_device_id ti_dma_xbar_match[] = {
	{
		.compatible = "ti,dra7-dma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_DRA7],
	},
	{
		.compatible = "ti,am335x-edma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_AM335X],
	},
	{},
};
33
/* Crossbar on AM335x/AM437x family */
#define TI_AM335X_XBAR_LINES 64

/* Per-device state for the AM335x/AM437x crossbar */
struct ti_am335x_xbar_data {
	void __iomem *iomem;		/* mapped crossbar mux registers */

	struct dma_router dmarouter;	/* handle registered with the DMA core */

	u32 xbar_events; /* maximum number of events to select in xbar */
	u32 dma_requests; /* number of DMA requests on eDMA */
};

/* One active route; allocated in route_allocate, freed in route_free */
struct ti_am335x_xbar_map {
	u16 dma_line;	/* eDMA request line being driven */
	u8 mux_val;	/* crossbar event selected onto that line */
};
50
/*
 * Program one byte-wide event mux register.
 *
 * The TPCC_EVT_MUX_60_63 register has a reversed byte layout compared to
 * the other mux registers: event 63 sits in the lowest byte and event 60
 * in the highest, so those four events get their offset remapped.
 */
static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
	int offset = event;

	if (event >= 60 && event <= 63)
		offset = 63 - event % 4;

	writeb_relaxed(val, iomem + offset);
}
63
/*
 * dma_router .route_free callback: undo a mapping created by
 * ti_am335x_xbar_route_allocate() and release the map.
 */
static void ti_am335x_xbar_free(struct device *dev, void *route_data)
{
	struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
	struct ti_am335x_xbar_map *map = route_data;

	dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
		map->mux_val, map->dma_line);

	/* Route event 0 (the reset value used in probe) back to the line */
	ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
	kfree(map);
}
75
ti_am335x_xbar_route_allocate(struct of_phandle_args * dma_spec,struct of_dma * ofdma)76 static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
77 struct of_dma *ofdma)
78 {
79 struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
80 struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
81 struct ti_am335x_xbar_map *map;
82
83 if (dma_spec->args_count != 3)
84 return ERR_PTR(-EINVAL);
85
86 if (dma_spec->args[2] >= xbar->xbar_events) {
87 dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
88 dma_spec->args[2]);
89 return ERR_PTR(-EINVAL);
90 }
91
92 if (dma_spec->args[0] >= xbar->dma_requests) {
93 dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
94 dma_spec->args[0]);
95 return ERR_PTR(-EINVAL);
96 }
97
98 /* The of_node_put() will be done in the core for the node */
99 dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
100 if (!dma_spec->np) {
101 dev_err(&pdev->dev, "Can't get DMA master\n");
102 return ERR_PTR(-EINVAL);
103 }
104
105 map = kzalloc(sizeof(*map), GFP_KERNEL);
106 if (!map) {
107 of_node_put(dma_spec->np);
108 return ERR_PTR(-ENOMEM);
109 }
110
111 map->dma_line = (u16)dma_spec->args[0];
112 map->mux_val = (u8)dma_spec->args[2];
113
114 dma_spec->args[2] = 0;
115 dma_spec->args_count = 2;
116
117 dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
118 map->mux_val, map->dma_line);
119
120 ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
121
122 return map;
123 }
124
/* DMA masters this crossbar flavor may sit in front of */
static const struct of_device_id ti_am335x_master_match[] __maybe_unused = {
	{ .compatible = "ti,edma3-tpcc", },
	{},
};
129
ti_am335x_xbar_probe(struct platform_device * pdev)130 static int ti_am335x_xbar_probe(struct platform_device *pdev)
131 {
132 struct device_node *node = pdev->dev.of_node;
133 const struct of_device_id *match;
134 struct device_node *dma_node;
135 struct ti_am335x_xbar_data *xbar;
136 void __iomem *iomem;
137 int i, ret;
138
139 if (!node)
140 return -ENODEV;
141
142 xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
143 if (!xbar)
144 return -ENOMEM;
145
146 dma_node = of_parse_phandle(node, "dma-masters", 0);
147 if (!dma_node) {
148 dev_err(&pdev->dev, "Can't get DMA master node\n");
149 return -ENODEV;
150 }
151
152 match = of_match_node(ti_am335x_master_match, dma_node);
153 if (!match) {
154 dev_err(&pdev->dev, "DMA master is not supported\n");
155 of_node_put(dma_node);
156 return -EINVAL;
157 }
158
159 if (of_property_read_u32(dma_node, "dma-requests",
160 &xbar->dma_requests)) {
161 dev_info(&pdev->dev,
162 "Missing XBAR output information, using %u.\n",
163 TI_AM335X_XBAR_LINES);
164 xbar->dma_requests = TI_AM335X_XBAR_LINES;
165 }
166 of_node_put(dma_node);
167
168 if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
169 dev_info(&pdev->dev,
170 "Missing XBAR input information, using %u.\n",
171 TI_AM335X_XBAR_LINES);
172 xbar->xbar_events = TI_AM335X_XBAR_LINES;
173 }
174
175 iomem = devm_platform_ioremap_resource(pdev, 0);
176 if (IS_ERR(iomem))
177 return PTR_ERR(iomem);
178
179 xbar->iomem = iomem;
180
181 xbar->dmarouter.dev = &pdev->dev;
182 xbar->dmarouter.route_free = ti_am335x_xbar_free;
183
184 platform_set_drvdata(pdev, xbar);
185
186 /* Reset the crossbar */
187 for (i = 0; i < xbar->dma_requests; i++)
188 ti_am335x_xbar_write(xbar->iomem, i, 0);
189
190 ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
191 &xbar->dmarouter);
192
193 return ret;
194 }
195
/* Crossbar on DRA7xx family */
#define TI_DRA7_XBAR_OUTPUTS 127
#define TI_DRA7_XBAR_INPUTS 256

/* Per-device state for the DRA7xx crossbar */
struct ti_dra7_xbar_data {
	void __iomem *iomem;		/* mapped crossbar mux registers */

	struct dma_router dmarouter;	/* handle registered with the DMA core */
	struct mutex mutex;		/* protects dma_inuse */
	unsigned long *dma_inuse;	/* bitmap of taken/reserved DMA lines */

	u16 safe_val; /* Value to reset the crossbar lines */
	u32 xbar_requests; /* number of DMA requests connected to XBAR */
	u32 dma_requests; /* number of DMA requests forwarded to DMA */
	u32 dma_offset;	/* added to the picked line in the client dma-spec */
};

/* One active route; allocated in route_allocate, freed in route_free */
struct ti_dra7_xbar_map {
	u16 xbar_in;	/* crossbar input (request) number */
	int xbar_out;	/* DMA request line it was routed to */
};
217
/* Program one mux register; there is one 16-bit register per output */
static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
{
	writew_relaxed(val, iomem + 2 * xbar);
}
222
ti_dra7_xbar_free(struct device * dev,void * route_data)223 static void ti_dra7_xbar_free(struct device *dev, void *route_data)
224 {
225 struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
226 struct ti_dra7_xbar_map *map = route_data;
227
228 dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
229 map->xbar_in, map->xbar_out);
230
231 ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
232 mutex_lock(&xbar->mutex);
233 clear_bit(map->xbar_out, xbar->dma_inuse);
234 mutex_unlock(&xbar->mutex);
235 kfree(map);
236 }
237
/*
 * dma_router .route_allocate callback for DRA7xx.
 *
 * args[0] is the crossbar input to route. A free DMA request line is
 * picked from the dma_inuse bitmap, the mux is programmed, and @dma_spec
 * is rewritten (line + dma_offset, "dma-masters" node) for the master.
 *
 * The device reference taken by of_find_device_by_node() is dropped on
 * every error path and kept for the lifetime of the route otherwise.
 * Returns the map (later passed to ti_dra7_xbar_free()) or ERR_PTR().
 */
static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
	struct ti_dra7_xbar_map *map;

	if (dma_spec->args[0] >= xbar->xbar_requests) {
		dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
			dma_spec->args[0]);
		put_device(&pdev->dev);
		return ERR_PTR(-EINVAL);
	}

	/* The of_node_put() will be done in the core for the node */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "Can't get DMA master\n");
		put_device(&pdev->dev);
		return ERR_PTR(-EINVAL);
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		of_node_put(dma_spec->np);
		put_device(&pdev->dev);
		return ERR_PTR(-ENOMEM);
	}

	/* Grab the first free (non-reserved) DMA request line */
	mutex_lock(&xbar->mutex);
	map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
					    xbar->dma_requests);
	if (map->xbar_out == xbar->dma_requests) {
		mutex_unlock(&xbar->mutex);
		dev_err(&pdev->dev, "Run out of free DMA requests\n");
		kfree(map);
		of_node_put(dma_spec->np);
		put_device(&pdev->dev);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(map->xbar_out, xbar->dma_inuse);
	mutex_unlock(&xbar->mutex);

	map->xbar_in = (u16)dma_spec->args[0];

	dma_spec->args[0] = map->xbar_out + xbar->dma_offset;

	dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
		map->xbar_in, map->xbar_out);

	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);

	return map;
}
292
#define TI_XBAR_EDMA_OFFSET 0
#define TI_XBAR_SDMA_OFFSET 1
/*
 * Per-master offset added to the allocated crossbar output when the
 * client dma-spec is rewritten; ti_dra7_master_match's .data points here.
 */
static const u32 ti_dma_offset[] = {
	[TI_XBAR_EDMA_OFFSET] = 0,
	[TI_XBAR_SDMA_OFFSET] = 1,
};
299
/* DMA masters the DRA7 crossbar can drive; .data selects the dma_offset */
static const struct of_device_id ti_dra7_master_match[] __maybe_unused = {
	{
		.compatible = "ti,omap4430-sdma",
		.data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{},
};
315
/* Mark the range [offset, offset + len) as in use in bitmap @p */
static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
	int i;

	for (i = 0; i < len; i++)
		set_bit(offset + i, p);
}
321
ti_dra7_xbar_probe(struct platform_device * pdev)322 static int ti_dra7_xbar_probe(struct platform_device *pdev)
323 {
324 struct device_node *node = pdev->dev.of_node;
325 const struct of_device_id *match;
326 struct device_node *dma_node;
327 struct ti_dra7_xbar_data *xbar;
328 struct property *prop;
329 u32 safe_val;
330 int sz;
331 void __iomem *iomem;
332 int i, ret;
333
334 if (!node)
335 return -ENODEV;
336
337 xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
338 if (!xbar)
339 return -ENOMEM;
340
341 dma_node = of_parse_phandle(node, "dma-masters", 0);
342 if (!dma_node) {
343 dev_err(&pdev->dev, "Can't get DMA master node\n");
344 return -ENODEV;
345 }
346
347 match = of_match_node(ti_dra7_master_match, dma_node);
348 if (!match) {
349 dev_err(&pdev->dev, "DMA master is not supported\n");
350 of_node_put(dma_node);
351 return -EINVAL;
352 }
353
354 if (of_property_read_u32(dma_node, "dma-requests",
355 &xbar->dma_requests)) {
356 dev_info(&pdev->dev,
357 "Missing XBAR output information, using %u.\n",
358 TI_DRA7_XBAR_OUTPUTS);
359 xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
360 }
361 of_node_put(dma_node);
362
363 xbar->dma_inuse = devm_kcalloc(&pdev->dev,
364 BITS_TO_LONGS(xbar->dma_requests),
365 sizeof(unsigned long), GFP_KERNEL);
366 if (!xbar->dma_inuse)
367 return -ENOMEM;
368
369 if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
370 dev_info(&pdev->dev,
371 "Missing XBAR input information, using %u.\n",
372 TI_DRA7_XBAR_INPUTS);
373 xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
374 }
375
376 if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
377 xbar->safe_val = (u16)safe_val;
378
379
380 prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
381 if (prop) {
382 const char pname[] = "ti,reserved-dma-request-ranges";
383 u32 (*rsv_events)[2];
384 size_t nelm = sz / sizeof(*rsv_events);
385 int i;
386
387 if (!nelm)
388 return -EINVAL;
389
390 rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
391 if (!rsv_events)
392 return -ENOMEM;
393
394 ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
395 nelm * 2);
396 if (ret) {
397 kfree(rsv_events);
398 return ret;
399 }
400
401 for (i = 0; i < nelm; i++) {
402 ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
403 xbar->dma_inuse);
404 }
405 kfree(rsv_events);
406 }
407
408 iomem = devm_platform_ioremap_resource(pdev, 0);
409 if (IS_ERR(iomem))
410 return PTR_ERR(iomem);
411
412 xbar->iomem = iomem;
413
414 xbar->dmarouter.dev = &pdev->dev;
415 xbar->dmarouter.route_free = ti_dra7_xbar_free;
416 xbar->dma_offset = *(u32 *)match->data;
417
418 mutex_init(&xbar->mutex);
419 platform_set_drvdata(pdev, xbar);
420
421 /* Reset the crossbar */
422 for (i = 0; i < xbar->dma_requests; i++) {
423 if (!test_bit(i, xbar->dma_inuse))
424 ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
425 }
426
427 ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
428 &xbar->dmarouter);
429 if (ret) {
430 /* Restore the defaults for the crossbar */
431 for (i = 0; i < xbar->dma_requests; i++) {
432 if (!test_bit(i, xbar->dma_inuse))
433 ti_dra7_xbar_write(xbar->iomem, i, i);
434 }
435 }
436
437 return ret;
438 }
439
ti_dma_xbar_probe(struct platform_device * pdev)440 static int ti_dma_xbar_probe(struct platform_device *pdev)
441 {
442 const struct of_device_id *match;
443 int ret;
444
445 match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
446 if (unlikely(!match))
447 return -EINVAL;
448
449 switch (*(u32 *)match->data) {
450 case TI_XBAR_DRA7:
451 ret = ti_dra7_xbar_probe(pdev);
452 break;
453 case TI_XBAR_AM335X:
454 ret = ti_am335x_xbar_probe(pdev);
455 break;
456 default:
457 dev_err(&pdev->dev, "Unsupported crossbar\n");
458 ret = -ENODEV;
459 break;
460 }
461
462 return ret;
463 }
464
/* Platform driver covering both crossbar flavors */
static struct platform_driver ti_dma_xbar_driver = {
	.driver = {
		.name = "ti-dma-crossbar",
		.of_match_table = ti_dma_xbar_match,
	},
	.probe = ti_dma_xbar_probe,
};
472
/*
 * Registered via arch_initcall — NOTE(review): presumably early so the
 * router exists before DMA client drivers probe; confirm before changing.
 */
static int omap_dmaxbar_init(void)
{
	return platform_driver_register(&ti_dma_xbar_driver);
}
arch_initcall(omap_dmaxbar_init);
478