1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
4 */
5
6 /*
7 * This code implements the DMA subsystem. It provides a HW-neutral interface
8 * for other kernel code to use asynchronous memory copy capabilities,
9 * if present, and allows different HW DMA drivers to register as providing
10 * this capability.
11 *
12 * Because we are accelerating what is already a relatively fast
13 * operation, the code goes to great lengths to avoid additional overhead,
14 * such as locking.
15 *
16 * LOCKING:
17 *
18 * The subsystem keeps a global list of dma_device structs; it is protected
19 * by a mutex, dma_list_mutex.
20 *
21 * A subsystem can get access to a channel by calling dmaengine_get() followed
22 * by dma_find_channel(), or if it has need for an exclusive channel it can call
23 * dma_request_channel(). Once a channel is allocated a reference is taken
24 * against its corresponding driver to disable removal.
25 *
26 * Each device has a channels list, which runs unlocked but is never modified
27 * once the device is registered; it is simply set up by the driver.
28 *
29 * See Documentation/driver-api/dmaengine for more details
30 */
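
/*
 * Illustrative sketch (not part of this file): a typical public-channel
 * client follows the pattern described above; error handling is elided and
 * the surrounding code is hypothetical.
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		... prepare and submit transactions on chan ...
 *	}
 *	dmaengine_put();
 *
 * Clients that need a channel exclusively call dma_request_chan() (or
 * dma_request_channel()) and later dma_release_channel() instead.
 */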
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #include <linux/platform_device.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/init.h>
37 #include <linux/module.h>
38 #include <linux/mm.h>
39 #include <linux/device.h>
40 #include <linux/dmaengine.h>
41 #include <linux/hardirq.h>
42 #include <linux/spinlock.h>
43 #include <linux/percpu.h>
44 #include <linux/rcupdate.h>
45 #include <linux/mutex.h>
46 #include <linux/jiffies.h>
47 #include <linux/rculist.h>
48 #include <linux/idr.h>
49 #include <linux/slab.h>
50 #include <linux/acpi.h>
51 #include <linux/acpi_dma.h>
52 #include <linux/of_dma.h>
53 #include <linux/mempool.h>
54 #include <linux/numa.h>
55
56 #include "dmaengine.h"
57
58 static DEFINE_MUTEX(dma_list_mutex);
59 static DEFINE_IDA(dma_ida);
60 static LIST_HEAD(dma_device_list);
61 static long dmaengine_ref_count;
62
63 /* --- debugfs implementation --- */
64 #ifdef CONFIG_DEBUG_FS
65 #include <linux/debugfs.h>
66
67 static struct dentry *rootdir;
68
69 static void dmaengine_debug_register(struct dma_device *dma_dev)
70 {
71 dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
72 rootdir);
73 if (IS_ERR(dma_dev->dbg_dev_root))
74 dma_dev->dbg_dev_root = NULL;
75 }
76
77 static void dmaengine_debug_unregister(struct dma_device *dma_dev)
78 {
79 debugfs_remove_recursive(dma_dev->dbg_dev_root);
80 dma_dev->dbg_dev_root = NULL;
81 }
82
83 static void dmaengine_dbg_summary_show(struct seq_file *s,
84 struct dma_device *dma_dev)
85 {
86 struct dma_chan *chan;
87
88 list_for_each_entry(chan, &dma_dev->channels, device_node) {
89 if (chan->client_count) {
90 seq_printf(s, " %-13s| %s", dma_chan_name(chan),
91 chan->dbg_client_name ?: "in-use");
92
93 if (chan->router)
94 seq_printf(s, " (via router: %s)\n",
95 dev_name(chan->router->dev));
96 else
97 seq_puts(s, "\n");
98 }
99 }
100 }
101
102 static int dmaengine_summary_show(struct seq_file *s, void *data)
103 {
104 struct dma_device *dma_dev = NULL;
105
106 mutex_lock(&dma_list_mutex);
107 list_for_each_entry(dma_dev, &dma_device_list, global_node) {
108 seq_printf(s, "dma%d (%s): number of channels: %u\n",
109 dma_dev->dev_id, dev_name(dma_dev->dev),
110 dma_dev->chancnt);
111
112 if (dma_dev->dbg_summary_show)
113 dma_dev->dbg_summary_show(s, dma_dev);
114 else
115 dmaengine_dbg_summary_show(s, dma_dev);
116
117 if (!list_is_last(&dma_dev->global_node, &dma_device_list))
118 seq_puts(s, "\n");
119 }
120 mutex_unlock(&dma_list_mutex);
121
122 return 0;
123 }
124 DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
125
126 static void __init dmaengine_debugfs_init(void)
127 {
128 rootdir = debugfs_create_dir("dmaengine", NULL);
129
130 /* /sys/kernel/debug/dmaengine/summary */
131 debugfs_create_file("summary", 0444, rootdir, NULL,
132 &dmaengine_summary_fops);
133 }
134 #else
135 static inline void dmaengine_debugfs_init(void) { }
136 static inline int dmaengine_debug_register(struct dma_device *dma_dev)
137 {
138 return 0;
139 }
140
141 static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
142 #endif /* DEBUG_FS */
143
144 /* --- sysfs implementation --- */
145
146 #define DMA_SLAVE_NAME "slave"
147
148 /**
149 * dev_to_dma_chan - convert a device pointer to its sysfs container object
150 * @dev: device node
151 *
152 * Must be called under dma_list_mutex.
153 */
154 static struct dma_chan *dev_to_dma_chan(struct device *dev)
155 {
156 struct dma_chan_dev *chan_dev;
157
158 chan_dev = container_of(dev, typeof(*chan_dev), device);
159 return chan_dev->chan;
160 }
161
162 static ssize_t memcpy_count_show(struct device *dev,
163 struct device_attribute *attr, char *buf)
164 {
165 struct dma_chan *chan;
166 unsigned long count = 0;
167 int i;
168 int err;
169
170 mutex_lock(&dma_list_mutex);
171 chan = dev_to_dma_chan(dev);
172 if (chan) {
173 for_each_possible_cpu(i)
174 count += per_cpu_ptr(chan->local, i)->memcpy_count;
175 err = sprintf(buf, "%lu\n", count);
176 } else
177 err = -ENODEV;
178 mutex_unlock(&dma_list_mutex);
179
180 return err;
181 }
182 static DEVICE_ATTR_RO(memcpy_count);
183
184 static ssize_t bytes_transferred_show(struct device *dev,
185 struct device_attribute *attr, char *buf)
186 {
187 struct dma_chan *chan;
188 unsigned long count = 0;
189 int i;
190 int err;
191
192 mutex_lock(&dma_list_mutex);
193 chan = dev_to_dma_chan(dev);
194 if (chan) {
195 for_each_possible_cpu(i)
196 count += per_cpu_ptr(chan->local, i)->bytes_transferred;
197 err = sprintf(buf, "%lu\n", count);
198 } else
199 err = -ENODEV;
200 mutex_unlock(&dma_list_mutex);
201
202 return err;
203 }
204 static DEVICE_ATTR_RO(bytes_transferred);
205
206 static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
207 char *buf)
208 {
209 struct dma_chan *chan;
210 int err;
211
212 mutex_lock(&dma_list_mutex);
213 chan = dev_to_dma_chan(dev);
214 if (chan)
215 err = sprintf(buf, "%d\n", chan->client_count);
216 else
217 err = -ENODEV;
218 mutex_unlock(&dma_list_mutex);
219
220 return err;
221 }
222 static DEVICE_ATTR_RO(in_use);
223
224 static struct attribute *dma_dev_attrs[] = {
225 &dev_attr_memcpy_count.attr,
226 &dev_attr_bytes_transferred.attr,
227 &dev_attr_in_use.attr,
228 NULL,
229 };
230 ATTRIBUTE_GROUPS(dma_dev);
231
232 static void chan_dev_release(struct device *dev)
233 {
234 struct dma_chan_dev *chan_dev;
235
236 chan_dev = container_of(dev, typeof(*chan_dev), device);
237 kfree(chan_dev);
238 }
239
240 static struct class dma_devclass = {
241 .name = "dma",
242 .dev_groups = dma_dev_groups,
243 .dev_release = chan_dev_release,
244 };
245
246 /* --- client and device registration --- */
247
248 /* enable iteration over all operation types */
249 static dma_cap_mask_t dma_cap_mask_all;
250
251 /**
252 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
253 * @chan: associated channel for this entry
254 */
255 struct dma_chan_tbl_ent {
256 struct dma_chan *chan;
257 };
258
259 /* percpu lookup table for memory-to-memory offload providers */
260 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
261
262 static int __init dma_channel_table_init(void)
263 {
264 enum dma_transaction_type cap;
265 int err = 0;
266
267 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
268
269 /* 'interrupt', 'private', and 'slave' are channel capabilities,
270 * but are not associated with an operation so they do not need
271 * an entry in the channel_table
272 */
273 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
274 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
275 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
276
277 for_each_dma_cap_mask(cap, dma_cap_mask_all) {
278 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
279 if (!channel_table[cap]) {
280 err = -ENOMEM;
281 break;
282 }
283 }
284
285 if (err) {
286 pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
287 for_each_dma_cap_mask(cap, dma_cap_mask_all)
288 free_percpu(channel_table[cap]);
289 }
290
291 return err;
292 }
293 arch_initcall(dma_channel_table_init);
294
295 /**
296 * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
297 * @chan: DMA channel to test
298 * @cpu: CPU index which the channel should be close to
299 *
300 * Returns true if the channel is in the same NUMA-node as the CPU.
301 */
302 static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
303 {
304 int node = dev_to_node(chan->device->dev);
305 return node == NUMA_NO_NODE ||
306 cpumask_test_cpu(cpu, cpumask_of_node(node));
307 }
308
309 /**
310 * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
311 * @cap: capability to match
312 * @cpu: CPU index which the channel should be close to
313 *
314 * If some channels are close to the given CPU, the one with the lowest
315 * reference count is returned. Otherwise, CPU is ignored and only the
316 * reference count is taken into account.
317 *
318 * Must be called under dma_list_mutex.
319 */
320 static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
321 {
322 struct dma_device *device;
323 struct dma_chan *chan;
324 struct dma_chan *min = NULL;
325 struct dma_chan *localmin = NULL;
326
327 list_for_each_entry(device, &dma_device_list, global_node) {
328 if (!dma_has_cap(cap, device->cap_mask) ||
329 dma_has_cap(DMA_PRIVATE, device->cap_mask))
330 continue;
331 list_for_each_entry(chan, &device->channels, device_node) {
332 if (!chan->client_count)
333 continue;
334 if (!min || chan->table_count < min->table_count)
335 min = chan;
336
337 if (dma_chan_is_local(chan, cpu))
338 if (!localmin ||
339 chan->table_count < localmin->table_count)
340 localmin = chan;
341 }
342 }
343
344 chan = localmin ? localmin : min;
345
346 if (chan)
347 chan->table_count++;
348
349 return chan;
350 }
351
352 /**
353 * dma_channel_rebalance - redistribute the available channels
354 *
355 * Optimize for CPU isolation (each CPU gets a dedicated channel for an
356 * operation type) in the SMP case, and operation isolation (avoid
357 * multi-tasking channels) in the non-SMP case.
358 *
359 * Must be called under dma_list_mutex.
360 */
361 static void dma_channel_rebalance(void)
362 {
363 struct dma_chan *chan;
364 struct dma_device *device;
365 int cpu;
366 int cap;
367
368 /* undo the last distribution */
369 for_each_dma_cap_mask(cap, dma_cap_mask_all)
370 for_each_possible_cpu(cpu)
371 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
372
373 list_for_each_entry(device, &dma_device_list, global_node) {
374 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
375 continue;
376 list_for_each_entry(chan, &device->channels, device_node)
377 chan->table_count = 0;
378 }
379
380 /* don't populate the channel_table if no clients are available */
381 if (!dmaengine_ref_count)
382 return;
383
384 /* redistribute available channels */
385 for_each_dma_cap_mask(cap, dma_cap_mask_all)
386 for_each_online_cpu(cpu) {
387 chan = min_chan(cap, cpu);
388 per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
389 }
390 }
391
392 static int dma_device_satisfies_mask(struct dma_device *device,
393 const dma_cap_mask_t *want)
394 {
395 dma_cap_mask_t has;
396
397 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
398 DMA_TX_TYPE_END);
399 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
400 }
401
402 static struct module *dma_chan_to_owner(struct dma_chan *chan)
403 {
404 return chan->device->owner;
405 }
406
407 /**
408 * balance_ref_count - catch up the channel reference count
409 * @chan: channel to balance ->client_count versus dmaengine_ref_count
410 *
411 * Must be called under dma_list_mutex.
412 */
413 static void balance_ref_count(struct dma_chan *chan)
414 {
415 struct module *owner = dma_chan_to_owner(chan);
416
417 while (chan->client_count < dmaengine_ref_count) {
418 __module_get(owner);
419 chan->client_count++;
420 }
421 }
422
423 static void dma_device_release(struct kref *ref)
424 {
425 struct dma_device *device = container_of(ref, struct dma_device, ref);
426
427 list_del_rcu(&device->global_node);
428 dma_channel_rebalance();
429
430 if (device->device_release)
431 device->device_release(device);
432 }
433
434 static void dma_device_put(struct dma_device *device)
435 {
436 lockdep_assert_held(&dma_list_mutex);
437 kref_put(&device->ref, dma_device_release);
438 }
439
440 /**
441 * dma_chan_get - try to grab a DMA channel's parent driver module
442 * @chan: channel to grab
443 *
444 * Must be called under dma_list_mutex.
445 */
446 static int dma_chan_get(struct dma_chan *chan)
447 {
448 struct module *owner = dma_chan_to_owner(chan);
449 int ret;
450
451 /* The channel is already in use, update client count */
452 if (chan->client_count) {
453 __module_get(owner);
454 goto out;
455 }
456
457 if (!try_module_get(owner))
458 return -ENODEV;
459
460 ret = kref_get_unless_zero(&chan->device->ref);
461 if (!ret) {
462 ret = -ENODEV;
463 goto module_put_out;
464 }
465
466 /* allocate upon first client reference */
467 if (chan->device->device_alloc_chan_resources) {
468 ret = chan->device->device_alloc_chan_resources(chan);
469 if (ret < 0)
470 goto err_out;
471 }
472
473 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
474 balance_ref_count(chan);
475
476 out:
477 chan->client_count++;
478 return 0;
479
480 err_out:
481 dma_device_put(chan->device);
482 module_put_out:
483 module_put(owner);
484 return ret;
485 }
486
487 /**
488 * dma_chan_put - drop a reference to a DMA channel's parent driver module
489 * @chan: channel to release
490 *
491 * Must be called under dma_list_mutex.
492 */
493 static void dma_chan_put(struct dma_chan *chan)
494 {
495 /* This channel is not in use, bail out */
496 if (!chan->client_count)
497 return;
498
499 chan->client_count--;
500
501 /* This channel is not in use anymore, free it */
502 if (!chan->client_count && chan->device->device_free_chan_resources) {
503 /* Make sure all operations have completed */
504 dmaengine_synchronize(chan);
505 chan->device->device_free_chan_resources(chan);
506 }
507
508 /* If the channel is used via a DMA request router, free the mapping */
509 if (chan->router && chan->router->route_free) {
510 chan->router->route_free(chan->router->dev, chan->route_data);
511 chan->router = NULL;
512 chan->route_data = NULL;
513 }
514
515 dma_device_put(chan->device);
516 module_put(dma_chan_to_owner(chan));
517 }
518
519 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
520 {
521 enum dma_status status;
522 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
523
524 dma_async_issue_pending(chan);
525 do {
526 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
527 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
528 dev_err(chan->device->dev, "%s: timeout!\n", __func__);
529 return DMA_ERROR;
530 }
531 if (status != DMA_IN_PROGRESS)
532 break;
533 cpu_relax();
534 } while (1);
535
536 return status;
537 }
538 EXPORT_SYMBOL(dma_sync_wait);
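
/*
 * Usage sketch (illustrative only): spin until a previously submitted
 * transaction, identified by the cookie returned from dmaengine_submit(),
 * has completed.
 *
 *	cookie = dmaengine_submit(desc);
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		dev_err(dev, "DMA transfer failed or timed out\n");
 */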
539
540 /**
541 * dma_find_channel - find a channel to carry out the operation
542 * @tx_type: transaction type
543 */
544 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
545 {
546 return this_cpu_read(channel_table[tx_type]->chan);
547 }
548 EXPORT_SYMBOL(dma_find_channel);
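
/*
 * Sketch of the intended calling context (hypothetical client code): the
 * per-CPU table lookup is only valid while a dmaengine_get() reference is
 * held, and the returned channel may be NULL if no provider is available.
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
 *	...
 *	dmaengine_put();
 */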
549
550 /**
551 * dma_issue_pending_all - flush all pending operations across all channels
552 */
553 void dma_issue_pending_all(void)
554 {
555 struct dma_device *device;
556 struct dma_chan *chan;
557
558 rcu_read_lock();
559 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
560 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
561 continue;
562 list_for_each_entry(chan, &device->channels, device_node)
563 if (chan->client_count)
564 device->device_issue_pending(chan);
565 }
566 rcu_read_unlock();
567 }
568 EXPORT_SYMBOL(dma_issue_pending_all);
569
570 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
571 {
572 struct dma_device *device;
573
574 if (!chan || !caps)
575 return -EINVAL;
576
577 device = chan->device;
578
579 /* check if the channel supports slave transactions */
580 if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
581 test_bit(DMA_CYCLIC, device->cap_mask.bits)))
582 return -ENXIO;
583
584 /*
585 * Check whether the device reports the generic slave
586 * capabilities; if not, it doesn't support any kind of
587 * slave capability reporting.
588 */
589 if (!device->directions)
590 return -ENXIO;
591
592 caps->src_addr_widths = device->src_addr_widths;
593 caps->dst_addr_widths = device->dst_addr_widths;
594 caps->directions = device->directions;
595 caps->min_burst = device->min_burst;
596 caps->max_burst = device->max_burst;
597 caps->max_sg_burst = device->max_sg_burst;
598 caps->residue_granularity = device->residue_granularity;
599 caps->descriptor_reuse = device->descriptor_reuse;
600 caps->cmd_pause = !!device->device_pause;
601 caps->cmd_resume = !!device->device_resume;
602 caps->cmd_terminate = !!device->device_terminate_all;
603
604 /*
605 * DMA engine device might be configured with non-uniformly
606 * distributed slave capabilities per device channels. In this
607 * case the corresponding driver may provide the device_caps
608 * callback to override the generic capabilities with
609 * channel-specific ones.
610 */
611 if (device->device_caps)
612 device->device_caps(chan, caps);
613
614 return 0;
615 }
616 EXPORT_SYMBOL_GPL(dma_get_slave_caps);
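
/*
 * Usage sketch (illustrative): a client can probe a channel's slave
 * capabilities before deciding how to configure or drive it.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) && caps.cmd_pause)
 *		... the channel supports dmaengine_pause()/dmaengine_resume() ...
 */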
617
618 static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
619 struct dma_device *dev,
620 dma_filter_fn fn, void *fn_param)
621 {
622 struct dma_chan *chan;
623
624 if (mask && !dma_device_satisfies_mask(dev, mask)) {
625 dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
626 return NULL;
627 }
628 /* devices with multiple channels need special handling as we need to
629 * ensure that all channels are either private or public.
630 */
631 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
632 list_for_each_entry(chan, &dev->channels, device_node) {
633 /* some channels are already publicly allocated */
634 if (chan->client_count)
635 return NULL;
636 }
637
638 list_for_each_entry(chan, &dev->channels, device_node) {
639 if (chan->client_count) {
640 dev_dbg(dev->dev, "%s: %s busy\n",
641 __func__, dma_chan_name(chan));
642 continue;
643 }
644 if (fn && !fn(chan, fn_param)) {
645 dev_dbg(dev->dev, "%s: %s filter said false\n",
646 __func__, dma_chan_name(chan));
647 continue;
648 }
649 return chan;
650 }
651
652 return NULL;
653 }
654
655 static struct dma_chan *find_candidate(struct dma_device *device,
656 const dma_cap_mask_t *mask,
657 dma_filter_fn fn, void *fn_param)
658 {
659 struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
660 int err;
661
662 if (chan) {
663 /* Found a suitable channel, try to grab, prep, and return it.
664 * We first set DMA_PRIVATE to disable balance_ref_count as this
665 * channel will not be published in the general-purpose
666 * allocator
667 */
668 dma_cap_set(DMA_PRIVATE, device->cap_mask);
669 device->privatecnt++;
670 err = dma_chan_get(chan);
671
672 if (err) {
673 if (err == -ENODEV) {
674 dev_dbg(device->dev, "%s: %s module removed\n",
675 __func__, dma_chan_name(chan));
676 list_del_rcu(&device->global_node);
677 } else
678 dev_dbg(device->dev,
679 "%s: failed to get %s: (%d)\n",
680 __func__, dma_chan_name(chan), err);
681
682 if (--device->privatecnt == 0)
683 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
684
685 chan = ERR_PTR(err);
686 }
687 }
688
689 return chan ? chan : ERR_PTR(-EPROBE_DEFER);
690 }
691
692 /**
693 * dma_get_slave_channel - try to get specific channel exclusively
694 * @chan: target channel
695 */
696 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
697 {
698 /* lock against __dma_request_channel */
699 mutex_lock(&dma_list_mutex);
700
701 if (chan->client_count == 0) {
702 struct dma_device *device = chan->device;
703 int err;
704
705 dma_cap_set(DMA_PRIVATE, device->cap_mask);
706 device->privatecnt++;
707 err = dma_chan_get(chan);
708 if (err) {
709 dev_dbg(chan->device->dev,
710 "%s: failed to get %s: (%d)\n",
711 __func__, dma_chan_name(chan), err);
712 chan = NULL;
713 if (--device->privatecnt == 0)
714 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
715 }
716 } else
717 chan = NULL;
718
719 mutex_unlock(&dma_list_mutex);
720
721
722 return chan;
723 }
724 EXPORT_SYMBOL_GPL(dma_get_slave_channel);
725
726 struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
727 {
728 dma_cap_mask_t mask;
729 struct dma_chan *chan;
730
731 dma_cap_zero(mask);
732 dma_cap_set(DMA_SLAVE, mask);
733
734 /* lock against __dma_request_channel */
735 mutex_lock(&dma_list_mutex);
736
737 chan = find_candidate(device, &mask, NULL, NULL);
738
739 mutex_unlock(&dma_list_mutex);
740
741 return IS_ERR(chan) ? NULL : chan;
742 }
743 EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
744
745 /**
746 * __dma_request_channel - try to allocate an exclusive channel
747 * @mask: capabilities that the channel must satisfy
748 * @fn: optional callback used to accept or reject available channels
749 * @fn_param: opaque parameter to pass to dma_filter_fn()
750 * @np: device node to look for DMA channels
751 *
752 * Returns pointer to appropriate DMA channel on success or NULL.
753 */
754 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
755 dma_filter_fn fn, void *fn_param,
756 struct device_node *np)
757 {
758 struct dma_device *device, *_d;
759 struct dma_chan *chan = NULL;
760
761 /* Find a channel */
762 mutex_lock(&dma_list_mutex);
763 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
764 /* Finds a DMA controller with matching device node */
765 if (np && device->dev->of_node && np != device->dev->of_node)
766 continue;
767
768 chan = find_candidate(device, mask, fn, fn_param);
769 if (!IS_ERR(chan))
770 break;
771
772 chan = NULL;
773 }
774 mutex_unlock(&dma_list_mutex);
775
776 pr_debug("%s: %s (%s)\n",
777 __func__,
778 chan ? "success" : "fail",
779 chan ? dma_chan_name(chan) : NULL);
780
781 return chan;
782 }
783 EXPORT_SYMBOL_GPL(__dma_request_channel);
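
/*
 * Illustrative use via the dma_request_channel() wrapper from
 * <linux/dmaengine.h>; the filter function and its parameter are
 * hypothetical.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter_fn, my_filter_param);
 *	if (!chan)
 *		return -ENODEV;
 */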
784
785 static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
786 const char *name,
787 struct device *dev)
788 {
789 int i;
790
791 if (!device->filter.mapcnt)
792 return NULL;
793
794 for (i = 0; i < device->filter.mapcnt; i++) {
795 const struct dma_slave_map *map = &device->filter.map[i];
796
797 if (!strcmp(map->devname, dev_name(dev)) &&
798 !strcmp(map->slave, name))
799 return map;
800 }
801
802 return NULL;
803 }
804
805 /**
806 * dma_request_chan - try to allocate an exclusive slave channel
807 * @dev: pointer to client device structure
808 * @name: slave channel name
809 *
810 * Returns pointer to appropriate DMA channel on success or an error pointer.
811 */
812 struct dma_chan *dma_request_chan(struct device *dev, const char *name)
813 {
814 struct dma_device *d, *_d;
815 struct dma_chan *chan = NULL;
816
817 /* If device-tree is present get slave info from here */
818 if (dev->of_node)
819 chan = of_dma_request_slave_channel(dev->of_node, name);
820
821 /* If device was enumerated by ACPI get slave info from here */
822 if (has_acpi_companion(dev) && !chan)
823 chan = acpi_dma_request_slave_chan_by_name(dev, name);
824
825 if (PTR_ERR(chan) == -EPROBE_DEFER)
826 return chan;
827
828 if (!IS_ERR_OR_NULL(chan))
829 goto found;
830
831 /* Try to find the channel via the DMA filter map(s) */
832 mutex_lock(&dma_list_mutex);
833 list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
834 dma_cap_mask_t mask;
835 const struct dma_slave_map *map = dma_filter_match(d, name, dev);
836
837 if (!map)
838 continue;
839
840 dma_cap_zero(mask);
841 dma_cap_set(DMA_SLAVE, mask);
842
843 chan = find_candidate(d, &mask, d->filter.fn, map->param);
844 if (!IS_ERR(chan))
845 break;
846 }
847 mutex_unlock(&dma_list_mutex);
848
849 if (IS_ERR(chan))
850 return chan;
851 if (!chan)
852 return ERR_PTR(-EPROBE_DEFER);
853
854 found:
855 #ifdef CONFIG_DEBUG_FS
856 chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
857 name);
858 #endif
859
860 chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
861 if (!chan->name)
862 return chan;
863 chan->slave = dev;
864
865 if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
866 DMA_SLAVE_NAME))
867 dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
868 if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
869 dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);
870
871 return chan;
872 }
873 EXPORT_SYMBOL_GPL(dma_request_chan);
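
/*
 * Typical consumer pattern (sketch, error handling abbreviated): "tx" is a
 * hypothetical slave channel name resolved via DT, ACPI or the filter map.
 *
 *	chan = dma_request_chan(dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT);
 *	...
 *	dma_release_channel(chan);
 */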
874
875 /**
876 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
877 * @mask: capabilities that the channel must satisfy
878 *
879 * Returns pointer to appropriate DMA channel on success or an error pointer.
880 */
881 struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
882 {
883 struct dma_chan *chan;
884
885 if (!mask)
886 return ERR_PTR(-ENODEV);
887
888 chan = __dma_request_channel(mask, NULL, NULL, NULL);
889 if (!chan) {
890 mutex_lock(&dma_list_mutex);
891 if (list_empty(&dma_device_list))
892 chan = ERR_PTR(-EPROBE_DEFER);
893 else
894 chan = ERR_PTR(-ENODEV);
895 mutex_unlock(&dma_list_mutex);
896 }
897
898 return chan;
899 }
900 EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
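
/*
 * Example (illustrative): request any channel capable of memcpy offload.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */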
901
902 void dma_release_channel(struct dma_chan *chan)
903 {
904 mutex_lock(&dma_list_mutex);
905 WARN_ONCE(chan->client_count != 1,
906 "chan reference count %d != 1\n", chan->client_count);
907 dma_chan_put(chan);
908 /* drop PRIVATE cap enabled by __dma_request_channel() */
909 if (--chan->device->privatecnt == 0)
910 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
911
912 if (chan->slave) {
913 sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
914 sysfs_remove_link(&chan->slave->kobj, chan->name);
915 kfree(chan->name);
916 chan->name = NULL;
917 chan->slave = NULL;
918 }
919
920 #ifdef CONFIG_DEBUG_FS
921 kfree(chan->dbg_client_name);
922 chan->dbg_client_name = NULL;
923 #endif
924 mutex_unlock(&dma_list_mutex);
925 }
926 EXPORT_SYMBOL_GPL(dma_release_channel);
927
928 /**
929 * dmaengine_get - register interest in dma_channels
930 */
931 void dmaengine_get(void)
932 {
933 struct dma_device *device, *_d;
934 struct dma_chan *chan;
935 int err;
936
937 mutex_lock(&dma_list_mutex);
938 dmaengine_ref_count++;
939
940 /* try to grab channels */
941 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
942 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
943 continue;
944 list_for_each_entry(chan, &device->channels, device_node) {
945 err = dma_chan_get(chan);
946 if (err == -ENODEV) {
947 /* module removed before we could use it */
948 list_del_rcu(&device->global_node);
949 break;
950 } else if (err)
951 dev_dbg(chan->device->dev,
952 "%s: failed to get %s: (%d)\n",
953 __func__, dma_chan_name(chan), err);
954 }
955 }
956
957 /* if this is the first reference and there were channels
958 * waiting we need to rebalance to get those channels
959 * incorporated into the channel table
960 */
961 if (dmaengine_ref_count == 1)
962 dma_channel_rebalance();
963 mutex_unlock(&dma_list_mutex);
964 }
965 EXPORT_SYMBOL(dmaengine_get);
966
967 /**
968 * dmaengine_put - let DMA drivers be removed when ref_count == 0
969 */
970 void dmaengine_put(void)
971 {
972 struct dma_device *device, *_d;
973 struct dma_chan *chan;
974
975 mutex_lock(&dma_list_mutex);
976 dmaengine_ref_count--;
977 BUG_ON(dmaengine_ref_count < 0);
978 /* drop channel references */
979 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
980 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
981 continue;
982 list_for_each_entry(chan, &device->channels, device_node)
983 dma_chan_put(chan);
984 }
985 mutex_unlock(&dma_list_mutex);
986 }
987 EXPORT_SYMBOL(dmaengine_put);
988
989 static bool device_has_all_tx_types(struct dma_device *device)
990 {
991 /* A device that satisfies this test has channels that will never cause
992 * an async_tx channel switch event as all possible operation types can
993 * be handled.
994 */
995 #ifdef CONFIG_ASYNC_TX_DMA
996 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
997 return false;
998 #endif
999
1000 #if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
1001 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
1002 return false;
1003 #endif
1004
1005 #if IS_ENABLED(CONFIG_ASYNC_XOR)
1006 if (!dma_has_cap(DMA_XOR, device->cap_mask))
1007 return false;
1008
1009 #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
1010 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
1011 return false;
1012 #endif
1013 #endif
1014
1015 #if IS_ENABLED(CONFIG_ASYNC_PQ)
1016 if (!dma_has_cap(DMA_PQ, device->cap_mask))
1017 return false;
1018
1019 #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
1020 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
1021 return false;
1022 #endif
1023 #endif
1024
1025 return true;
1026 }
1027
1028 static int get_dma_id(struct dma_device *device)
1029 {
1030 int rc = ida_alloc(&dma_ida, GFP_KERNEL);
1031
1032 if (rc < 0)
1033 return rc;
1034 device->dev_id = rc;
1035 return 0;
1036 }
1037
1038 static int __dma_async_device_channel_register(struct dma_device *device,
1039 struct dma_chan *chan)
1040 {
1041 int rc;
1042
1043 chan->local = alloc_percpu(typeof(*chan->local));
1044 if (!chan->local)
1045 return -ENOMEM;
1046 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
1047 if (!chan->dev) {
1048 rc = -ENOMEM;
1049 goto err_free_local;
1050 }
1051
1052 /*
1053 * When the chan_id is a negative value, we are dynamically adding
1054 * the channel. Otherwise we are enumerating statically.
1055 */
1056 chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
1057 if (chan->chan_id < 0) {
1058 pr_err("%s: unable to alloc ida for chan: %d\n",
1059 __func__, chan->chan_id);
1060 rc = chan->chan_id;
1061 goto err_free_dev;
1062 }
1063
1064 chan->dev->device.class = &dma_devclass;
1065 chan->dev->device.parent = device->dev;
1066 chan->dev->chan = chan;
1067 chan->dev->dev_id = device->dev_id;
1068 dev_set_name(&chan->dev->device, "dma%dchan%d",
1069 device->dev_id, chan->chan_id);
1070 rc = device_register(&chan->dev->device);
1071 if (rc)
1072 goto err_out_ida;
1073 chan->client_count = 0;
1074 device->chancnt++;
1075
1076 return 0;
1077
1078 err_out_ida:
1079 ida_free(&device->chan_ida, chan->chan_id);
1080 err_free_dev:
1081 kfree(chan->dev);
1082 err_free_local:
1083 free_percpu(chan->local);
1084 chan->local = NULL;
1085 return rc;
1086 }
1087
1088 int dma_async_device_channel_register(struct dma_device *device,
1089 struct dma_chan *chan)
1090 {
1091 int rc;
1092
1093 rc = __dma_async_device_channel_register(device, chan);
1094 if (rc < 0)
1095 return rc;
1096
1097 dma_channel_rebalance();
1098 return 0;
1099 }
1100 EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
1101
1102 static void __dma_async_device_channel_unregister(struct dma_device *device,
1103 struct dma_chan *chan)
1104 {
1105 WARN_ONCE(!device->device_release && chan->client_count,
1106 "%s called while %d clients hold a reference\n",
1107 __func__, chan->client_count);
1108 mutex_lock(&dma_list_mutex);
1109 device->chancnt--;
1110 chan->dev->chan = NULL;
1111 mutex_unlock(&dma_list_mutex);
1112 ida_free(&device->chan_ida, chan->chan_id);
1113 device_unregister(&chan->dev->device);
1114 free_percpu(chan->local);
1115 }
1116
1117 void dma_async_device_channel_unregister(struct dma_device *device,
1118 struct dma_chan *chan)
1119 {
1120 __dma_async_device_channel_unregister(device, chan);
1121 dma_channel_rebalance();
1122 }
1123 EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
1124
1125 /**
1126 * dma_async_device_register - registers DMA devices found
1127 * @device: pointer to &struct dma_device
1128 *
1129 * After calling this routine the structure should not be freed except in the
1130 * device_release() callback which will be called after
1131 * dma_async_device_unregister() is called and no further references are taken.
1132 */
1133 int dma_async_device_register(struct dma_device *device)
1134 {
1135 int rc;
1136 struct dma_chan* chan;
1137
1138 if (!device)
1139 return -ENODEV;
1140
1141 /* validate device routines */
1142 if (!device->dev) {
1143 pr_err("DMAdevice must have dev\n");
1144 return -EIO;
1145 }
1146
1147 device->owner = device->dev->driver->owner;
1148
1149 if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
1150 dev_err(device->dev,
1151 "Device claims capability %s, but op is not defined\n",
1152 "DMA_MEMCPY");
1153 return -EIO;
1154 }
1155
1156 if (dma_has_cap(DMA_MEMCPY_SG, device->cap_mask) && !device->device_prep_dma_memcpy_sg) {
1157 dev_err(device->dev,
1158 "Device claims capability %s, but op is not defined\n",
1159 "DMA_MEMCPY_SG");
1160 return -EIO;
1161 }
1162
1163 if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
1164 dev_err(device->dev,
1165 "Device claims capability %s, but op is not defined\n",
1166 "DMA_XOR");
1167 return -EIO;
1168 }
1169
1170 if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
1171 dev_err(device->dev,
1172 "Device claims capability %s, but op is not defined\n",
1173 "DMA_XOR_VAL");
1174 return -EIO;
1175 }
1176
1177 if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
1178 dev_err(device->dev,
1179 "Device claims capability %s, but op is not defined\n",
1180 "DMA_PQ");
1181 return -EIO;
1182 }
1183
1184 if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
1185 dev_err(device->dev,
1186 "Device claims capability %s, but op is not defined\n",
1187 "DMA_PQ_VAL");
1188 return -EIO;
1189 }
1190
1191 if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
1192 dev_err(device->dev,
1193 "Device claims capability %s, but op is not defined\n",
1194 "DMA_MEMSET");
1195 return -EIO;
1196 }
1197
1198 if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
1199 dev_err(device->dev,
1200 "Device claims capability %s, but op is not defined\n",
1201 "DMA_INTERRUPT");
1202 return -EIO;
1203 }
1204
1205 if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
1206 dev_err(device->dev,
1207 "Device claims capability %s, but op is not defined\n",
1208 "DMA_CYCLIC");
1209 return -EIO;
1210 }
1211
1212 if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
1213 dev_err(device->dev,
1214 "Device claims capability %s, but op is not defined\n",
1215 "DMA_INTERLEAVE");
1216 return -EIO;
1217 }
1218
1219
1220 if (!device->device_tx_status) {
1221 dev_err(device->dev, "Device tx_status is not defined\n");
1222 return -EIO;
1223 }
1224
1225
1226 if (!device->device_issue_pending) {
1227 dev_err(device->dev, "Device issue_pending is not defined\n");
1228 return -EIO;
1229 }
1230
1231 if (!device->device_release)
1232 dev_dbg(device->dev,
1233 "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
1234
1235 kref_init(&device->ref);
1236
1237 /* note: this only matters in the
1238 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
1239 */
1240 if (device_has_all_tx_types(device))
1241 dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
1242
1243 rc = get_dma_id(device);
1244 if (rc != 0)
1245 return rc;
1246
1247 ida_init(&device->chan_ida);
1248
1249 /* represent channels in sysfs. Probably want devs too */
1250 list_for_each_entry(chan, &device->channels, device_node) {
1251 rc = __dma_async_device_channel_register(device, chan);
1252 if (rc < 0)
1253 goto err_out;
1254 }
1255
1256 mutex_lock(&dma_list_mutex);
1257 /* take references on public channels */
1258 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1259 list_for_each_entry(chan, &device->channels, device_node) {
1260 /* if clients are already waiting for channels we need
1261 * to take references on their behalf
1262 */
1263 if (dma_chan_get(chan) == -ENODEV) {
1264 /* note we can only get here for the first
1265 * channel as the remaining channels are
1266 * guaranteed to get a reference
1267 */
1268 rc = -ENODEV;
1269 mutex_unlock(&dma_list_mutex);
1270 goto err_out;
1271 }
1272 }
1273 list_add_tail_rcu(&device->global_node, &dma_device_list);
1274 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1275 device->privatecnt++; /* Always private */
1276 dma_channel_rebalance();
1277 mutex_unlock(&dma_list_mutex);
1278
1279 dmaengine_debug_register(device);
1280
1281 return 0;
1282
1283 err_out:
1284 /* if we never registered a channel just release the idr */
1285 if (!device->chancnt) {
1286 ida_free(&dma_ida, device->dev_id);
1287 return rc;
1288 }
1289
1290 list_for_each_entry(chan, &device->channels, device_node) {
1291 if (chan->local == NULL)
1292 continue;
1293 mutex_lock(&dma_list_mutex);
1294 chan->dev->chan = NULL;
1295 mutex_unlock(&dma_list_mutex);
1296 device_unregister(&chan->dev->device);
1297 free_percpu(chan->local);
1298 }
1299 return rc;
1300 }
1301 EXPORT_SYMBOL(dma_async_device_register);
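
/*
 * Provider-side sketch (illustrative, all "foo" names are hypothetical): a
 * DMA driver fills in its capabilities and callbacks, adds its channels to
 * the device's channel list and then registers the device.
 *
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->device_prep_dma_memcpy = foo_prep_memcpy;
 *	dd->device_tx_status = foo_tx_status;
 *	dd->device_issue_pending = foo_issue_pending;
 *	dd->device_release = foo_release;
 *	list_add_tail(&foo_chan->chan.device_node, &dd->channels);
 *	ret = dma_async_device_register(dd);
 */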
1302
1303 /**
1304 * dma_async_device_unregister - unregister a DMA device
1305 * @device: pointer to &struct dma_device
1306 *
1307 * This routine is called by dma driver exit routines; dmaengine holds module
1308 * references to prevent it from being called while channels are in use.
1309 */
1310 void dma_async_device_unregister(struct dma_device *device)
1311 {
1312 struct dma_chan *chan, *n;
1313
1314 dmaengine_debug_unregister(device);
1315
1316 list_for_each_entry_safe(chan, n, &device->channels, device_node)
1317 __dma_async_device_channel_unregister(device, chan);
1318
1319 mutex_lock(&dma_list_mutex);
1320 /*
1321 * setting DMA_PRIVATE ensures the device being torn down will not
1322 * be used in the channel_table
1323 */
1324 dma_cap_set(DMA_PRIVATE, device->cap_mask);
1325 dma_channel_rebalance();
1326 ida_free(&dma_ida, device->dev_id);
1327 dma_device_put(device);
1328 mutex_unlock(&dma_list_mutex);
1329 }
1330 EXPORT_SYMBOL(dma_async_device_unregister);
1331
1332 static void dmam_device_release(struct device *dev, void *res)
1333 {
1334 struct dma_device *device;
1335
1336 device = *(struct dma_device **)res;
1337 dma_async_device_unregister(device);
1338 }
1339
1340 /**
1341 * dmaenginem_async_device_register - registers DMA devices found
1342 * @device: pointer to &struct dma_device
1343 *
1344 * The operation is managed and will be undone on driver detach.
1345 */
1346 int dmaenginem_async_device_register(struct dma_device *device)
1347 {
1348 void *p;
1349 int ret;
1350
1351 p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
1352 if (!p)
1353 return -ENOMEM;
1354
1355 ret = dma_async_device_register(device);
1356 if (!ret) {
1357 *(struct dma_device **)p = device;
1358 devres_add(device->dev, p);
1359 } else {
1360 devres_free(p);
1361 }
1362
1363 return ret;
1364 }
1365 EXPORT_SYMBOL(dmaenginem_async_device_register);
1366
1367 struct dmaengine_unmap_pool {
1368 struct kmem_cache *cache;
1369 const char *name;
1370 mempool_t *pool;
1371 size_t size;
1372 };
1373
1374 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1375 static struct dmaengine_unmap_pool unmap_pool[] = {
1376 __UNMAP_POOL(2),
1377 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1378 __UNMAP_POOL(16),
1379 __UNMAP_POOL(128),
1380 __UNMAP_POOL(256),
1381 #endif
1382 };
1383
1384 static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1385 {
1386 int order = get_count_order(nr);
1387
1388 switch (order) {
1389 case 0 ... 1:
1390 return &unmap_pool[0];
1391 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1392 case 2 ... 4:
1393 return &unmap_pool[1];
1394 case 5 ... 7:
1395 return &unmap_pool[2];
1396 case 8:
1397 return &unmap_pool[3];
1398 #endif
1399 default:
1400 BUG();
1401 return NULL;
1402 }
1403 }
1404
1405 static void dmaengine_unmap(struct kref *kref)
1406 {
1407 struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1408 struct device *dev = unmap->dev;
1409 int cnt, i;
1410
1411 cnt = unmap->to_cnt;
1412 for (i = 0; i < cnt; i++)
1413 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1414 DMA_TO_DEVICE);
1415 cnt += unmap->from_cnt;
1416 for (; i < cnt; i++)
1417 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1418 DMA_FROM_DEVICE);
1419 cnt += unmap->bidi_cnt;
1420 for (; i < cnt; i++) {
1421 if (unmap->addr[i] == 0)
1422 continue;
1423 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1424 DMA_BIDIRECTIONAL);
1425 }
1426 cnt = unmap->map_cnt;
1427 mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1428 }
1429
1430 void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1431 {
1432 if (unmap)
1433 kref_put(&unmap->kref, dmaengine_unmap);
1434 }
1435 EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1436
1437 static void dmaengine_destroy_unmap_pool(void)
1438 {
1439 int i;
1440
1441 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1442 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1443
1444 mempool_destroy(p->pool);
1445 p->pool = NULL;
1446 kmem_cache_destroy(p->cache);
1447 p->cache = NULL;
1448 }
1449 }
1450
1451 static int __init dmaengine_init_unmap_pool(void)
1452 {
1453 int i;
1454
1455 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1456 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1457 size_t size;
1458
1459 size = sizeof(struct dmaengine_unmap_data) +
1460 sizeof(dma_addr_t) * p->size;
1461
1462 p->cache = kmem_cache_create(p->name, size, 0,
1463 SLAB_HWCACHE_ALIGN, NULL);
1464 if (!p->cache)
1465 break;
1466 p->pool = mempool_create_slab_pool(1, p->cache);
1467 if (!p->pool)
1468 break;
1469 }
1470
1471 if (i == ARRAY_SIZE(unmap_pool))
1472 return 0;
1473
1474 dmaengine_destroy_unmap_pool();
1475 return -ENOMEM;
1476 }
1477
1478 struct dmaengine_unmap_data *
1479 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1480 {
1481 struct dmaengine_unmap_data *unmap;
1482
1483 unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1484 if (!unmap)
1485 return NULL;
1486
1487 memset(unmap, 0, sizeof(*unmap));
1488 kref_init(&unmap->kref);
1489 unmap->dev = dev;
1490 unmap->map_cnt = nr;
1491
1492 return unmap;
1493 }
1494 EXPORT_SYMBOL(dmaengine_get_unmap_data);
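
/*
 * Usage sketch (illustrative): map a source and a destination page, record
 * them in an unmap descriptor and drop the reference once the transfer has
 * completed so the pages are unmapped again.
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return NULL;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_page, 0, len, DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_page, 0, len, DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *	...
 *	dmaengine_unmap_put(unmap);
 */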
1495
1496 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1497 struct dma_chan *chan)
1498 {
1499 tx->chan = chan;
1500 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1501 spin_lock_init(&tx->lock);
1502 #endif
1503 }
1504 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1505
1506 static inline int desc_check_and_set_metadata_mode(
1507 struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
1508 {
1509 /* Make sure that the metadata mode is not mixed */
1510 if (!desc->desc_metadata_mode) {
1511 if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
1512 desc->desc_metadata_mode = mode;
1513 else
1514 return -ENOTSUPP;
1515 } else if (desc->desc_metadata_mode != mode) {
1516 return -EINVAL;
1517 }
1518
1519 return 0;
1520 }
1521
1522 int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
1523 void *data, size_t len)
1524 {
1525 int ret;
1526
1527 if (!desc)
1528 return -EINVAL;
1529
1530 ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
1531 if (ret)
1532 return ret;
1533
1534 if (!desc->metadata_ops || !desc->metadata_ops->attach)
1535 return -ENOTSUPP;
1536
1537 return desc->metadata_ops->attach(desc, data, len);
1538 }
1539 EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
1540
1541 void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
1542 size_t *payload_len, size_t *max_len)
1543 {
1544 int ret;
1545
1546 if (!desc)
1547 return ERR_PTR(-EINVAL);
1548
1549 ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
1550 if (ret)
1551 return ERR_PTR(ret);
1552
1553 if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
1554 return ERR_PTR(-ENOTSUPP);
1555
1556 return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
1557 }
1558 EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
1559
1560 int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
1561 size_t payload_len)
1562 {
1563 int ret;
1564
1565 if (!desc)
1566 return -EINVAL;
1567
1568 ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
1569 if (ret)
1570 return ret;
1571
1572 if (!desc->metadata_ops || !desc->metadata_ops->set_len)
1573 return -ENOTSUPP;
1574
1575 return desc->metadata_ops->set_len(desc, payload_len);
1576 }
1577 EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
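
/*
 * Client-mode metadata sketch (illustrative): attach a metadata buffer to a
 * descriptor before submitting it, provided the channel advertises
 * DESC_METADATA_CLIENT support.
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT);
 *	ret = dmaengine_desc_attach_metadata(desc, mdata, mdata_len);
 *	if (!ret)
 *		dmaengine_submit(desc);
 */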
1578
1579 /**
1580 * dma_wait_for_async_tx - spin wait for a transaction to complete
1581 * @tx: in-flight transaction to wait on
1582 */
1583 enum dma_status
1584 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1585 {
1586 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1587
1588 if (!tx)
1589 return DMA_COMPLETE;
1590
1591 while (tx->cookie == -EBUSY) {
1592 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1593 dev_err(tx->chan->device->dev,
1594 "%s timeout waiting for descriptor submission\n",
1595 __func__);
1596 return DMA_ERROR;
1597 }
1598 cpu_relax();
1599 }
1600 return dma_sync_wait(tx->chan, tx->cookie);
1601 }
1602 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1603
1604 /**
1605 * dma_run_dependencies - process dependent operations on the target channel
1606 * @tx: transaction with dependencies
1607 *
1608 * Helper routine for DMA drivers to process (start) dependent operations
1609 * on their target channel.
1610 */
1611 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1612 {
1613 struct dma_async_tx_descriptor *dep = txd_next(tx);
1614 struct dma_async_tx_descriptor *dep_next;
1615 struct dma_chan *chan;
1616
1617 if (!dep)
1618 return;
1619
1620 /* we'll submit tx->next now, so clear the link */
1621 txd_clear_next(tx);
1622 chan = dep->chan;
1623
1624 /* keep submitting up until a channel switch is detected
1625 * in that case we will be called again as a result of
1626 * processing the interrupt from async_tx_channel_switch
1627 */
1628 for (; dep; dep = dep_next) {
1629 txd_lock(dep);
1630 txd_clear_parent(dep);
1631 dep_next = txd_next(dep);
1632 if (dep_next && dep_next->chan == chan)
1633 txd_clear_next(dep); /* ->next will be submitted */
1634 else
1635 dep_next = NULL; /* submit current dep and terminate */
1636 txd_unlock(dep);
1637
1638 dep->tx_submit(dep);
1639 }
1640
1641 chan->device->device_issue_pending(chan);
1642 }
1643 EXPORT_SYMBOL_GPL(dma_run_dependencies);
1644
1645 static int __init dma_bus_init(void)
1646 {
1647 int err = dmaengine_init_unmap_pool();
1648
1649 if (err)
1650 return err;
1651
1652 err = class_register(&dma_devclass);
1653 if (!err)
1654 dmaengine_debugfs_init();
1655
1656 return err;
1657 }
1658 arch_initcall(dma_bus_init);
1659