// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})

/* The CCP as a DMA provider can be configured for public or private
 * channels. Default is specified in the vdata for the device (PCI ID).
 * This module parameter will override for all channels on all devices:
 *   dma_chan_attr = 0x2 to force all channels public
 *                 = 0x1 to force all channels private
 *                 = 0x0 to defer to the vdata setting
 *                 = any other value: warning, revert to 0x0
 */
static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");

static unsigned int dmaengine = 1;
module_param(dmaengine, uint, 0444);
MODULE_PARM_DESC(dmaengine, "Register services with the DMA subsystem (any non-zero value, default: 1)");

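/*
 * Resolve the effective channel visibility: a valid dma_chan_attr module
 * parameter overrides the per-device vdata default; anything else falls
 * back to the vdata setting with a one-time warning.
 */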
static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
	switch (dma_chan_attr) {
	case CCP_DMA_DFLT:
		return ccp->vdata->dma_chan_attr;

	case CCP_DMA_PRIV:
		return DMA_PRIVATE;

	case CCP_DMA_PUB:
		return 0;

	default:
		dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
			      dma_chan_attr);
		return ccp->vdata->dma_chan_attr;
	}
}

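/* Unlink every DMA command on @list and return it to the command cache. */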
static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

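/*
 * dmaengine device_free_chan_resources callback: drop every descriptor
 * still queued on the channel, whatever state it is in.
 */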
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);
}

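/*
 * Release completed descriptors, newest first, but only once the client
 * has acknowledged them (async_tx_test_ack()).
 */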
static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

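/* Cleanup tasklet body: reap acknowledged descriptors off the complete list. */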
static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

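/*
 * Move the first pending command of @desc to its active list and hand it
 * to the CCP. A busy queue (-EBUSY) or an in-flight command (-EINPROGRESS)
 * is not treated as an error here.
 */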
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

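/* Free the command at the head of the descriptor's active list, if any. */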
static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

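/*
 * Retire @desc and walk the active list until a descriptor with commands
 * left to run is found. Each finished descriptor has its cookie completed
 * and its client callback invoked; NULL is returned when the channel runs
 * dry.
 */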
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
			dma_descriptor_unmap(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

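/*
 * Splice the pending list onto the tail of the active list. Returns the
 * first newly activated descriptor if the active list was empty (meaning
 * processing must be kicked off), or NULL if work is already in flight.
 */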
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
	       ? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
	       : NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

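/*
 * Completion callback attached to every CCP command: once a command has a
 * final status, retire finished descriptors and keep issuing commands
 * until the channel is empty, paused, or hits an error.
 */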
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

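/* dmaengine tx_submit hook: assign a cookie and queue the descriptor. */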
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_move_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->entry);
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

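/*
 * Build a DMA descriptor for a src/dst scatterlist pair: each overlapping
 * chunk becomes one CCP passthrough (no-op) command copying
 * min(src_len, dst_len) bytes, advancing through both lists until either
 * side is exhausted.
 */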
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_cmd->ccp = chan->ccp;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->created);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

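/*
 * dmaengine device_prep_dma_memcpy callback: wrap the single src/dst
 * region in one-entry scatterlists and build a descriptor from them.
 */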
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

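/*
 * dmaengine device_issue_pending callback: activate queued descriptors
 * and, if the channel was idle, start processing the first one.
 */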
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

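/*
 * dmaengine device_tx_status callback: report the cookie state, refined
 * by the per-descriptor status if the descriptor is still on the
 * complete list.
 */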
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/*TODO: Wait for active DMA to complete before returning? */

	return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/*TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/*TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

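/*
 * Undo per-channel setup: release any claimed channels, kill the cleanup
 * tasklets, and unlink the channels from the DMA device.
 */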
static void ccp_dma_release(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_chan *dma_chan;
	unsigned int i;

	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		if (dma_chan->client_count)
			dma_release_channel(dma_chan);

		tasklet_kill(&chan->cleanup_tasklet);
		list_del_rcu(&dma_chan->device_node);
	}
}

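/*
 * Set up and register one DMA channel per command queue with the
 * dmaengine core. Returns 0 on success or a negative errno.
 */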
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	if (!dmaengine)
		return 0;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* The DMA channels for this device can be set to public or private,
	 * and overridden by the module parameter dma_chan_attr.
	 * Default: according to the value in vdata (dma_chan_attr=0)
	 * dma_chan_attr=0x1: all channels private (override vdata)
	 * dma_chan_attr=0x2: all channels public (override vdata)
	 */
	if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->created);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	ccp_dma_release(ccp);
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

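/* Tear down the dmaengine registration performed above. */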
void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	if (!dmaengine)
		return;

	ccp_dma_release(ccp);
	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}