GNU Linux-libre 4.14.290-gnu1
drivers/crypto/ccp/ccp-dmaengine.c
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

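/* Derive a DMA width value from a DMA mask: a full 64-bit mask wraps to
 * zero when incremented and is reported as 64 bits; any narrower mask is
 * reported as fls64(mask + 1).
 */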
#define CCP_DMA_WIDTH(_mask)            \
({                                      \
        u64 mask = _mask + 1;           \
        (mask == 0) ? 64 : fls64(mask); \
})

/* The CCP as a DMA provider can be configured for public or private
 * channels. Default is specified in the vdata for the device (PCI ID).
 * This module parameter will override for all channels on all devices:
 *   dma_chan_attr = 0x2 to force all channels public
 *                 = 0x1 to force all channels private
 *                 = 0x0 to defer to the vdata setting
 *                 = any other value: warning, revert to 0x0
 */
static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");

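/* Resolve the effective channel attribute for a device: DMA_PRIVATE to make
 * the channels private, 0 to leave them public. The module parameter
 * overrides the per-device vdata default; an unrecognized value is reported
 * once and the vdata setting is used.
 */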
unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
        switch (dma_chan_attr) {
        case CCP_DMA_DFLT:
                return ccp->vdata->dma_chan_attr;

        case CCP_DMA_PRIV:
                return DMA_PRIVATE;

        case CCP_DMA_PUB:
                return 0;

        default:
                dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
                              dma_chan_attr);
                return ccp->vdata->dma_chan_attr;
        }
}

static void ccp_free_cmd_resources(struct ccp_device *ccp,
                                   struct list_head *list)
{
        struct ccp_dma_cmd *cmd, *ctmp;

        list_for_each_entry_safe(cmd, ctmp, list, entry) {
                list_del(&cmd->entry);
                kmem_cache_free(ccp->dma_cmd_cache, cmd);
        }
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
                                    struct list_head *list)
{
        struct ccp_dma_desc *desc, *dtmp;

        list_for_each_entry_safe(desc, dtmp, list, entry) {
                ccp_free_cmd_resources(ccp, &desc->active);
                ccp_free_cmd_resources(ccp, &desc->pending);

                list_del(&desc->entry);
                kmem_cache_free(ccp->dma_desc_cache, desc);
        }
}

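/* dmaengine device_free_chan_resources callback: release every descriptor
 * still held on the channel's complete, active, pending and created lists.
 */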
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

        spin_lock_irqsave(&chan->lock, flags);

        ccp_free_desc_resources(chan->ccp, &chan->complete);
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
        ccp_free_desc_resources(chan->ccp, &chan->created);

        spin_unlock_irqrestore(&chan->lock, flags);
}

static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
                                       struct list_head *list)
{
        struct ccp_dma_desc *desc, *dtmp;

        list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
                if (!async_tx_test_ack(&desc->tx_desc))
                        continue;

                dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

                ccp_free_cmd_resources(ccp, &desc->active);
                ccp_free_cmd_resources(ccp, &desc->pending);

                list_del(&desc->entry);
                kmem_cache_free(ccp->dma_desc_cache, desc);
        }
}

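/* Cleanup tasklet: free descriptors on the complete list whose transactions
 * have been acknowledged by the client (async_tx_test_ack()).
 */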
static void ccp_do_cleanup(unsigned long data)
{
        struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
                dma_chan_name(&chan->dma_chan));

        spin_lock_irqsave(&chan->lock, flags);

        ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

        spin_unlock_irqrestore(&chan->lock, flags);
}

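/* Move the descriptor's first pending command to its active list and submit
 * it to the CCP. -EINPROGRESS and -EBUSY mean the command was accepted (or
 * backlogged) and completion will arrive later via ccp_cmd_callback(), so
 * they are treated as success.
 */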
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
        struct ccp_dma_cmd *cmd;
        int ret;

        cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
        list_move(&cmd->entry, &desc->active);

        dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
                desc->tx_desc.cookie, cmd);

        ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
        if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
                return 0;

        dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
                ret, desc->tx_desc.cookie, cmd);

        return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
        struct ccp_dma_cmd *cmd;

        cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
                                       entry);
        if (!cmd)
                return;

        dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
                __func__, desc->tx_desc.cookie, cmd);

        list_del(&cmd->entry);
        kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
                                                struct ccp_dma_desc *desc)
{
        /* Move current DMA descriptor to the complete list */
        if (desc)
                list_move(&desc->entry, &chan->complete);

        /* Get the next DMA descriptor on the active list */
        desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
                                        entry);

        return desc;
}

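/* Retire the command that just finished on the current descriptor. If the
 * descriptor still has pending commands (and no error), return it so the
 * next command can be issued; otherwise mark it complete, complete its
 * cookie, run the client callback and dependencies, move it to the complete
 * list, and repeat with the next descriptor on the active list. Returns
 * NULL when nothing is left to process.
 */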
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
                                                   struct ccp_dma_desc *desc)
{
        struct dma_async_tx_descriptor *tx_desc;
        unsigned long flags;

        /* Loop over descriptors until one is found with commands */
        do {
                if (desc) {
                        /* Remove the DMA command from the list and free it */
                        ccp_free_active_cmd(desc);

                        if (!list_empty(&desc->pending)) {
                                /* No errors, keep going */
                                if (desc->status != DMA_ERROR)
                                        return desc;

                                /* Error, free remaining commands and move on */
                                ccp_free_cmd_resources(desc->ccp,
                                                       &desc->pending);
                        }

                        tx_desc = &desc->tx_desc;
                } else {
                        tx_desc = NULL;
                }

                spin_lock_irqsave(&chan->lock, flags);

                if (desc) {
                        if (desc->status != DMA_ERROR)
                                desc->status = DMA_COMPLETE;

                        dev_dbg(desc->ccp->dev,
                                "%s - tx %d complete, status=%u\n", __func__,
                                desc->tx_desc.cookie, desc->status);

                        dma_cookie_complete(tx_desc);
                }

                desc = __ccp_next_dma_desc(chan, desc);

                spin_unlock_irqrestore(&chan->lock, flags);

                if (tx_desc) {
                        if (tx_desc->callback &&
                            (tx_desc->flags & DMA_PREP_INTERRUPT))
                                tx_desc->callback(tx_desc->callback_param);

                        dma_run_dependencies(tx_desc);
                }
        } while (desc);

        return NULL;
}

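/* Splice the channel's pending descriptors onto the tail of its active list.
 * Returns the first newly activated descriptor if the active list was empty
 * (processing needs to be kicked off), otherwise NULL. Called with
 * chan->lock held.
 */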
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
        struct ccp_dma_desc *desc;

        if (list_empty(&chan->pending))
                return NULL;

        desc = list_empty(&chan->active)
                ? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
                : NULL;

        list_splice_tail_init(&chan->pending, &chan->active);

        return desc;
}

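/* Completion callback attached to every CCP command. Once a command has
 * finished (err != -EINPROGRESS), record any error, retire completed
 * descriptors via ccp_handle_active_desc(), issue the next command unless
 * the channel is paused, and schedule the cleanup tasklet.
 */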
static void ccp_cmd_callback(void *data, int err)
{
        struct ccp_dma_desc *desc = data;
        struct ccp_dma_chan *chan;
        int ret;

        if (err == -EINPROGRESS)
                return;

        chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
                            dma_chan);

        dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
                __func__, desc->tx_desc.cookie, err);

        if (err)
                desc->status = DMA_ERROR;

        while (true) {
                /* Check for DMA descriptor completion */
                desc = ccp_handle_active_desc(chan, desc);

                /* Don't submit cmd if no descriptor or DMA is paused */
                if (!desc || (chan->status == DMA_PAUSED))
                        break;

                ret = ccp_issue_next_cmd(desc);
                if (!ret)
                        break;

                desc->status = DMA_ERROR;
        }

        tasklet_schedule(&chan->cleanup_tasklet);
}

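/* dmaengine tx_submit callback: assign a cookie to the transaction and move
 * its descriptor from the channel's created list to the pending list.
 */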
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
        struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
                                                 tx_desc);
        struct ccp_dma_chan *chan;
        dma_cookie_t cookie;
        unsigned long flags;

        chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

        spin_lock_irqsave(&chan->lock, flags);

        cookie = dma_cookie_assign(tx_desc);
        list_del(&desc->entry);
        list_add_tail(&desc->entry, &chan->pending);

        spin_unlock_irqrestore(&chan->lock, flags);

        dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
                __func__, cookie);

        return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
        struct ccp_dma_cmd *cmd;

        cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
        if (cmd)
                memset(cmd, 0, sizeof(*cmd));

        return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
                                               unsigned long flags)
{
        struct ccp_dma_desc *desc;

        desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
        if (!desc)
                return NULL;

        dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
        desc->tx_desc.flags = flags;
        desc->tx_desc.tx_submit = ccp_tx_submit;
        desc->ccp = chan->ccp;
        INIT_LIST_HEAD(&desc->entry);
        INIT_LIST_HEAD(&desc->pending);
        INIT_LIST_HEAD(&desc->active);
        desc->status = DMA_IN_PROGRESS;

        return desc;
}

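/* Build a DMA descriptor for a source/destination scatterlist pair. The two
 * lists are walked in step and each overlapping chunk becomes one CCP
 * passthrough (no-DMA-map) command on the descriptor's pending list; the
 * finished descriptor is then placed on the channel's created list.
 */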
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
                                            struct scatterlist *dst_sg,
                                            unsigned int dst_nents,
                                            struct scatterlist *src_sg,
                                            unsigned int src_nents,
                                            unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_device *ccp = chan->ccp;
        struct ccp_dma_desc *desc;
        struct ccp_dma_cmd *cmd;
        struct ccp_cmd *ccp_cmd;
        struct ccp_passthru_nomap_engine *ccp_pt;
        unsigned int src_offset, src_len;
        unsigned int dst_offset, dst_len;
        unsigned int len;
        unsigned long sflags;
        size_t total_len;

        if (!dst_sg || !src_sg)
                return NULL;

        if (!dst_nents || !src_nents)
                return NULL;

        desc = ccp_alloc_dma_desc(chan, flags);
        if (!desc)
                return NULL;

        total_len = 0;

        src_len = sg_dma_len(src_sg);
        src_offset = 0;

        dst_len = sg_dma_len(dst_sg);
        dst_offset = 0;

        while (true) {
                if (!src_len) {
                        src_nents--;
                        if (!src_nents)
                                break;

                        src_sg = sg_next(src_sg);
                        if (!src_sg)
                                break;

                        src_len = sg_dma_len(src_sg);
                        src_offset = 0;
                        continue;
                }

                if (!dst_len) {
                        dst_nents--;
                        if (!dst_nents)
                                break;

                        dst_sg = sg_next(dst_sg);
                        if (!dst_sg)
                                break;

                        dst_len = sg_dma_len(dst_sg);
                        dst_offset = 0;
                        continue;
                }

                len = min(dst_len, src_len);

                cmd = ccp_alloc_dma_cmd(chan);
                if (!cmd)
                        goto err;

                ccp_cmd = &cmd->ccp_cmd;
                ccp_cmd->ccp = chan->ccp;
                ccp_pt = &ccp_cmd->u.passthru_nomap;
                ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
                ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
                ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
                ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
                ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
                ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
                ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
                ccp_pt->src_len = len;
                ccp_pt->final = 1;
                ccp_cmd->callback = ccp_cmd_callback;
                ccp_cmd->data = desc;

                list_add_tail(&cmd->entry, &desc->pending);

                dev_dbg(ccp->dev,
                        "%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
                        cmd, &ccp_pt->src_dma,
                        &ccp_pt->dst_dma, ccp_pt->src_len);

                total_len += len;

                src_len -= len;
                src_offset += len;

                dst_len -= len;
                dst_offset += len;
        }

        desc->len = total_len;

        if (list_empty(&desc->pending))
                goto err;

        dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

        spin_lock_irqsave(&chan->lock, sflags);

        list_add_tail(&desc->entry, &chan->created);

        spin_unlock_irqrestore(&chan->lock, sflags);

        return desc;

err:
        ccp_free_cmd_resources(ccp, &desc->pending);
        kmem_cache_free(ccp->dma_desc_cache, desc);

        return NULL;
}

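/* dmaengine device_prep_dma_memcpy callback: wrap the single source and
 * destination DMA addresses in one-entry scatterlists and build a descriptor
 * from them.
 */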
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
        struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
        unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        struct scatterlist dst_sg, src_sg;

        dev_dbg(chan->ccp->dev,
                "%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
                __func__, &src, &dst, len, flags);

        sg_init_table(&dst_sg, 1);
        sg_dma_address(&dst_sg) = dst;
        sg_dma_len(&dst_sg) = len;

        sg_init_table(&src_sg, 1);
        sg_dma_address(&src_sg) = src;
        sg_dma_len(&src_sg) = len;

        desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
        if (!desc)
                return NULL;

        return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
        struct dma_chan *dma_chan, unsigned long flags)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;

        desc = ccp_alloc_dma_desc(chan, flags);
        if (!desc)
                return NULL;

        return &desc->tx_desc;
}

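/* dmaengine device_issue_pending callback: move submitted descriptors from
 * the pending list to the active list and, if the channel was idle, kick off
 * processing through ccp_cmd_callback().
 */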
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s\n", __func__);

        spin_lock_irqsave(&chan->lock, flags);

        desc = __ccp_pending_to_active(chan);

        spin_unlock_irqrestore(&chan->lock, flags);

        /* If there was nothing active, start processing */
        if (desc)
                ccp_cmd_callback(desc, 0);
}

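/* dmaengine device_tx_status callback: report DMA_PAUSED while the channel
 * is paused; otherwise use the cookie state, refining a DMA_COMPLETE result
 * with the descriptor's own status (which may be DMA_ERROR) if it is still
 * on the complete list.
 */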
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
                                     dma_cookie_t cookie,
                                     struct dma_tx_state *state)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        enum dma_status ret;
        unsigned long flags;

        if (chan->status == DMA_PAUSED) {
                ret = DMA_PAUSED;
                goto out;
        }

        ret = dma_cookie_status(dma_chan, cookie, state);
        if (ret == DMA_COMPLETE) {
                spin_lock_irqsave(&chan->lock, flags);

                /* Get status from complete chain, if still there */
                list_for_each_entry(desc, &chan->complete, entry) {
                        if (desc->tx_desc.cookie != cookie)
                                continue;

                        ret = desc->status;
                        break;
                }

                spin_unlock_irqrestore(&chan->lock, flags);
        }

out:
        dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

        return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);

        chan->status = DMA_PAUSED;

        /*TODO: Wait for active DMA to complete before returning? */

        return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        struct ccp_dma_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
                                        entry);

        spin_unlock_irqrestore(&chan->lock, flags);

        /* Indicate the channel is running again */
        chan->status = DMA_IN_PROGRESS;

        /* If there was something active, re-start */
        if (desc)
                ccp_cmd_callback(desc, 0);

        return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
        struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
                                                 dma_chan);
        unsigned long flags;

        dev_dbg(chan->ccp->dev, "%s\n", __func__);

        /*TODO: Wait for active DMA to complete before continuing */

        spin_lock_irqsave(&chan->lock, flags);

        /*TODO: Purge the complete list? */
        ccp_free_desc_resources(chan->ccp, &chan->active);
        ccp_free_desc_resources(chan->ccp, &chan->pending);
        ccp_free_desc_resources(chan->ccp, &chan->created);

        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}

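/* Kill each channel's cleanup tasklet and unlink its dma_chan from the DMA
 * device's channel list.
 */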
static void ccp_dma_release(struct ccp_device *ccp)
{
        struct ccp_dma_chan *chan;
        struct dma_chan *dma_chan;
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++) {
                chan = ccp->ccp_dma_chan + i;
                dma_chan = &chan->dma_chan;
                tasklet_kill(&chan->cleanup_tasklet);
                list_del_rcu(&dma_chan->device_node);
        }
}

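/* Register the CCP as a dmaengine provider: allocate one DMA channel per
 * command queue plus the command/descriptor caches, advertise memcpy and
 * interrupt capabilities, apply the public/private channel policy, and
 * register the device with the dmaengine core.
 */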
int ccp_dmaengine_register(struct ccp_device *ccp)
{
        struct ccp_dma_chan *chan;
        struct dma_device *dma_dev = &ccp->dma_dev;
        struct dma_chan *dma_chan;
        char *dma_cmd_cache_name;
        char *dma_desc_cache_name;
        unsigned int i;
        int ret;

        ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
                                         sizeof(*(ccp->ccp_dma_chan)),
                                         GFP_KERNEL);
        if (!ccp->ccp_dma_chan)
                return -ENOMEM;

        dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
                                            "%s-dmaengine-cmd-cache",
                                            ccp->name);
        if (!dma_cmd_cache_name)
                return -ENOMEM;

        ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
                                               sizeof(struct ccp_dma_cmd),
                                               sizeof(void *),
                                               SLAB_HWCACHE_ALIGN, NULL);
        if (!ccp->dma_cmd_cache)
                return -ENOMEM;

        dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
                                             "%s-dmaengine-desc-cache",
                                             ccp->name);
        if (!dma_desc_cache_name) {
                ret = -ENOMEM;
                goto err_cache;
        }

        ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
                                                sizeof(struct ccp_dma_desc),
                                                sizeof(void *),
                                                SLAB_HWCACHE_ALIGN, NULL);
        if (!ccp->dma_desc_cache) {
                ret = -ENOMEM;
                goto err_cache;
        }

        dma_dev->dev = ccp->dev;
        dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
        dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
        dma_dev->directions = DMA_MEM_TO_MEM;
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

        /* The DMA channels for this device can be set to public or private,
         * and overridden by the module parameter dma_chan_attr.
         * Default: according to the value in vdata (dma_chan_attr=0)
         * dma_chan_attr=0x1: all channels private (override vdata)
         * dma_chan_attr=0x2: all channels public (override vdata)
         */
        if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
                dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

        INIT_LIST_HEAD(&dma_dev->channels);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                chan = ccp->ccp_dma_chan + i;
                dma_chan = &chan->dma_chan;

                chan->ccp = ccp;

                spin_lock_init(&chan->lock);
                INIT_LIST_HEAD(&chan->created);
                INIT_LIST_HEAD(&chan->pending);
                INIT_LIST_HEAD(&chan->active);
                INIT_LIST_HEAD(&chan->complete);

                tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
                             (unsigned long)chan);

                dma_chan->device = dma_dev;
                dma_cookie_init(dma_chan);

                list_add_tail(&dma_chan->device_node, &dma_dev->channels);
        }

        dma_dev->device_free_chan_resources = ccp_free_chan_resources;
        dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
        dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
        dma_dev->device_issue_pending = ccp_issue_pending;
        dma_dev->device_tx_status = ccp_tx_status;
        dma_dev->device_pause = ccp_pause;
        dma_dev->device_resume = ccp_resume;
        dma_dev->device_terminate_all = ccp_terminate_all;

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto err_reg;

        return 0;

err_reg:
        ccp_dma_release(ccp);
        kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
        kmem_cache_destroy(ccp->dma_cmd_cache);

        return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
        struct dma_device *dma_dev = &ccp->dma_dev;

        dma_async_device_unregister(dma_dev);
        ccp_dma_release(ccp);

        kmem_cache_destroy(ccp->dma_desc_cache);
        kmem_cache_destroy(ccp->dma_cmd_cache);
}