GNU Linux-libre 4.14.266-gnu1: drivers/dma/dw/core.c
1 /*
2  * Core driver for the Synopsys DesignWare DMA Controller
3  *
4  * Copyright (C) 2007-2008 Atmel Corporation
5  * Copyright (C) 2010-2011 ST Microelectronics
6  * Copyright (C) 2013 Intel Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #include <linux/bitops.h>
14 #include <linux/delay.h>
15 #include <linux/dmaengine.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dmapool.h>
18 #include <linux/err.h>
19 #include <linux/init.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/pm_runtime.h>
26
27 #include "../dmaengine.h"
28 #include "internal.h"
29
30 /*
31  * This supports the Synopsys "DesignWare AHB Central DMA Controller"
32  * (DW_ahb_dmac), which is used with various AMBA 2.0 systems (not all
33  * of which use ARM any more).  See the "Databook" from Synopsys for
34  * information beyond what licensees probably provide.
35  *
36  * The driver has been tested with the Atmel AT32AP7000, which does not
37  * support descriptor writeback.
38  */
39
40 #define DWC_DEFAULT_CTLLO(_chan) ({                             \
41                 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);       \
42                 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
43                 bool _is_slave = is_slave_direction(_dwc->direction);   \
44                 u8 _smsize = _is_slave ? _sconfig->src_maxburst :       \
45                         DW_DMA_MSIZE_16;                        \
46                 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :       \
47                         DW_DMA_MSIZE_16;                        \
48                 u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?         \
49                         _dwc->dws.p_master : _dwc->dws.m_master;        \
50                 u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?         \
51                         _dwc->dws.p_master : _dwc->dws.m_master;        \
52                                                                 \
53                 (DWC_CTLL_DST_MSIZE(_dmsize)                    \
54                  | DWC_CTLL_SRC_MSIZE(_smsize)                  \
55                  | DWC_CTLL_LLP_D_EN                            \
56                  | DWC_CTLL_LLP_S_EN                            \
57                  | DWC_CTLL_DMS(_dms)                           \
58                  | DWC_CTLL_SMS(_sms));                         \
59         })
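
/*
 * Note: DWC_DEFAULT_CTLLO() only provides the common CTL_LO bits; the prep
 * routines below (dwc_prep_dma_memcpy(), dwc_prep_slave_sg()) OR in the
 * direction-specific transfer widths, address-increment modes and
 * flow-control fields on top of it.
 */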
60
61 /* The set of bus widths supported by the DMA controller */
62 #define DW_DMA_BUSWIDTHS                          \
63         BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)       | \
64         BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)          | \
65         BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)         | \
66         BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
67
68 /*----------------------------------------------------------------------*/
69
70 static struct device *chan2dev(struct dma_chan *chan)
71 {
72         return &chan->dev->device;
73 }
74
75 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
76 {
77         return to_dw_desc(dwc->active_list.next);
78 }
79
80 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
81 {
82         struct dw_desc          *desc = txd_to_dw_desc(tx);
83         struct dw_dma_chan      *dwc = to_dw_dma_chan(tx->chan);
84         dma_cookie_t            cookie;
85         unsigned long           flags;
86
87         spin_lock_irqsave(&dwc->lock, flags);
88         cookie = dma_cookie_assign(tx);
89
90         /*
91          * REVISIT: We should attempt to chain as many descriptors as
92          * possible, perhaps even appending to those already submitted
93          * for DMA. But this is hard to do in a race-free manner.
94          */
95
96         list_add_tail(&desc->desc_node, &dwc->queue);
97         spin_unlock_irqrestore(&dwc->lock, flags);
98         dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
99                  __func__, desc->txd.cookie);
100
101         return cookie;
102 }
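
/*
 * A minimal sketch of how this submit path is reached from a dmaengine
 * client (illustrative only, not part of this driver): dmaengine_submit()
 * ends up here via txd->tx_submit, and dma_async_issue_pending() then
 * triggers dwc_issue_pending() below:
 *
 *        desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *                                       DMA_PREP_INTERRUPT);
 *        cookie = dmaengine_submit(desc);
 *        dma_async_issue_pending(chan);
 */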
103
104 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
105 {
106         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
107         struct dw_desc *desc;
108         dma_addr_t phys;
109
110         desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
111         if (!desc)
112                 return NULL;
113
114         dwc->descs_allocated++;
115         INIT_LIST_HEAD(&desc->tx_list);
116         dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
117         desc->txd.tx_submit = dwc_tx_submit;
118         desc->txd.flags = DMA_CTRL_ACK;
119         desc->txd.phys = phys;
120         return desc;
121 }
122
123 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
124 {
125         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
126         struct dw_desc *child, *_next;
127
128         if (unlikely(!desc))
129                 return;
130
131         list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
132                 list_del(&child->desc_node);
133                 dma_pool_free(dw->desc_pool, child, child->txd.phys);
134                 dwc->descs_allocated--;
135         }
136
137         dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
138         dwc->descs_allocated--;
139 }
140
141 static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc)
142 {
143         u32 cfghi = 0;
144         u32 cfglo = 0;
145
146         /* Set default burst alignment */
147         cfglo |= IDMA32C_CFGL_DST_BURST_ALIGN | IDMA32C_CFGL_SRC_BURST_ALIGN;
148
149         /* Low 4 bits of the request lines */
150         cfghi |= IDMA32C_CFGH_DST_PER(dwc->dws.dst_id & 0xf);
151         cfghi |= IDMA32C_CFGH_SRC_PER(dwc->dws.src_id & 0xf);
152
153         /* Request line extension (2 bits) */
154         cfghi |= IDMA32C_CFGH_DST_PER_EXT(dwc->dws.dst_id >> 4 & 0x3);
155         cfghi |= IDMA32C_CFGH_SRC_PER_EXT(dwc->dws.src_id >> 4 & 0x3);
156
157         channel_writel(dwc, CFG_LO, cfglo);
158         channel_writel(dwc, CFG_HI, cfghi);
159 }
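
/*
 * Worked example for the request line encoding above (hypothetical ID):
 * with dws.src_id = 21 (0x15) the low nibble 0x5 goes into SRC_PER and the
 * extension bits (21 >> 4) & 0x3 = 0x1 go into SRC_PER_EXT, giving a 6-bit
 * request line number of up to 63.
 */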
160
161 static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc)
162 {
163         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
164         u32 cfghi = DWC_CFGH_FIFO_MODE;
165         u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
166         bool hs_polarity = dwc->dws.hs_polarity;
167
168         cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id);
169         cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id);
170         cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl);
171
172         /* Set polarity of handshake interface */
173         cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0;
174
175         channel_writel(dwc, CFG_LO, cfglo);
176         channel_writel(dwc, CFG_HI, cfghi);
177 }
178
179 static void dwc_initialize(struct dw_dma_chan *dwc)
180 {
181         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
182
183         if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
184                 return;
185
186         if (dw->pdata->is_idma32)
187                 dwc_initialize_chan_idma32(dwc);
188         else
189                 dwc_initialize_chan_dw(dwc);
190
191         /* Enable interrupts */
192         channel_set_bit(dw, MASK.XFER, dwc->mask);
193         channel_set_bit(dw, MASK.ERROR, dwc->mask);
194
195         set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
196 }
197
198 /*----------------------------------------------------------------------*/
199
200 static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
201 {
202         dev_err(chan2dev(&dwc->chan),
203                 "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
204                 channel_readl(dwc, SAR),
205                 channel_readl(dwc, DAR),
206                 channel_readl(dwc, LLP),
207                 channel_readl(dwc, CTL_HI),
208                 channel_readl(dwc, CTL_LO));
209 }
210
211 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
212 {
213         channel_clear_bit(dw, CH_EN, dwc->mask);
214         while (dma_readl(dw, CH_EN) & dwc->mask)
215                 cpu_relax();
216 }
217
218 static u32 bytes2block(struct dw_dma_chan *dwc, size_t bytes,
219                           unsigned int width, size_t *len)
220 {
221         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
222         u32 block;
223
224         /* Always in bytes for iDMA 32-bit */
225         if (dw->pdata->is_idma32)
226                 width = 0;
227
228         if ((bytes >> width) > dwc->block_size) {
229                 block = dwc->block_size;
230                 *len = block << width;
231         } else {
232                 block = bytes >> width;
233                 *len = bytes;
234         }
235
236         return block;
237 }
238
239 static size_t block2bytes(struct dw_dma_chan *dwc, u32 block, u32 width)
240 {
241         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
242
243         if (dw->pdata->is_idma32)
244                 return IDMA32C_CTLH_BLOCK_TS(block);
245
246         return DWC_CTLH_BLOCK_TS(block) << width;
247 }
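
/*
 * Worked example for the two helpers above (hypothetical numbers): with a
 * 32-bit transfer width (width = 2) and dwc->block_size = 4095, a request
 * of 32768 bytes is 8192 items, so bytes2block() clamps it to a block of
 * 4095 items and reports *len = 4095 << 2 = 16380 bytes consumed.  On
 * iDMA 32-bit the block is always counted in bytes (width forced to 0).
 */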
248
249 /*----------------------------------------------------------------------*/
250
251 /* Perform single block transfer */
252 static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
253                                        struct dw_desc *desc)
254 {
255         struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
256         u32             ctllo;
257
258         /*
259          * Software emulation of LLP mode relies on interrupts to continue
260          * multi block transfer.
261          */
262         ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;
263
264         channel_writel(dwc, SAR, lli_read(desc, sar));
265         channel_writel(dwc, DAR, lli_read(desc, dar));
266         channel_writel(dwc, CTL_LO, ctllo);
267         channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
268         channel_set_bit(dw, CH_EN, dwc->mask);
269
270         /* Move pointer to next descriptor */
271         dwc->tx_node_active = dwc->tx_node_active->next;
272 }
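
/*
 * In the soft LLP case, every block started by the helper above finishes
 * with a transfer-complete (XFER) interrupt; the tasklet then re-enters
 * dwc_scan_descriptors(), which advances tx_node_active and calls
 * dwc_do_single_block() again until the whole descriptor chain has been
 * walked.
 */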
273
274 /* Called with dwc->lock held and bh disabled */
275 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
276 {
277         struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
278         u8              lms = DWC_LLP_LMS(dwc->dws.m_master);
279         unsigned long   was_soft_llp;
280
281         /* ASSERT:  channel is idle */
282         if (dma_readl(dw, CH_EN) & dwc->mask) {
283                 dev_err(chan2dev(&dwc->chan),
284                         "%s: BUG: Attempted to start non-idle channel\n",
285                         __func__);
286                 dwc_dump_chan_regs(dwc);
287
288                 /* The tasklet will hopefully advance the queue... */
289                 return;
290         }
291
292         if (dwc->nollp) {
293                 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
294                                                 &dwc->flags);
295                 if (was_soft_llp) {
296                         dev_err(chan2dev(&dwc->chan),
297                                 "BUG: Attempted to start new LLP transfer inside ongoing one\n");
298                         return;
299                 }
300
301                 dwc_initialize(dwc);
302
303                 first->residue = first->total_len;
304                 dwc->tx_node_active = &first->tx_list;
305
306                 /* Submit first block */
307                 dwc_do_single_block(dwc, first);
308
309                 return;
310         }
311
312         dwc_initialize(dwc);
313
314         channel_writel(dwc, LLP, first->txd.phys | lms);
315         channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
316         channel_writel(dwc, CTL_HI, 0);
317         channel_set_bit(dw, CH_EN, dwc->mask);
318 }
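
/*
 * In the hardware LLP case above, a single write of the first lli address
 * to the LLP register is enough: the controller fetches the descriptor
 * chain on its own, and the driver is only involved again when the
 * transfer-complete interrupt brings it back into dwc_scan_descriptors().
 */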
319
320 static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
321 {
322         struct dw_desc *desc;
323
324         if (list_empty(&dwc->queue))
325                 return;
326
327         list_move(dwc->queue.next, &dwc->active_list);
328         desc = dwc_first_active(dwc);
329         dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
330         dwc_dostart(dwc, desc);
331 }
332
333 /*----------------------------------------------------------------------*/
334
335 static void
336 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
337                 bool callback_required)
338 {
339         struct dma_async_tx_descriptor  *txd = &desc->txd;
340         struct dw_desc                  *child;
341         unsigned long                   flags;
342         struct dmaengine_desc_callback  cb;
343
344         dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
345
346         spin_lock_irqsave(&dwc->lock, flags);
347         dma_cookie_complete(txd);
348         if (callback_required)
349                 dmaengine_desc_get_callback(txd, &cb);
350         else
351                 memset(&cb, 0, sizeof(cb));
352
353         /* async_tx_ack */
354         list_for_each_entry(child, &desc->tx_list, desc_node)
355                 async_tx_ack(&child->txd);
356         async_tx_ack(&desc->txd);
357         dwc_desc_put(dwc, desc);
358         spin_unlock_irqrestore(&dwc->lock, flags);
359
360         dmaengine_desc_callback_invoke(&cb, NULL);
361 }
362
363 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
364 {
365         struct dw_desc *desc, *_desc;
366         LIST_HEAD(list);
367         unsigned long flags;
368
369         spin_lock_irqsave(&dwc->lock, flags);
370         if (dma_readl(dw, CH_EN) & dwc->mask) {
371                 dev_err(chan2dev(&dwc->chan),
372                         "BUG: XFER bit set, but channel not idle!\n");
373
374                 /* Try to continue after resetting the channel... */
375                 dwc_chan_disable(dw, dwc);
376         }
377
378         /*
379          * Submit queued descriptors ASAP, i.e. before we go through
380          * the completed ones.
381          */
382         list_splice_init(&dwc->active_list, &list);
383         dwc_dostart_first_queued(dwc);
384
385         spin_unlock_irqrestore(&dwc->lock, flags);
386
387         list_for_each_entry_safe(desc, _desc, &list, desc_node)
388                 dwc_descriptor_complete(dwc, desc, true);
389 }
390
391 /* Returns how many bytes were already received from source */
392 static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
393 {
394         u32 ctlhi = channel_readl(dwc, CTL_HI);
395         u32 ctllo = channel_readl(dwc, CTL_LO);
396
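        /*
         * CTL_LO bits 6:4 hold SRC_TR_WIDTH (cf. DWC_CTLL_SRC_WIDTH()), and
         * BLOCK_TS in CTL_HI counts items of that width on DesignWare
         * (iDMA 32-bit counts plain bytes), hence the width argument passed
         * to block2bytes().
         */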
397         return block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
398 }
399
400 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
401 {
402         dma_addr_t llp;
403         struct dw_desc *desc, *_desc;
404         struct dw_desc *child;
405         u32 status_xfer;
406         unsigned long flags;
407
408         spin_lock_irqsave(&dwc->lock, flags);
409         llp = channel_readl(dwc, LLP);
410         status_xfer = dma_readl(dw, RAW.XFER);
411
412         if (status_xfer & dwc->mask) {
413                 /* Everything we've submitted is done */
414                 dma_writel(dw, CLEAR.XFER, dwc->mask);
415
416                 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
417                         struct list_head *head, *active = dwc->tx_node_active;
418
419                         /*
420                          * We are inside the first active descriptor.
421                          * Otherwise something is really wrong.
422                          */
423                         desc = dwc_first_active(dwc);
424
425                         head = &desc->tx_list;
426                         if (active != head) {
427                                 /* Update residue to reflect last sent descriptor */
428                                 if (active == head->next)
429                                         desc->residue -= desc->len;
430                                 else
431                                         desc->residue -= to_dw_desc(active->prev)->len;
432
433                                 child = to_dw_desc(active);
434
435                                 /* Submit next block */
436                                 dwc_do_single_block(dwc, child);
437
438                                 spin_unlock_irqrestore(&dwc->lock, flags);
439                                 return;
440                         }
441
442                         /* We are done here */
443                         clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
444                 }
445
446                 spin_unlock_irqrestore(&dwc->lock, flags);
447
448                 dwc_complete_all(dw, dwc);
449                 return;
450         }
451
452         if (list_empty(&dwc->active_list)) {
453                 spin_unlock_irqrestore(&dwc->lock, flags);
454                 return;
455         }
456
457         if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
458                 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
459                 spin_unlock_irqrestore(&dwc->lock, flags);
460                 return;
461         }
462
463         dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
464
465         list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
466                 /* Initial residue value */
467                 desc->residue = desc->total_len;
468
469                 /* Check the first descriptor's address */
470                 if (desc->txd.phys == DWC_LLP_LOC(llp)) {
471                         spin_unlock_irqrestore(&dwc->lock, flags);
472                         return;
473                 }
474
475                 /* Check the first descriptor's llp */
476                 if (lli_read(desc, llp) == llp) {
477                         /* This one is currently in progress */
478                         desc->residue -= dwc_get_sent(dwc);
479                         spin_unlock_irqrestore(&dwc->lock, flags);
480                         return;
481                 }
482
483                 desc->residue -= desc->len;
484                 list_for_each_entry(child, &desc->tx_list, desc_node) {
485                         if (lli_read(child, llp) == llp) {
486                                 /* Currently in progress */
487                                 desc->residue -= dwc_get_sent(dwc);
488                                 spin_unlock_irqrestore(&dwc->lock, flags);
489                                 return;
490                         }
491                         desc->residue -= child->len;
492                 }
493
494                 /*
495                  * No descriptors so far seem to be in progress, i.e.
496                  * this one must be done.
497                  */
498                 spin_unlock_irqrestore(&dwc->lock, flags);
499                 dwc_descriptor_complete(dwc, desc, true);
500                 spin_lock_irqsave(&dwc->lock, flags);
501         }
502
503         dev_err(chan2dev(&dwc->chan),
504                 "BUG: All descriptors done, but channel not idle!\n");
505
506         /* Try to continue after resetting the channel... */
507         dwc_chan_disable(dw, dwc);
508
509         dwc_dostart_first_queued(dwc);
510         spin_unlock_irqrestore(&dwc->lock, flags);
511 }
512
513 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
514 {
515         dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
516                  lli_read(desc, sar),
517                  lli_read(desc, dar),
518                  lli_read(desc, llp),
519                  lli_read(desc, ctlhi),
520                  lli_read(desc, ctllo));
521 }
522
523 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
524 {
525         struct dw_desc *bad_desc;
526         struct dw_desc *child;
527         unsigned long flags;
528
529         dwc_scan_descriptors(dw, dwc);
530
531         spin_lock_irqsave(&dwc->lock, flags);
532
533         /*
534          * The descriptor currently at the head of the active list is
535          * borked. Since we don't have any way to report errors, we'll
536          * just have to scream loudly and try to carry on.
537          */
538         bad_desc = dwc_first_active(dwc);
539         list_del_init(&bad_desc->desc_node);
540         list_move(dwc->queue.next, dwc->active_list.prev);
541
542         /* Clear the error flag and try to restart the controller */
543         dma_writel(dw, CLEAR.ERROR, dwc->mask);
544         if (!list_empty(&dwc->active_list))
545                 dwc_dostart(dwc, dwc_first_active(dwc));
546
547         /*
548          * WARN may seem harsh, but since this only happens
549          * when someone submits a bad physical address in a
550          * descriptor, we should consider ourselves lucky that the
551          * controller flagged an error instead of scribbling over
552          * random memory locations.
553          */
554         dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
555                                        "  cookie: %d\n", bad_desc->txd.cookie);
556         dwc_dump_lli(dwc, bad_desc);
557         list_for_each_entry(child, &bad_desc->tx_list, desc_node)
558                 dwc_dump_lli(dwc, child);
559
560         spin_unlock_irqrestore(&dwc->lock, flags);
561
562         /* Pretend the descriptor completed successfully */
563         dwc_descriptor_complete(dwc, bad_desc, true);
564 }
565
566 static void dw_dma_tasklet(unsigned long data)
567 {
568         struct dw_dma *dw = (struct dw_dma *)data;
569         struct dw_dma_chan *dwc;
570         u32 status_xfer;
571         u32 status_err;
572         unsigned int i;
573
574         status_xfer = dma_readl(dw, RAW.XFER);
575         status_err = dma_readl(dw, RAW.ERROR);
576
577         dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
578
579         for (i = 0; i < dw->dma.chancnt; i++) {
580                 dwc = &dw->chan[i];
581                 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
582                         dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
583                 else if (status_err & (1 << i))
584                         dwc_handle_error(dw, dwc);
585                 else if (status_xfer & (1 << i))
586                         dwc_scan_descriptors(dw, dwc);
587         }
588
589         /* Re-enable interrupts */
590         channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
591         channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
592 }
593
594 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
595 {
596         struct dw_dma *dw = dev_id;
597         u32 status;
598
599         /* The IRQ is shared; bail out if this DMAC is not in use */
600         if (!dw->in_use)
601                 return IRQ_NONE;
602
603         status = dma_readl(dw, STATUS_INT);
604         dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
605
606         /* Check if we have any interrupt from the DMAC */
607         if (!status)
608                 return IRQ_NONE;
609
610         /*
611          * Just disable the interrupts. We'll turn them back on in the
612          * softirq handler.
613          */
614         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
615         channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
616         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
617
618         status = dma_readl(dw, STATUS_INT);
619         if (status) {
620                 dev_err(dw->dma.dev,
621                         "BUG: Unexpected interrupts pending: 0x%x\n",
622                         status);
623
624                 /* Try to recover */
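                /* (1 << 8) - 1 covers all eight possible channels */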
625                 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
626                 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
627                 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
628                 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
629                 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
630         }
631
632         tasklet_schedule(&dw->tasklet);
633
634         return IRQ_HANDLED;
635 }
636
637 /*----------------------------------------------------------------------*/
638
639 static struct dma_async_tx_descriptor *
640 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
641                 size_t len, unsigned long flags)
642 {
643         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
644         struct dw_dma           *dw = to_dw_dma(chan->device);
645         struct dw_desc          *desc;
646         struct dw_desc          *first;
647         struct dw_desc          *prev;
648         size_t                  xfer_count;
649         size_t                  offset;
650         u8                      m_master = dwc->dws.m_master;
651         unsigned int            src_width;
652         unsigned int            dst_width;
653         unsigned int            data_width = dw->pdata->data_width[m_master];
654         u32                     ctllo;
655         u8                      lms = DWC_LLP_LMS(m_master);
656
657         dev_vdbg(chan2dev(chan),
658                         "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
659                         &dest, &src, len, flags);
660
661         if (unlikely(!len)) {
662                 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
663                 return NULL;
664         }
665
666         dwc->direction = DMA_MEM_TO_MEM;
667
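        /*
         * The OR below picks the widest transfer size that src, dest, len
         * and the master data width are all aligned to.  For example
         * (hypothetical values), with data_width = 4, src = 0x1000,
         * dest = 0x2004 and len = 0x100 the OR is 0x3104 and __ffs() gives
         * 2, i.e. 32-bit transfers.
         */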
668         src_width = dst_width = __ffs(data_width | src | dest | len);
669
670         ctllo = DWC_DEFAULT_CTLLO(chan)
671                         | DWC_CTLL_DST_WIDTH(dst_width)
672                         | DWC_CTLL_SRC_WIDTH(src_width)
673                         | DWC_CTLL_DST_INC
674                         | DWC_CTLL_SRC_INC
675                         | DWC_CTLL_FC_M2M;
676         prev = first = NULL;
677
678         for (offset = 0; offset < len; offset += xfer_count) {
679                 desc = dwc_desc_get(dwc);
680                 if (!desc)
681                         goto err_desc_get;
682
683                 lli_write(desc, sar, src + offset);
684                 lli_write(desc, dar, dest + offset);
685                 lli_write(desc, ctllo, ctllo);
686                 lli_write(desc, ctlhi, bytes2block(dwc, len - offset, src_width, &xfer_count));
687                 desc->len = xfer_count;
688
689                 if (!first) {
690                         first = desc;
691                 } else {
692                         lli_write(prev, llp, desc->txd.phys | lms);
693                         list_add_tail(&desc->desc_node, &first->tx_list);
694                 }
695                 prev = desc;
696         }
697
698         if (flags & DMA_PREP_INTERRUPT)
699                 /* Trigger interrupt after last block */
700                 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
701
702         prev->lli.llp = 0;
703         lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
704         first->txd.flags = flags;
705         first->total_len = len;
706
707         return &first->txd;
708
709 err_desc_get:
710         dwc_desc_put(dwc, first);
711         return NULL;
712 }
713
714 static struct dma_async_tx_descriptor *
715 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
716                 unsigned int sg_len, enum dma_transfer_direction direction,
717                 unsigned long flags, void *context)
718 {
719         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
720         struct dw_dma           *dw = to_dw_dma(chan->device);
721         struct dma_slave_config *sconfig = &dwc->dma_sconfig;
722         struct dw_desc          *prev;
723         struct dw_desc          *first;
724         u32                     ctllo;
725         u8                      m_master = dwc->dws.m_master;
726         u8                      lms = DWC_LLP_LMS(m_master);
727         dma_addr_t              reg;
728         unsigned int            reg_width;
729         unsigned int            mem_width;
730         unsigned int            data_width = dw->pdata->data_width[m_master];
731         unsigned int            i;
732         struct scatterlist      *sg;
733         size_t                  total_len = 0;
734
735         dev_vdbg(chan2dev(chan), "%s\n", __func__);
736
737         if (unlikely(!is_slave_direction(direction) || !sg_len))
738                 return NULL;
739
740         dwc->direction = direction;
741
742         prev = first = NULL;
743
744         switch (direction) {
745         case DMA_MEM_TO_DEV:
746                 reg_width = __ffs(sconfig->dst_addr_width);
747                 reg = sconfig->dst_addr;
748                 ctllo = (DWC_DEFAULT_CTLLO(chan)
749                                 | DWC_CTLL_DST_WIDTH(reg_width)
750                                 | DWC_CTLL_DST_FIX
751                                 | DWC_CTLL_SRC_INC);
752
753                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
754                         DWC_CTLL_FC(DW_DMA_FC_D_M2P);
755
756                 for_each_sg(sgl, sg, sg_len, i) {
757                         struct dw_desc  *desc;
758                         u32             len, mem;
759                         size_t          dlen;
760
761                         mem = sg_dma_address(sg);
762                         len = sg_dma_len(sg);
763
764                         mem_width = __ffs(data_width | mem | len);
765
766 slave_sg_todev_fill_desc:
767                         desc = dwc_desc_get(dwc);
768                         if (!desc)
769                                 goto err_desc_get;
770
771                         lli_write(desc, sar, mem);
772                         lli_write(desc, dar, reg);
773                         lli_write(desc, ctlhi, bytes2block(dwc, len, mem_width, &dlen));
774                         lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
775                         desc->len = dlen;
776
777                         if (!first) {
778                                 first = desc;
779                         } else {
780                                 lli_write(prev, llp, desc->txd.phys | lms);
781                                 list_add_tail(&desc->desc_node, &first->tx_list);
782                         }
783                         prev = desc;
784
785                         mem += dlen;
786                         len -= dlen;
787                         total_len += dlen;
788
789                         if (len)
790                                 goto slave_sg_todev_fill_desc;
791                 }
792                 break;
793         case DMA_DEV_TO_MEM:
794                 reg_width = __ffs(sconfig->src_addr_width);
795                 reg = sconfig->src_addr;
796                 ctllo = (DWC_DEFAULT_CTLLO(chan)
797                                 | DWC_CTLL_SRC_WIDTH(reg_width)
798                                 | DWC_CTLL_DST_INC
799                                 | DWC_CTLL_SRC_FIX);
800
801                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
802                         DWC_CTLL_FC(DW_DMA_FC_D_P2M);
803
804                 for_each_sg(sgl, sg, sg_len, i) {
805                         struct dw_desc  *desc;
806                         u32             len, mem;
807                         size_t          dlen;
808
809                         mem = sg_dma_address(sg);
810                         len = sg_dma_len(sg);
811
812 slave_sg_fromdev_fill_desc:
813                         desc = dwc_desc_get(dwc);
814                         if (!desc)
815                                 goto err_desc_get;
816
817                         lli_write(desc, sar, reg);
818                         lli_write(desc, dar, mem);
819                         lli_write(desc, ctlhi, bytes2block(dwc, len, reg_width, &dlen));
820                         mem_width = __ffs(data_width | mem | dlen);
821                         lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
822                         desc->len = dlen;
823
824                         if (!first) {
825                                 first = desc;
826                         } else {
827                                 lli_write(prev, llp, desc->txd.phys | lms);
828                                 list_add_tail(&desc->desc_node, &first->tx_list);
829                         }
830                         prev = desc;
831
832                         mem += dlen;
833                         len -= dlen;
834                         total_len += dlen;
835
836                         if (len)
837                                 goto slave_sg_fromdev_fill_desc;
838                 }
839                 break;
840         default:
841                 return NULL;
842         }
843
844         if (flags & DMA_PREP_INTERRUPT)
845                 /* Trigger interrupt after last block */
846                 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
847
848         prev->lli.llp = 0;
849         lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
850         first->total_len = total_len;
851
852         return &first->txd;
853
854 err_desc_get:
855         dev_err(chan2dev(chan),
856                 "not enough descriptors available. Direction %d\n", direction);
857         dwc_desc_put(dwc, first);
858         return NULL;
859 }
860
861 bool dw_dma_filter(struct dma_chan *chan, void *param)
862 {
863         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
864         struct dw_dma_slave *dws = param;
865
866         if (dws->dma_dev != chan->device->dev)
867                 return false;
868
869         /* We have to copy data since dws can be temporary storage */
870         memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
871
872         return true;
873 }
874 EXPORT_SYMBOL_GPL(dw_dma_filter);
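
/*
 * Typical client-side usage of this filter, as a sketch (illustrative only;
 * the device pointer and request line IDs below are made up):
 *
 *        struct dw_dma_slave dws = {
 *                .dma_dev = dmac_dev,
 *                .src_id = 1,
 *                .dst_id = 1,
 *                .m_master = 0,
 *                .p_master = 1,
 *        };
 *        dma_cap_mask_t mask;
 *        struct dma_chan *chan;
 *
 *        dma_cap_zero(mask);
 *        dma_cap_set(DMA_SLAVE, mask);
 *        chan = dma_request_channel(mask, dw_dma_filter, &dws);
 */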
875
876 static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
877 {
878         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
879         struct dma_slave_config *sc = &dwc->dma_sconfig;
880         struct dw_dma *dw = to_dw_dma(chan->device);
881         /*
882          * Fix sconfig's burst size according to dw_dmac. We need to
883          * convert them as follows:
884          * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
885          *
886          * NOTE: burst size 2 is not supported by the DesignWare controller;
887          *       iDMA 32-bit does support it.
888          */
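        /*
         * For example, a requested src_maxburst of 8 encodes as
         * fls(8) - 2 = 2 on DesignWare (matching the 8 -> 2 mapping above),
         * and as fls(8) - 1 = 3 on iDMA 32-bit, which also allows a burst
         * of 2.
         */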
889         u32 s = dw->pdata->is_idma32 ? 1 : 2;
890
891         /* Check if chan will be configured for slave transfers */
892         if (!is_slave_direction(sconfig->direction))
893                 return -EINVAL;
894
895         memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
896         dwc->direction = sconfig->direction;
897
898         sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0;
899         sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0;
900
901         return 0;
902 }
903
904 static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
905 {
906         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
907         unsigned int            count = 20;     /* timeout iterations */
908         u32                     cfglo;
909
910         cfglo = channel_readl(dwc, CFG_LO);
911         if (dw->pdata->is_idma32) {
912                 if (drain)
913                         cfglo |= IDMA32C_CFGL_CH_DRAIN;
914                 else
915                         cfglo &= ~IDMA32C_CFGL_CH_DRAIN;
916         }
917         channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
918         while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
919                 udelay(2);
920
921         set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
922 }
923
924 static int dwc_pause(struct dma_chan *chan)
925 {
926         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
927         unsigned long           flags;
928
929         spin_lock_irqsave(&dwc->lock, flags);
930         dwc_chan_pause(dwc, false);
931         spin_unlock_irqrestore(&dwc->lock, flags);
932
933         return 0;
934 }
935
936 static inline void dwc_chan_resume(struct dw_dma_chan *dwc)
937 {
938         u32 cfglo = channel_readl(dwc, CFG_LO);
939
940         channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
941
942         clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
943 }
944
945 static int dwc_resume(struct dma_chan *chan)
946 {
947         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
948         unsigned long           flags;
949
950         spin_lock_irqsave(&dwc->lock, flags);
951
952         if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
953                 dwc_chan_resume(dwc);
954
955         spin_unlock_irqrestore(&dwc->lock, flags);
956
957         return 0;
958 }
959
960 static int dwc_terminate_all(struct dma_chan *chan)
961 {
962         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
963         struct dw_dma           *dw = to_dw_dma(chan->device);
964         struct dw_desc          *desc, *_desc;
965         unsigned long           flags;
966         LIST_HEAD(list);
967
968         spin_lock_irqsave(&dwc->lock, flags);
969
970         clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
971
972         dwc_chan_pause(dwc, true);
973
974         dwc_chan_disable(dw, dwc);
975
976         dwc_chan_resume(dwc);
977
978         /* active_list entries will end up before queued entries */
979         list_splice_init(&dwc->queue, &list);
980         list_splice_init(&dwc->active_list, &list);
981
982         spin_unlock_irqrestore(&dwc->lock, flags);
983
984         /* Flush all pending and queued descriptors */
985         list_for_each_entry_safe(desc, _desc, &list, desc_node)
986                 dwc_descriptor_complete(dwc, desc, false);
987
988         return 0;
989 }
990
991 static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
992 {
993         struct dw_desc *desc;
994
995         list_for_each_entry(desc, &dwc->active_list, desc_node)
996                 if (desc->txd.cookie == c)
997                         return desc;
998
999         return NULL;
1000 }
1001
1002 static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
1003 {
1004         struct dw_desc *desc;
1005         unsigned long flags;
1006         u32 residue;
1007
1008         spin_lock_irqsave(&dwc->lock, flags);
1009
1010         desc = dwc_find_desc(dwc, cookie);
1011         if (desc) {
1012                 if (desc == dwc_first_active(dwc)) {
1013                         residue = desc->residue;
1014                         if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
1015                                 residue -= dwc_get_sent(dwc);
1016                 } else {
1017                         residue = desc->total_len;
1018                 }
1019         } else {
1020                 residue = 0;
1021         }
1022
1023         spin_unlock_irqrestore(&dwc->lock, flags);
1024         return residue;
1025 }
1026
1027 static enum dma_status
1028 dwc_tx_status(struct dma_chan *chan,
1029               dma_cookie_t cookie,
1030               struct dma_tx_state *txstate)
1031 {
1032         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1033         enum dma_status         ret;
1034
1035         ret = dma_cookie_status(chan, cookie, txstate);
1036         if (ret == DMA_COMPLETE)
1037                 return ret;
1038
1039         dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1040
1041         ret = dma_cookie_status(chan, cookie, txstate);
1042         if (ret == DMA_COMPLETE)
1043                 return ret;
1044
1045         dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
1046
1047         if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
1048                 return DMA_PAUSED;
1049
1050         return ret;
1051 }
1052
1053 static void dwc_issue_pending(struct dma_chan *chan)
1054 {
1055         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1056         unsigned long           flags;
1057
1058         spin_lock_irqsave(&dwc->lock, flags);
1059         if (list_empty(&dwc->active_list))
1060                 dwc_dostart_first_queued(dwc);
1061         spin_unlock_irqrestore(&dwc->lock, flags);
1062 }
1063
1064 /*----------------------------------------------------------------------*/
1065
1066 /*
1067  * Program the FIFO size of the channels.
1068  *
1069  * By default the full FIFO (512 bytes) is assigned to channel 0. Here we
1070  * slice the FIFO into equal parts between the channels.
1071  */
1072 static void idma32_fifo_partition(struct dw_dma *dw)
1073 {
1074         u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
1075                     IDMA32C_FP_UPDATE;
1076         u64 fifo_partition = 0;
1077
1078         if (!dw->pdata->is_idma32)
1079                 return;
1080
1081         /* Fill FIFO_PARTITION low bits (Channels 0..1, 4..5) */
1082         fifo_partition |= value << 0;
1083
1084         /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
1085         fifo_partition |= value << 32;
1086
1087         /* Program FIFO Partition registers - 64 bytes per channel */
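        /* With eight channels this accounts for the whole 512 byte FIFO */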
1088         idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
1089         idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
1090 }
1091
1092 static void dw_dma_off(struct dw_dma *dw)
1093 {
1094         unsigned int i;
1095
1096         dma_writel(dw, CFG, 0);
1097
1098         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1099         channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
1100         channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1101         channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1102         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1103
1104         while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1105                 cpu_relax();
1106
1107         for (i = 0; i < dw->dma.chancnt; i++)
1108                 clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
1109 }
1110
1111 static void dw_dma_on(struct dw_dma *dw)
1112 {
1113         dma_writel(dw, CFG, DW_CFG_DMA_EN);
1114 }
1115
1116 static int dwc_alloc_chan_resources(struct dma_chan *chan)
1117 {
1118         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1119         struct dw_dma           *dw = to_dw_dma(chan->device);
1120
1121         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1122
1123         /* ASSERT:  channel is idle */
1124         if (dma_readl(dw, CH_EN) & dwc->mask) {
1125                 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1126                 return -EIO;
1127         }
1128
1129         dma_cookie_init(chan);
1130
1131         /*
1132          * NOTE: some controllers may have additional features that we
1133          * need to initialize here, like "scatter-gather" (which
1134          * doesn't mean what you think it means), and status writeback.
1135          */
1136
1137         /*
1138          * We need controller-specific data to set up slave transfers.
1139          */
1140         if (chan->private && !dw_dma_filter(chan, chan->private)) {
1141                 dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
1142                 return -EINVAL;
1143         }
1144
1145         /* Enable controller here if needed */
1146         if (!dw->in_use)
1147                 dw_dma_on(dw);
1148         dw->in_use |= dwc->mask;
1149
1150         return 0;
1151 }
1152
1153 static void dwc_free_chan_resources(struct dma_chan *chan)
1154 {
1155         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1156         struct dw_dma           *dw = to_dw_dma(chan->device);
1157         unsigned long           flags;
1158         LIST_HEAD(list);
1159
1160         dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1161                         dwc->descs_allocated);
1162
1163         /* ASSERT:  channel is idle */
1164         BUG_ON(!list_empty(&dwc->active_list));
1165         BUG_ON(!list_empty(&dwc->queue));
1166         BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1167
1168         spin_lock_irqsave(&dwc->lock, flags);
1169
1170         /* Clear custom channel configuration */
1171         memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
1172
1173         clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
1174
1175         /* Disable interrupts */
1176         channel_clear_bit(dw, MASK.XFER, dwc->mask);
1177         channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
1178         channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1179
1180         spin_unlock_irqrestore(&dwc->lock, flags);
1181
1182         /* Disable the controller in case it was the last user */
1183         dw->in_use &= ~dwc->mask;
1184         if (!dw->in_use)
1185                 dw_dma_off(dw);
1186
1187         dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1188 }
1189
1190 int dw_dma_probe(struct dw_dma_chip *chip)
1191 {
1192         struct dw_dma_platform_data *pdata;
1193         struct dw_dma           *dw;
1194         bool                    autocfg = false;
1195         unsigned int            dw_params;
1196         unsigned int            i;
1197         int                     err;
1198
1199         dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
1200         if (!dw)
1201                 return -ENOMEM;
1202
1203         dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
1204         if (!dw->pdata)
1205                 return -ENOMEM;
1206
1207         dw->regs = chip->regs;
1208         chip->dw = dw;
1209
1210         pm_runtime_get_sync(chip->dev);
1211
1212         if (!chip->pdata) {
1213                 dw_params = dma_readl(dw, DW_PARAMS);
1214                 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
1215
1216                 autocfg = dw_params >> DW_PARAMS_EN & 1;
1217                 if (!autocfg) {
1218                         err = -EINVAL;
1219                         goto err_pdata;
1220                 }
1221
1222                 /* Reassign the platform data pointer */
1223                 pdata = dw->pdata;
1224
1225                 /* Get hardware configuration parameters */
1226                 pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
1227                 pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
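                /*
                 * The data width fields are two bits each and decode to a
                 * master bus width in bytes: e.g. (hypothetically) a value
                 * of 1 gives 4 << 1 = 8 bytes, i.e. a 64-bit master.
                 */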
1228                 for (i = 0; i < pdata->nr_masters; i++) {
1229                         pdata->data_width[i] =
1230                                 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
1231                 }
1232                 pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
1233
1234                 /* Fill platform data with the default values */
1235                 pdata->is_private = true;
1236                 pdata->is_memcpy = true;
1237                 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1238                 pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
1239         } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
1240                 err = -EINVAL;
1241                 goto err_pdata;
1242         } else {
1243                 memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
1244
1245                 /* Reassign the platform data pointer */
1246                 pdata = dw->pdata;
1247         }
1248
1249         dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
1250                                 GFP_KERNEL);
1251         if (!dw->chan) {
1252                 err = -ENOMEM;
1253                 goto err_pdata;
1254         }
1255
1256         /* Calculate the all-channel mask before DMA setup */
1257         dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
1258
1259         /* Force dma off, just in case */
1260         dw_dma_off(dw);
1261
1262         idma32_fifo_partition(dw);
1263
1264         /* Device and instance ID for IRQ and DMA pool */
1265         if (pdata->is_idma32)
1266                 snprintf(dw->name, sizeof(dw->name), "idma32:dmac%d", chip->id);
1267         else
1268                 snprintf(dw->name, sizeof(dw->name), "dw:dmac%d", chip->id);
1269
1270         /* Create a pool of consistent memory blocks for hardware descriptors */
1271         dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
1272                                          sizeof(struct dw_desc), 4, 0);
1273         if (!dw->desc_pool) {
1274                 dev_err(chip->dev, "No memory for descriptors dma pool\n");
1275                 err = -ENOMEM;
1276                 goto err_pdata;
1277         }
1278
1279         tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1280
1281         err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
1282                           dw->name, dw);
1283         if (err)
1284                 goto err_pdata;
1285
1286         INIT_LIST_HEAD(&dw->dma.channels);
1287         for (i = 0; i < pdata->nr_channels; i++) {
1288                 struct dw_dma_chan      *dwc = &dw->chan[i];
1289
1290                 dwc->chan.device = &dw->dma;
1291                 dma_cookie_init(&dwc->chan);
1292                 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1293                         list_add_tail(&dwc->chan.device_node,
1294                                         &dw->dma.channels);
1295                 else
1296                         list_add(&dwc->chan.device_node, &dw->dma.channels);
1297
1298                 /* 7 is highest priority & 0 is lowest. */
1299                 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1300                         dwc->priority = pdata->nr_channels - i - 1;
1301                 else
1302                         dwc->priority = i;
1303
1304                 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1305                 spin_lock_init(&dwc->lock);
1306                 dwc->mask = 1 << i;
1307
1308                 INIT_LIST_HEAD(&dwc->active_list);
1309                 INIT_LIST_HEAD(&dwc->queue);
1310
1311                 channel_clear_bit(dw, CH_EN, dwc->mask);
1312
1313                 dwc->direction = DMA_TRANS_NONE;
1314
1315                 /* Hardware configuration */
1316                 if (autocfg) {
1317                         unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
1318                         void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
1319                         unsigned int dwc_params = readl(addr);
1320
1321                         dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
1322                                            dwc_params);
1323
1324                         /*
1325                          * Decode the maximum block size for the given channel.
1326                          * The stored 4-bit value encodes sizes from 3 (0x00)
1327                          * up to 4095 (0x0a).
1328                          */
1329                         dwc->block_size =
1330                                 (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
1331                         dwc->nollp =
1332                                 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
1333                 } else {
1334                         dwc->block_size = pdata->block_size;
1335                         dwc->nollp = !pdata->multi_block[i];
1336                 }
1337         }
1338
1339         /* Clear all interrupts on all channels. */
1340         dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1341         dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1342         dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1343         dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1344         dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1345
1346         /* Set capabilities */
1347         dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1348         if (pdata->is_private)
1349                 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1350         if (pdata->is_memcpy)
1351                 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1352
1353         dw->dma.dev = chip->dev;
1354         dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1355         dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1356
1357         dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1358         dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1359
1360         dw->dma.device_config = dwc_config;
1361         dw->dma.device_pause = dwc_pause;
1362         dw->dma.device_resume = dwc_resume;
1363         dw->dma.device_terminate_all = dwc_terminate_all;
1364
1365         dw->dma.device_tx_status = dwc_tx_status;
1366         dw->dma.device_issue_pending = dwc_issue_pending;
1367
1368         /* DMA capabilities */
1369         dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
1370         dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
1371         dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
1372                              BIT(DMA_MEM_TO_MEM);
1373         dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1374
1375         err = dma_async_device_register(&dw->dma);
1376         if (err)
1377                 goto err_dma_register;
1378
1379         dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
1380                  pdata->nr_channels);
1381
1382         pm_runtime_put_sync_suspend(chip->dev);
1383
1384         return 0;
1385
1386 err_dma_register:
1387         free_irq(chip->irq, dw);
1388 err_pdata:
1389         pm_runtime_put_sync_suspend(chip->dev);
1390         return err;
1391 }
1392 EXPORT_SYMBOL_GPL(dw_dma_probe);
1393
1394 int dw_dma_remove(struct dw_dma_chip *chip)
1395 {
1396         struct dw_dma           *dw = chip->dw;
1397         struct dw_dma_chan      *dwc, *_dwc;
1398
1399         pm_runtime_get_sync(chip->dev);
1400
1401         dw_dma_off(dw);
1402         dma_async_device_unregister(&dw->dma);
1403
1404         free_irq(chip->irq, dw);
1405         tasklet_kill(&dw->tasklet);
1406
1407         list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1408                         chan.device_node) {
1409                 list_del(&dwc->chan.device_node);
1410                 channel_clear_bit(dw, CH_EN, dwc->mask);
1411         }
1412
1413         pm_runtime_put_sync_suspend(chip->dev);
1414         return 0;
1415 }
1416 EXPORT_SYMBOL_GPL(dw_dma_remove);
1417
1418 int dw_dma_disable(struct dw_dma_chip *chip)
1419 {
1420         struct dw_dma *dw = chip->dw;
1421
1422         dw_dma_off(dw);
1423         return 0;
1424 }
1425 EXPORT_SYMBOL_GPL(dw_dma_disable);
1426
1427 int dw_dma_enable(struct dw_dma_chip *chip)
1428 {
1429         struct dw_dma *dw = chip->dw;
1430
1431         idma32_fifo_partition(dw);
1432
1433         dw_dma_on(dw);
1434         return 0;
1435 }
1436 EXPORT_SYMBOL_GPL(dw_dma_enable);
1437
1438 MODULE_LICENSE("GPL v2");
1439 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
1440 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1441 MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");