/* drivers/usb/musb/musb_cppi41.c (GNU Linux-libre 4.14.290-gnu1) */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "cppi_dma.h"
#include "musb_core.h"
#include "musb_trace.h"

#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREQ_NONE            0
#define EP_MODE_AUTOREQ_ALL_NEOP        1
#define EP_MODE_AUTOREQ_ALWAYS          3

#define EP_MODE_DMA_TRANSPARENT         0
#define EP_MODE_DMA_RNDIS               1
#define EP_MODE_DMA_GEN_RNDIS           3

#define USB_CTRL_TX_MODE        0x70
#define USB_CTRL_RX_MODE        0x74
#define USB_CTRL_AUTOREQ        0xd0
#define USB_TDOWN               0xd8

#define MUSB_DMA_NUM_CHANNELS 15

#define DA8XX_USB_MODE          0x10
#define DA8XX_USB_AUTOREQ       0x14
#define DA8XX_USB_TEARDOWN      0x1c

#define DA8XX_DMA_NUM_CHANNELS 4

struct cppi41_dma_controller {
        struct dma_controller controller;
        struct cppi41_dma_channel *rx_channel;
        struct cppi41_dma_channel *tx_channel;
        struct hrtimer early_tx;
        struct list_head early_tx_list;
        u32 rx_mode;
        u32 tx_mode;
        u32 auto_req;

        u32 tdown_reg;
        u32 autoreq_reg;

        void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel,
                             unsigned int mode);
        u8 num_channels;
};

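/*
 * Snapshot the host-side RX data toggle before a DMA transfer so that
 * update_rx_toggle() can later detect the spurious DATA1->DATA0 reset
 * described in AM335x Advisory 1.0.13.
 */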
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
        u16 csr;
        u8 toggle;

        if (cppi41_channel->is_tx)
                return;
        if (!is_host_active(cppi41_channel->controller->controller.musb))
                return;

        csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
        toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

        cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct musb *musb = hw_ep->musb;
        u16 csr;
        u8 toggle;

        if (cppi41_channel->is_tx)
                return;
        if (!is_host_active(musb))
                return;

        musb_ep_select(musb->mregs, hw_ep->epnum);
        csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
        toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

        /*
         * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
         * the data toggle may reset from DATA1 to DATA0 while receiving data
         * from more than one endpoint.
         */
        if (!toggle && toggle == cppi41_channel->usb_toggle) {
                csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
                musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
                musb_dbg(musb, "Restoring DATA1 toggle.");
        }

        cppi41_channel->usb_toggle = toggle;
}

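/*
 * The MUSB core keeps TXPKTRDY set while a packet is still queued in the
 * TX FIFO, so the FIFO is considered empty once that bit clears.
 */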
static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
        u8              epnum = hw_ep->epnum;
        struct musb     *musb = hw_ep->musb;
        void __iomem    *epio = musb->endpoints[epnum].regs;
        u16             csr;

        musb_ep_select(musb->mregs, hw_ep->epnum);
        csr = musb_readw(epio, MUSB_TXCSR);
        if (csr & MUSB_TXCSR_TXPKTRDY)
                return false;
        return true;
}

static void cppi41_dma_callback(void *private_data,
                                const struct dmaengine_result *result);

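/*
 * Complete the current request, or queue the next chunk: in the
 * transparent fallback mode a request larger than one packet is fed to
 * the DMA engine one max-packet-sized chunk at a time, and for host RX
 * the REQPKT bit must be re-armed before each chunk.
 */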
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct musb *musb = hw_ep->musb;
        void __iomem *epio = hw_ep->regs;
        u16 csr;

        if (!cppi41_channel->prog_len ||
            (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

                /* done, complete */
                cppi41_channel->channel.actual_len =
                        cppi41_channel->transferred;
                cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
                cppi41_channel->channel.rx_packet_done = true;

                /*
                 * Transmit a ZLP in PIO mode for transfers whose size is a
                 * multiple of the EP packet size.
                 */
                if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
                                        cppi41_channel->packet_sz) == 0) {
                        musb_ep_select(musb->mregs, hw_ep->epnum);
                        csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
                        musb_writew(epio, MUSB_TXCSR, csr);
                }

                trace_musb_cppi41_done(cppi41_channel);
                musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
        } else {
                /* next iteration, reload */
                struct dma_chan *dc = cppi41_channel->dc;
                struct dma_async_tx_descriptor *dma_desc;
                enum dma_transfer_direction direction;
                u32 remain_bytes;

                cppi41_channel->buf_addr += cppi41_channel->packet_sz;

                remain_bytes = cppi41_channel->total_len;
                remain_bytes -= cppi41_channel->transferred;
                remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
                cppi41_channel->prog_len = remain_bytes;

                direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
                        : DMA_DEV_TO_MEM;
                dma_desc = dmaengine_prep_slave_single(dc,
                                cppi41_channel->buf_addr,
                                remain_bytes,
                                direction,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (WARN_ON(!dma_desc))
                        return;

                dma_desc->callback_result = cppi41_dma_callback;
                dma_desc->callback_param = &cppi41_channel->channel;
                cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
                trace_musb_cppi41_cont(cppi41_channel);
                dma_async_issue_pending(dc);

                if (!cppi41_channel->is_tx) {
                        musb_ep_select(musb->mregs, hw_ep->epnum);
                        csr = musb_readw(epio, MUSB_RXCSR);
                        csr |= MUSB_RXCSR_H_REQPKT;
                        musb_writew(epio, MUSB_RXCSR, csr);
                }
        }
}

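/*
 * hrtimer callback for the early-TX workaround: re-poll the TX FIFO of
 * every channel on early_tx_list and complete those that have drained;
 * re-arm the timer while any channel is still pending.
 */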
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
        struct cppi41_dma_controller *controller;
        struct cppi41_dma_channel *cppi41_channel, *n;
        struct musb *musb;
        unsigned long flags;
        enum hrtimer_restart ret = HRTIMER_NORESTART;

        controller = container_of(timer, struct cppi41_dma_controller,
                        early_tx);
        musb = controller->controller.musb;

        spin_lock_irqsave(&musb->lock, flags);
        list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
                        tx_check) {
                bool empty;
                struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

                empty = musb_is_tx_fifo_empty(hw_ep);
                if (empty) {
                        list_del_init(&cppi41_channel->tx_check);
                        cppi41_trans_done(cppi41_channel);
                }
        }

        if (!list_empty(&controller->early_tx_list) &&
            !hrtimer_is_queued(&controller->early_tx)) {
                ret = HRTIMER_RESTART;
                hrtimer_forward_now(&controller->early_tx, 20 * NSEC_PER_USEC);
        }

        spin_unlock_irqrestore(&musb->lock, flags);
        return ret;
}

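/*
 * dmaengine completion callback. On AM335x the DMA engine can report
 * completion before the TX FIFO has actually drained; the body below
 * spins briefly on high-speed links and falls back to the early_tx
 * hrtimer on full-speed ones.
 */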
static void cppi41_dma_callback(void *private_data,
                                const struct dmaengine_result *result)
{
        struct dma_channel *channel = private_data;
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct cppi41_dma_controller *controller;
        struct musb *musb = hw_ep->musb;
        unsigned long flags;
        struct dma_tx_state txstate;
        u32 transferred;
        int is_hs = 0;
        bool empty;

        controller = cppi41_channel->controller;
        if (controller->controller.dma_callback)
                controller->controller.dma_callback(&controller->controller);

        if (result->result == DMA_TRANS_ABORTED)
                return;

        spin_lock_irqsave(&musb->lock, flags);

        dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
                        &txstate);
        transferred = cppi41_channel->prog_len - txstate.residue;
        cppi41_channel->transferred += transferred;

        trace_musb_cppi41_gb(cppi41_channel);
        update_rx_toggle(cppi41_channel);

        if (cppi41_channel->transferred == cppi41_channel->total_len ||
                        transferred < cppi41_channel->packet_sz)
                cppi41_channel->prog_len = 0;

        if (cppi41_channel->is_tx) {
                u8 type;

                if (is_host_active(musb))
                        type = hw_ep->out_qh->type;
                else
                        type = hw_ep->ep_in.type;

                if (type == USB_ENDPOINT_XFER_ISOC)
                        /*
                         * Don't use the early-TX-interrupt workaround below
                         * for isochronous transfers. Since they are periodic,
                         * by the time the next transfer is scheduled the
                         * current one should already be done. This avoids
                         * audio playback underrun issues.
                         */
                        empty = true;
                else
                        empty = musb_is_tx_fifo_empty(hw_ep);
        }

        if (!cppi41_channel->is_tx || empty) {
                cppi41_trans_done(cppi41_channel);
                goto out;
        }

        /*
         * On AM335x it has been observed that the TX interrupt fires too
         * early, i.e. the TX FIFO is not yet empty although the DMA engine
         * reports that it is done with the transfer. We don't receive a
         * FIFO-empty interrupt, so the only thing we can do is poll for the
         * bit. On HS this usually takes 2us, on FS around 110us - 150us
         * depending on the transfer size. We spin on HS (no longer than
         * 25us) and set up a timer on FS to check for the bit and complete
         * the transfer.
         */
        if (is_host_active(musb)) {
                if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
                        is_hs = 1;
        } else {
                if (musb->g.speed == USB_SPEED_HIGH)
                        is_hs = 1;
        }
        if (is_hs) {
                unsigned wait = 25;

                do {
                        empty = musb_is_tx_fifo_empty(hw_ep);
                        if (empty) {
                                cppi41_trans_done(cppi41_channel);
                                goto out;
                        }
                        wait--;
                        if (!wait)
                                break;
                        cpu_relax();
                } while (1);
        }
        list_add_tail(&cppi41_channel->tx_check,
                        &controller->early_tx_list);
        if (!hrtimer_is_queued(&controller->early_tx)) {
                unsigned long usecs = cppi41_channel->total_len / 10;

                hrtimer_start_range_ns(&controller->early_tx,
                                       usecs * NSEC_PER_USEC,
                                       20 * NSEC_PER_USEC,
                                       HRTIMER_MODE_REL);
        }

out:
        spin_unlock_irqrestore(&musb->lock, flags);
}

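/* Each endpoint owns a 2-bit field in the mode/autoreq registers. */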
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
        unsigned shift;

        shift = (ep - 1) * 2;
        old &= ~(3 << shift);
        old |= mode << shift;
        return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
                unsigned mode)
{
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->controller.musb;
        u32 port;
        u32 new_mode;
        u32 old_mode;

        if (cppi41_channel->is_tx)
                old_mode = controller->tx_mode;
        else
                old_mode = controller->rx_mode;
        port = cppi41_channel->port_num;
        new_mode = update_ep_mode(port, mode, old_mode);

        if (new_mode == old_mode)
                return;
        if (cppi41_channel->is_tx) {
                controller->tx_mode = new_mode;
                musb_writel(musb->ctrl_base, USB_CTRL_TX_MODE, new_mode);
        } else {
                controller->rx_mode = new_mode;
                musb_writel(musb->ctrl_base, USB_CTRL_RX_MODE, new_mode);
        }
}

static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
                unsigned int mode)
{
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->controller.musb;
        unsigned int shift;
        u32 port;
        u32 new_mode;
        u32 old_mode;

        old_mode = controller->tx_mode;
        port = cppi41_channel->port_num;

        shift = (port - 1) * 4;
        if (!cppi41_channel->is_tx)
                shift += 16;
        new_mode = old_mode & ~(3 << shift);
        new_mode |= mode << shift;

        if (new_mode == old_mode)
                return;
        controller->tx_mode = new_mode;
        musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode);
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
                unsigned mode)
{
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        u32 port;
        u32 new_mode;
        u32 old_mode;

        old_mode = controller->auto_req;
        port = cppi41_channel->port_num;
        new_mode = update_ep_mode(port, mode, old_mode);

        if (new_mode == old_mode)
                return;
        controller->auto_req = new_mode;
        musb_writel(controller->controller.musb->ctrl_base,
                    controller->autoreq_reg, new_mode);
}

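/*
 * Program one transfer: TX transfers larger than one packet use generic
 * RNDIS mode with auto-req so the hardware segments the buffer itself;
 * everything else runs in transparent mode, capped at one packet.
 */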
static bool cppi41_configure_channel(struct dma_channel *channel,
                                u16 packet_sz, u8 mode,
                                dma_addr_t dma_addr, u32 len)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct dma_chan *dc = cppi41_channel->dc;
        struct dma_async_tx_descriptor *dma_desc;
        enum dma_transfer_direction direction;
        struct musb *musb = cppi41_channel->controller->controller.musb;
        unsigned use_gen_rndis = 0;

        cppi41_channel->buf_addr = dma_addr;
        cppi41_channel->total_len = len;
        cppi41_channel->transferred = 0;
        cppi41_channel->packet_sz = packet_sz;
        cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

        /*
         * Due to AM335x Advisory 1.0.13 we are not allowed to transfer more
         * than the max packet size at a time.
         */
        if (cppi41_channel->is_tx)
                use_gen_rndis = 1;

        if (use_gen_rndis) {
                /* RNDIS mode */
                if (len > packet_sz) {
                        musb_writel(musb->ctrl_base,
                                RNDIS_REG(cppi41_channel->port_num), len);
                        /* gen rndis */
                        controller->set_dma_mode(cppi41_channel,
                                        EP_MODE_DMA_GEN_RNDIS);

                        /* auto req */
                        cppi41_set_autoreq_mode(cppi41_channel,
                                        EP_MODE_AUTOREQ_ALL_NEOP);
                } else {
                        musb_writel(musb->ctrl_base,
                                        RNDIS_REG(cppi41_channel->port_num), 0);
                        controller->set_dma_mode(cppi41_channel,
                                        EP_MODE_DMA_TRANSPARENT);
                        cppi41_set_autoreq_mode(cppi41_channel,
                                        EP_MODE_AUTOREQ_NONE);
                }
        } else {
                /* fallback mode */
                controller->set_dma_mode(cppi41_channel,
                                EP_MODE_DMA_TRANSPARENT);
                cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
                len = min_t(u32, packet_sz, len);
        }
        cppi41_channel->prog_len = len;
        direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
        dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!dma_desc)
                return false;

        dma_desc->callback_result = cppi41_dma_callback;
        dma_desc->callback_param = channel;
        cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
        cppi41_channel->channel.rx_packet_done = false;

        trace_musb_cppi41_config(cppi41_channel);

        save_rx_toggle(cppi41_channel);
        dma_async_issue_pending(dc);
        return true;
}

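/*
 * musb "channel_alloc" hook: hardware endpoint N maps to DMA channel
 * N - 1, with separate TX and RX channel arrays.
 */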
static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
                                struct musb_hw_ep *hw_ep, u8 is_tx)
{
        struct cppi41_dma_controller *controller = container_of(c,
                        struct cppi41_dma_controller, controller);
        struct cppi41_dma_channel *cppi41_channel = NULL;
        u8 ch_num = hw_ep->epnum - 1;

        if (ch_num >= controller->num_channels)
                return NULL;

        if (is_tx)
                cppi41_channel = &controller->tx_channel[ch_num];
        else
                cppi41_channel = &controller->rx_channel[ch_num];

        if (!cppi41_channel->dc)
                return NULL;

        if (cppi41_channel->is_allocated)
                return NULL;

        cppi41_channel->hw_ep = hw_ep;
        cppi41_channel->is_allocated = 1;

        trace_musb_cppi41_alloc(cppi41_channel);
        return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;

        trace_musb_cppi41_free(cppi41_channel);
        if (cppi41_channel->is_allocated) {
                cppi41_channel->is_allocated = 0;
                channel->status = MUSB_DMA_STATUS_FREE;
                channel->actual_len = 0;
        }
}

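/*
 * musb "channel_program" hook. For high-bandwidth host endpoints the
 * effective packet size is hb_mult times the base maxpacket value
 * encoded in the low 11 bits.
 */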
static int cppi41_dma_channel_program(struct dma_channel *channel,
                                u16 packet_sz, u8 mode,
                                dma_addr_t dma_addr, u32 len)
{
        int ret;
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        int hb_mult = 0;

        BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
                channel->status == MUSB_DMA_STATUS_BUSY);

        if (is_host_active(cppi41_channel->controller->controller.musb)) {
                if (cppi41_channel->is_tx)
                        hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
                else
                        hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
        }

        channel->status = MUSB_DMA_STATUS_BUSY;
        channel->actual_len = 0;

        if (hb_mult)
                packet_sz = hb_mult * (packet_sz & 0x7FF);

        ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
        if (!ret)
                channel->status = MUSB_DMA_STATUS_FREE;

        return ret;
}

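/*
 * Gadget-only DMA eligibility check: per AM335x Advisory 1.0.13, only
 * bulk TX endpoints may use DMA in device mode.
 */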
static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
                void *buf, u32 length)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->controller.musb;

        if (is_host_active(musb)) {
                WARN_ON(1);
                return 1;
        }
        if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
                return 0;
        if (cppi41_channel->is_tx)
                return 1;
        /* AM335x Advisory 1.0.13. No workaround for device RX mode */
        return 0;
}

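/*
 * Tear down an in-flight transfer: disable DMA on the endpoint, flush
 * any stale FIFO contents and loop on dmaengine_terminate_all() until
 * the teardown completes (it returns -EAGAIN while still busy).
 */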
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->controller.musb;
        void __iomem *epio = cppi41_channel->hw_ep->regs;
        int tdbit;
        int ret;
        unsigned is_tx;
        u16 csr;

        is_tx = cppi41_channel->is_tx;
        trace_musb_cppi41_abort(cppi41_channel);

        if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
                return 0;

        list_del_init(&cppi41_channel->tx_check);
        if (is_tx) {
                csr = musb_readw(epio, MUSB_TXCSR);
                csr &= ~MUSB_TXCSR_DMAENAB;
                musb_writew(epio, MUSB_TXCSR, csr);
        } else {
                cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

                /* delay to drain the cppi dma pipeline for isoch */
                udelay(250);

                csr = musb_readw(epio, MUSB_RXCSR);
                csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
                musb_writew(epio, MUSB_RXCSR, csr);

                /* wait to drain the cppi dma pipeline */
                udelay(50);

                csr = musb_readw(epio, MUSB_RXCSR);
                if (csr & MUSB_RXCSR_RXPKTRDY) {
                        csr |= MUSB_RXCSR_FLUSHFIFO;
                        musb_writew(epio, MUSB_RXCSR, csr);
                        musb_writew(epio, MUSB_RXCSR, csr);
                }
        }

        /* DA8xx Advisory 2.3.27: wait 250 ms before starting the teardown */
        if (musb->io.quirks & MUSB_DA8XX)
                mdelay(250);

        tdbit = 1 << cppi41_channel->port_num;
        if (is_tx)
                tdbit <<= 16;

        do {
                if (is_tx)
                        musb_writel(musb->ctrl_base, controller->tdown_reg,
                                    tdbit);
                ret = dmaengine_terminate_all(cppi41_channel->dc);
        } while (ret == -EAGAIN);

        if (is_tx) {
                musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit);

                csr = musb_readw(epio, MUSB_TXCSR);
                if (csr & MUSB_TXCSR_TXPKTRDY) {
                        csr |= MUSB_TXCSR_FLUSHFIFO;
                        musb_writew(epio, MUSB_TXCSR, csr);
                }
        }

        cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
        return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
        struct dma_chan *dc;
        int i;

        for (i = 0; i < ctrl->num_channels; i++) {
                dc = ctrl->tx_channel[i].dc;
                if (dc)
                        dma_release_channel(dc);
                dc = ctrl->rx_channel[i].dc;
                if (dc)
                        dma_release_channel(dc);
        }
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
        cppi41_release_all_dma_chans(controller);
}

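/*
 * Parse the parent device's "dma-names" DT property (entries of the
 * form "tx1".."txN" / "rx1".."rxN") and request the matching dmaengine
 * channel for each musb DMA channel.
 */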
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
        struct musb *musb = controller->controller.musb;
        struct device *dev = musb->controller;
        struct device_node *np = dev->parent->of_node;
        struct cppi41_dma_channel *cppi41_channel;
        int count;
        int i;
        int ret;

        count = of_property_count_strings(np, "dma-names");
        if (count < 0)
                return count;

        for (i = 0; i < count; i++) {
                struct dma_chan *dc;
                struct dma_channel *musb_dma;
                const char *str;
                unsigned is_tx;
                unsigned int port;

                ret = of_property_read_string_index(np, "dma-names", i, &str);
                if (ret)
                        goto err;
                if (strstarts(str, "tx"))
                        is_tx = 1;
                else if (strstarts(str, "rx"))
                        is_tx = 0;
                else {
                        dev_err(dev, "Wrong dmatype %s\n", str);
                        goto err;
                }
                ret = kstrtouint(str + 2, 0, &port);
                if (ret)
                        goto err;

                ret = -EINVAL;
                if (port > controller->num_channels || !port)
                        goto err;
                if (is_tx)
                        cppi41_channel = &controller->tx_channel[port - 1];
                else
                        cppi41_channel = &controller->rx_channel[port - 1];

                cppi41_channel->controller = controller;
                cppi41_channel->port_num = port;
                cppi41_channel->is_tx = is_tx;
                INIT_LIST_HEAD(&cppi41_channel->tx_check);

                musb_dma = &cppi41_channel->channel;
                musb_dma->private_data = cppi41_channel;
                musb_dma->status = MUSB_DMA_STATUS_FREE;
                musb_dma->max_len = SZ_4M;

                dc = dma_request_chan(dev->parent, str);
                if (IS_ERR(dc)) {
                        ret = PTR_ERR(dc);
                        if (ret != -EPROBE_DEFER)
                                dev_err(dev, "Failed to request %s: %d.\n",
                                        str, ret);
                        goto err;
                }

                cppi41_channel->dc = dc;
        }
        return 0;
err:
        cppi41_release_all_dma_chans(controller);
        return ret;
}

void cppi41_dma_controller_destroy(struct dma_controller *c)
{
        struct cppi41_dma_controller *controller = container_of(c,
                        struct cppi41_dma_controller, controller);

        hrtimer_cancel(&controller->early_tx);
        cppi41_dma_controller_stop(controller);
        kfree(controller->rx_channel);
        kfree(controller->tx_channel);
        kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);

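/*
 * Allocate and initialise the controller, selecting the DA8xx or AM335x
 * register layout based on the MUSB_DA8XX quirk. Returns NULL on most
 * failures, but ERR_PTR(-EPROBE_DEFER) when a DMA channel is not yet
 * available so the glue layer can retry.
 */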
struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
        struct cppi41_dma_controller *controller;
        int channel_size;
        int ret = 0;

        if (!musb->controller->parent->of_node) {
                dev_err(musb->controller, "Need DT for the DMA engine.\n");
                return NULL;
        }

        controller = kzalloc(sizeof(*controller), GFP_KERNEL);
        if (!controller)
                goto kzalloc_fail;

        hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        controller->early_tx.function = cppi41_recheck_tx_req;
        INIT_LIST_HEAD(&controller->early_tx_list);

        controller->controller.channel_alloc = cppi41_dma_channel_allocate;
        controller->controller.channel_release = cppi41_dma_channel_release;
        controller->controller.channel_program = cppi41_dma_channel_program;
        controller->controller.channel_abort = cppi41_dma_channel_abort;
        controller->controller.is_compatible = cppi41_is_compatible;
        controller->controller.musb = musb;

        if (musb->io.quirks & MUSB_DA8XX) {
                controller->tdown_reg = DA8XX_USB_TEARDOWN;
                controller->autoreq_reg = DA8XX_USB_AUTOREQ;
                controller->set_dma_mode = da8xx_set_dma_mode;
                controller->num_channels = DA8XX_DMA_NUM_CHANNELS;
        } else {
                controller->tdown_reg = USB_TDOWN;
                controller->autoreq_reg = USB_CTRL_AUTOREQ;
                controller->set_dma_mode = cppi41_set_dma_mode;
                controller->num_channels = MUSB_DMA_NUM_CHANNELS;
        }

        channel_size = controller->num_channels *
                        sizeof(struct cppi41_dma_channel);
        controller->rx_channel = kzalloc(channel_size, GFP_KERNEL);
        if (!controller->rx_channel)
                goto rx_channel_alloc_fail;
        controller->tx_channel = kzalloc(channel_size, GFP_KERNEL);
        if (!controller->tx_channel)
                goto tx_channel_alloc_fail;

        ret = cppi41_dma_controller_start(controller);
        if (ret)
                goto plat_get_fail;
        return &controller->controller;

plat_get_fail:
        kfree(controller->tx_channel);
tx_channel_alloc_fail:
        kfree(controller->rx_channel);
rx_channel_alloc_fail:
        kfree(controller);
kzalloc_fail:
        if (ret == -EPROBE_DEFER)
                return ERR_PTR(ret);
        return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);
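
/*
 * Usage sketch (hypothetical glue code, not part of this file): a MUSB
 * platform driver exposes this controller through its platform ops, in
 * the way musb_dsps.c does, e.g.:
 *
 *	static const struct musb_platform_ops my_glue_ops = {
 *		.quirks		= MUSB_DMA_CPPI41,
 *		.dma_init	= cppi41_dma_controller_create,
 *		.dma_exit	= cppi41_dma_controller_destroy,
 *		...
 *	};
 */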