/* drivers/usb/musb/musb_cppi41.c (GNU Linux-libre 4.4.284-gnu1) */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/of.h>

#include "musb_core.h"

#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))

#define EP_MODE_AUTOREQ_NONE            0
#define EP_MODE_AUTOREQ_ALL_NEOP        1
#define EP_MODE_AUTOREQ_ALWAYS          3

#define EP_MODE_DMA_TRANSPARENT         0
#define EP_MODE_DMA_RNDIS               1
#define EP_MODE_DMA_GEN_RNDIS           3

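/*
 * Glue-layer control registers: USB_CTRL_TX_MODE, USB_CTRL_RX_MODE and
 * USB_CTRL_AUTOREQ each pack one 2-bit field per endpoint (see
 * update_ep_mode() below), selecting among the EP_MODE_* values above.
 * USB_TDOWN carries the per-channel teardown bits used on abort.
 */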
#define USB_CTRL_TX_MODE        0x70
#define USB_CTRL_RX_MODE        0x74
#define USB_CTRL_AUTOREQ        0xd0
#define USB_TDOWN               0xd8

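/*
 * Per-direction, per-endpoint channel state: wraps the generic MUSB
 * dma_channel, the dmaengine channel doing the actual work, and the
 * bookkeeping needed to split a transfer into packet-sized chunks.
 */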
struct cppi41_dma_channel {
        struct dma_channel channel;
        struct cppi41_dma_controller *controller;
        struct musb_hw_ep *hw_ep;
        struct dma_chan *dc;
        dma_cookie_t cookie;
        u8 port_num;
        u8 is_tx;
        u8 is_allocated;
        u8 usb_toggle;

        dma_addr_t buf_addr;
        u32 total_len;
        u32 prog_len;
        u32 transferred;
        u32 packet_sz;
        struct list_head tx_check;
        int tx_zlp;
};

#define MUSB_DMA_NUM_CHANNELS 15

struct cppi41_dma_controller {
        struct dma_controller controller;
        struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
        struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
        struct musb *musb;
        struct hrtimer early_tx;
        struct list_head early_tx_list;
        u32 rx_mode;
        u32 tx_mode;
        u32 auto_req;
};

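/*
 * save_rx_toggle()/update_rx_toggle() snapshot the host-side RX data
 * toggle before a DMA transfer and restore it afterwards, working around
 * AM335x Advisory 1.0.13 (the toggle may spuriously reset to DATA0 when
 * receiving on more than one endpoint).
 */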
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
        u16 csr;
        u8 toggle;

        if (cppi41_channel->is_tx)
                return;
        if (!is_host_active(cppi41_channel->controller->musb))
                return;

        csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
        toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

        cppi41_channel->usb_toggle = toggle;
}

static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct musb *musb = hw_ep->musb;
        u16 csr;
        u8 toggle;

        if (cppi41_channel->is_tx)
                return;
        if (!is_host_active(musb))
                return;

        musb_ep_select(musb->mregs, hw_ep->epnum);
        csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
        toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

        /*
         * AM335x Advisory 1.0.13: Due to an internal synchronisation error,
         * the data toggle may reset from DATA1 to DATA0 while receiving data
         * from more than one endpoint.
         */
        if (!toggle && toggle == cppi41_channel->usb_toggle) {
                csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
                musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
                dev_dbg(cppi41_channel->controller->musb->controller,
                                "Restoring DATA1 toggle.\n");
        }

        cppi41_channel->usb_toggle = toggle;
}

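/* The TX FIFO is considered empty once the core has cleared TXPKTRDY. */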
static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep)
{
        u8              epnum = hw_ep->epnum;
        struct musb     *musb = hw_ep->musb;
        void __iomem    *epio = musb->endpoints[epnum].regs;
        u16             csr;

        musb_ep_select(musb->mregs, hw_ep->epnum);
        csr = musb_readw(epio, MUSB_TXCSR);
        if (csr & MUSB_TXCSR_TXPKTRDY)
                return false;
        return true;
}

static void cppi41_dma_callback(void *private_data);

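/*
 * Either completes the current transfer (accounting, optional PIO ZLP,
 * musb_dma_completion()) or, when running in the packet-at-a-time RX
 * fallback, programs the dmaengine for the next packet-sized chunk.
 */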
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct musb *musb = hw_ep->musb;
        void __iomem *epio = hw_ep->regs;
        u16 csr;

        if (!cppi41_channel->prog_len ||
            (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

                /* done, complete */
                cppi41_channel->channel.actual_len =
                        cppi41_channel->transferred;
                cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
                cppi41_channel->channel.rx_packet_done = true;

                /*
                 * transmit ZLP using PIO mode for transfers whose size is
                 * a multiple of the EP packet size.
                 */
                if (cppi41_channel->tx_zlp && (cppi41_channel->transferred %
                                        cppi41_channel->packet_sz) == 0) {
                        musb_ep_select(musb->mregs, hw_ep->epnum);
                        csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
                        musb_writew(epio, MUSB_TXCSR, csr);
                }
                musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
        } else {
                /* next iteration, reload */
                struct dma_chan *dc = cppi41_channel->dc;
                struct dma_async_tx_descriptor *dma_desc;
                enum dma_transfer_direction direction;
                u32 remain_bytes;

                cppi41_channel->buf_addr += cppi41_channel->packet_sz;

                remain_bytes = cppi41_channel->total_len;
                remain_bytes -= cppi41_channel->transferred;
                remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
                cppi41_channel->prog_len = remain_bytes;

                direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
                        : DMA_DEV_TO_MEM;
                dma_desc = dmaengine_prep_slave_single(dc,
                                cppi41_channel->buf_addr,
                                remain_bytes,
                                direction,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (WARN_ON(!dma_desc))
                        return;

                dma_desc->callback = cppi41_dma_callback;
                dma_desc->callback_param = &cppi41_channel->channel;
                cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
                dma_async_issue_pending(dc);

                if (!cppi41_channel->is_tx) {
                        musb_ep_select(musb->mregs, hw_ep->epnum);
                        csr = musb_readw(epio, MUSB_RXCSR);
                        csr |= MUSB_RXCSR_H_REQPKT;
                        musb_writew(epio, MUSB_RXCSR, csr);
                }
        }
}

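/*
 * hrtimer handler for the early-TX workaround: walks the channels queued
 * on early_tx_list, completes those whose TX FIFO has drained, and
 * re-arms itself in 20us steps while any are still pending.
 */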
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
        struct cppi41_dma_controller *controller;
        struct cppi41_dma_channel *cppi41_channel, *n;
        struct musb *musb;
        unsigned long flags;
        enum hrtimer_restart ret = HRTIMER_NORESTART;

        controller = container_of(timer, struct cppi41_dma_controller,
                        early_tx);
        musb = controller->musb;

        spin_lock_irqsave(&musb->lock, flags);
        list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
                        tx_check) {
                bool empty;
                struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

                empty = musb_is_tx_fifo_empty(hw_ep);
                if (empty) {
                        list_del_init(&cppi41_channel->tx_check);
                        cppi41_trans_done(cppi41_channel);
                }
        }

        if (!list_empty(&controller->early_tx_list) &&
            !hrtimer_is_queued(&controller->early_tx)) {
                ret = HRTIMER_RESTART;
                hrtimer_forward_now(&controller->early_tx,
                                ktime_set(0, 20 * NSEC_PER_USEC));
        }

        spin_unlock_irqrestore(&musb->lock, flags);
        return ret;
}

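/*
 * dmaengine completion callback: accounts the transferred bytes from the
 * descriptor residue, then either finishes the transfer right away or
 * applies the early-TX-interrupt workaround described below.
 */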
static void cppi41_dma_callback(void *private_data)
{
        struct dma_channel *channel = private_data;
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct cppi41_dma_controller *controller;
        struct musb *musb = hw_ep->musb;
        unsigned long flags;
        struct dma_tx_state txstate;
        u32 transferred;
        int is_hs = 0;
        bool empty;

        spin_lock_irqsave(&musb->lock, flags);

        dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
                        &txstate);
        transferred = cppi41_channel->prog_len - txstate.residue;
        cppi41_channel->transferred += transferred;

        dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
                hw_ep->epnum, cppi41_channel->transferred,
                cppi41_channel->total_len);

        update_rx_toggle(cppi41_channel);

        if (cppi41_channel->transferred == cppi41_channel->total_len ||
                        transferred < cppi41_channel->packet_sz)
                cppi41_channel->prog_len = 0;

        if (cppi41_channel->is_tx) {
                u8 type;

                if (is_host_active(musb))
                        type = hw_ep->out_qh->type;
                else
                        type = hw_ep->ep_in.type;

                if (type == USB_ENDPOINT_XFER_ISOC)
                        /*
                         * Don't use the early-TX-interrupt workaround below
                         * for Isoch transfers. Since Isoch transfers are
                         * periodic, by the time the next transfer is
                         * scheduled, the current one should be done already.
                         *
                         * This avoids audio playback underrun issues.
                         */
                        empty = true;
                else
                        empty = musb_is_tx_fifo_empty(hw_ep);
        }

        if (!cppi41_channel->is_tx || empty) {
                cppi41_trans_done(cppi41_channel);
                goto out;
        }

        /*
         * On AM335x it has been observed that the TX interrupt fires
         * too early, meaning the TXFIFO is not yet empty but the DMA
         * engine says that it is done with the transfer. We don't
         * receive a FIFO-empty interrupt, so the only thing we can do is
         * to poll for the bit. On HS it usually takes 2us, on FS around
         * 110us - 150us depending on the transfer size.
         * We spin on HS (no longer than 25us) and set up a timer on
         * FS to check for the bit and complete the transfer.
         */
        controller = cppi41_channel->controller;

        if (is_host_active(musb)) {
                if (musb->port1_status & USB_PORT_STAT_HIGH_SPEED)
                        is_hs = 1;
        } else {
                if (musb->g.speed == USB_SPEED_HIGH)
                        is_hs = 1;
        }
        if (is_hs) {
                unsigned wait = 25;

                do {
                        empty = musb_is_tx_fifo_empty(hw_ep);
                        if (empty) {
                                cppi41_trans_done(cppi41_channel);
                                goto out;
                        }
                        wait--;
                        if (!wait)
                                break;
                        cpu_relax();
                } while (1);
        }
        list_add_tail(&cppi41_channel->tx_check,
                        &controller->early_tx_list);
        if (!hrtimer_is_queued(&controller->early_tx)) {
                unsigned long usecs = cppi41_channel->total_len / 10;

                hrtimer_start_range_ns(&controller->early_tx,
                                ktime_set(0, usecs * NSEC_PER_USEC),
                                20 * NSEC_PER_USEC,
                                HRTIMER_MODE_REL);
        }

out:
        spin_unlock_irqrestore(&musb->lock, flags);
}

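/*
 * Pack a 2-bit mode value for endpoint @ep into the shared register image
 * @old. For example, ep = 2 with mode = EP_MODE_DMA_GEN_RNDIS (3) clears
 * and then sets bits 3:2, leaving every other endpoint's field untouched.
 */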
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
        unsigned shift;

        shift = (ep - 1) * 2;
        old &= ~(3 << shift);
        old |= mode << shift;
        return old;
}

static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
                unsigned mode)
{
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        u32 port;
        u32 new_mode;
        u32 old_mode;

        if (cppi41_channel->is_tx)
                old_mode = controller->tx_mode;
        else
                old_mode = controller->rx_mode;
        port = cppi41_channel->port_num;
        new_mode = update_ep_mode(port, mode, old_mode);

        if (new_mode == old_mode)
                return;
        if (cppi41_channel->is_tx) {
                controller->tx_mode = new_mode;
                musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
                                new_mode);
        } else {
                controller->rx_mode = new_mode;
                musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
                                new_mode);
        }
}

static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
                unsigned mode)
{
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        u32 port;
        u32 new_mode;
        u32 old_mode;

        old_mode = controller->auto_req;
        port = cppi41_channel->port_num;
        new_mode = update_ep_mode(port, mode, old_mode);

        if (new_mode == old_mode)
                return;
        controller->auto_req = new_mode;
        musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}

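/*
 * Program one transfer: multi-packet TX uses generic RNDIS mode so the
 * DMA engine can move the whole buffer in one go; everything else falls
 * back to transparent mode, one packet per descriptor, because of
 * AM335x Advisory 1.0.13 (see cppi41_trans_done() for the reload path).
 */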
static bool cppi41_configure_channel(struct dma_channel *channel,
                                u16 packet_sz, u8 mode,
                                dma_addr_t dma_addr, u32 len)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct dma_chan *dc = cppi41_channel->dc;
        struct dma_async_tx_descriptor *dma_desc;
        enum dma_transfer_direction direction;
        struct musb *musb = cppi41_channel->controller->musb;
        unsigned use_gen_rndis = 0;

        dev_dbg(musb->controller,
                "configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
                cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
                packet_sz, mode, (unsigned long long) dma_addr,
                len, cppi41_channel->is_tx);

        cppi41_channel->buf_addr = dma_addr;
        cppi41_channel->total_len = len;
        cppi41_channel->transferred = 0;
        cppi41_channel->packet_sz = packet_sz;
        cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0;

        /*
         * Due to AM335x Advisory 1.0.13 we are not allowed to transfer more
         * than max packet size at a time.
         */
        if (cppi41_channel->is_tx)
                use_gen_rndis = 1;

        if (use_gen_rndis) {
                /* RNDIS mode */
                if (len > packet_sz) {
                        musb_writel(musb->ctrl_base,
                                RNDIS_REG(cppi41_channel->port_num), len);
                        /* gen rndis */
                        cppi41_set_dma_mode(cppi41_channel,
                                        EP_MODE_DMA_GEN_RNDIS);

                        /* auto req */
                        cppi41_set_autoreq_mode(cppi41_channel,
                                        EP_MODE_AUTOREQ_ALL_NEOP);
                } else {
                        musb_writel(musb->ctrl_base,
                                        RNDIS_REG(cppi41_channel->port_num), 0);
                        cppi41_set_dma_mode(cppi41_channel,
                                        EP_MODE_DMA_TRANSPARENT);
                        cppi41_set_autoreq_mode(cppi41_channel,
                                        EP_MODE_AUTOREQ_NONE);
                }
        } else {
                /* fallback mode */
                cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
                cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
                len = min_t(u32, packet_sz, len);
        }
        cppi41_channel->prog_len = len;
        direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
        dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!dma_desc)
                return false;

        dma_desc->callback = cppi41_dma_callback;
        dma_desc->callback_param = channel;
        cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
        cppi41_channel->channel.rx_packet_done = false;

        save_rx_toggle(cppi41_channel);
        dma_async_issue_pending(dc);
        return true;
}

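/*
 * musb dma_controller hooks: hw_ep N maps to the statically allocated
 * channel N - 1 of the matching direction; a channel is handed out at
 * most once until it is released again.
 */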
static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
                                struct musb_hw_ep *hw_ep, u8 is_tx)
{
        struct cppi41_dma_controller *controller = container_of(c,
                        struct cppi41_dma_controller, controller);
        struct cppi41_dma_channel *cppi41_channel = NULL;
        u8 ch_num = hw_ep->epnum - 1;

        if (ch_num >= MUSB_DMA_NUM_CHANNELS)
                return NULL;

        if (is_tx)
                cppi41_channel = &controller->tx_channel[ch_num];
        else
                cppi41_channel = &controller->rx_channel[ch_num];

        if (!cppi41_channel->dc)
                return NULL;

        if (cppi41_channel->is_allocated)
                return NULL;

        cppi41_channel->hw_ep = hw_ep;
        cppi41_channel->is_allocated = 1;

        return &cppi41_channel->channel;
}

static void cppi41_dma_channel_release(struct dma_channel *channel)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;

        if (cppi41_channel->is_allocated) {
                cppi41_channel->is_allocated = 0;
                channel->status = MUSB_DMA_STATUS_FREE;
                channel->actual_len = 0;
        }
}

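/*
 * For high-bandwidth host endpoints the effective packet size is the base
 * size (low 11 bits) times hb_mult, so the chunking and ZLP logic operate
 * on the real wire packet size.
 */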
static int cppi41_dma_channel_program(struct dma_channel *channel,
                                u16 packet_sz, u8 mode,
                                dma_addr_t dma_addr, u32 len)
{
        int ret;
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        int hb_mult = 0;

        BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
                channel->status == MUSB_DMA_STATUS_BUSY);

        if (is_host_active(cppi41_channel->controller->musb)) {
                if (cppi41_channel->is_tx)
                        hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
                else
                        hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
        }

        channel->status = MUSB_DMA_STATUS_BUSY;
        channel->actual_len = 0;

        if (hb_mult)
                packet_sz = hb_mult * (packet_sz & 0x7FF);

        ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
        if (!ret)
                channel->status = MUSB_DMA_STATUS_FREE;

        return ret;
}

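/*
 * In gadget mode only bulk TX is worth offloading to the DMA engine;
 * device-side RX has no workaround for Advisory 1.0.13 and stays on PIO.
 */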
static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
                void *buf, u32 length)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->musb;

        if (is_host_active(musb)) {
                WARN_ON(1);
                return 1;
        }
        if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK)
                return 0;
        if (cppi41_channel->is_tx)
                return 1;
        /* AM335x Advisory 1.0.13. No workaround for device RX mode */
        return 0;
}

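/*
 * Abort sequence: drop the DMA-enable bit in the endpoint CSR, flush a
 * pending FIFO packet if needed, then assert the channel's teardown bit
 * in USB_TDOWN and call dmaengine_terminate_all() until it stops
 * returning -EAGAIN.
 */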
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->musb;
        void __iomem *epio = cppi41_channel->hw_ep->regs;
        int tdbit;
        int ret;
        unsigned is_tx;
        u16 csr;

        is_tx = cppi41_channel->is_tx;
        dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
                        cppi41_channel->port_num, is_tx);

        if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
                return 0;

        list_del_init(&cppi41_channel->tx_check);
        if (is_tx) {
                csr = musb_readw(epio, MUSB_TXCSR);
                csr &= ~MUSB_TXCSR_DMAENAB;
                musb_writew(epio, MUSB_TXCSR, csr);
        } else {
                cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);

                /* delay to drain the cppi dma pipeline for isoch */
                udelay(250);

                csr = musb_readw(epio, MUSB_RXCSR);
                csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
                musb_writew(epio, MUSB_RXCSR, csr);

                /* wait to drain the cppi dma pipeline */
                udelay(50);

                csr = musb_readw(epio, MUSB_RXCSR);
                if (csr & MUSB_RXCSR_RXPKTRDY) {
                        csr |= MUSB_RXCSR_FLUSHFIFO;
                        musb_writew(epio, MUSB_RXCSR, csr);
                        musb_writew(epio, MUSB_RXCSR, csr);
                }
        }

        tdbit = 1 << cppi41_channel->port_num;
        if (is_tx)
                tdbit <<= 16;

        do {
                if (is_tx)
                        musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
                ret = dmaengine_terminate_all(cppi41_channel->dc);
        } while (ret == -EAGAIN);

        if (is_tx) {
                musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

                csr = musb_readw(epio, MUSB_TXCSR);
                if (csr & MUSB_TXCSR_TXPKTRDY) {
                        csr |= MUSB_TXCSR_FLUSHFIFO;
                        musb_writew(epio, MUSB_TXCSR, csr);
                }
        }

        cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
        return 0;
}

static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
{
        struct dma_chan *dc;
        int i;

        for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
                dc = ctrl->tx_channel[i].dc;
                if (dc)
                        dma_release_channel(dc);
                dc = ctrl->rx_channel[i].dc;
                if (dc)
                        dma_release_channel(dc);
        }
}

static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
        cppi41_release_all_dma_chans(controller);
}

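/*
 * Bind dmaengine channels from the "dma-names" DT property: each name is
 * "tx<N>" or "rx<N>" with N being the 1-based port number; a missing
 * channel defers the probe so the cppi41 dmaengine driver can load first.
 */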
static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
{
        struct musb *musb = controller->musb;
        struct device *dev = musb->controller;
        struct device_node *np = dev->parent->of_node;
        struct cppi41_dma_channel *cppi41_channel;
        int count;
        int i;
        int ret;

        count = of_property_count_strings(np, "dma-names");
        if (count < 0)
                return count;

        for (i = 0; i < count; i++) {
                struct dma_chan *dc;
                struct dma_channel *musb_dma;
                const char *str;
                unsigned is_tx;
                unsigned int port;

                ret = of_property_read_string_index(np, "dma-names", i, &str);
                if (ret)
                        goto err;
                if (strstarts(str, "tx"))
                        is_tx = 1;
                else if (strstarts(str, "rx"))
                        is_tx = 0;
                else {
                        dev_err(dev, "Wrong dmatype %s\n", str);
                        /* don't leak the 0 left in ret by the string read */
                        ret = -EINVAL;
                        goto err;
                }
                ret = kstrtouint(str + 2, 0, &port);
                if (ret)
                        goto err;

                ret = -EINVAL;
                if (port > MUSB_DMA_NUM_CHANNELS || !port)
                        goto err;
                if (is_tx)
                        cppi41_channel = &controller->tx_channel[port - 1];
                else
                        cppi41_channel = &controller->rx_channel[port - 1];

                cppi41_channel->controller = controller;
                cppi41_channel->port_num = port;
                cppi41_channel->is_tx = is_tx;
                INIT_LIST_HEAD(&cppi41_channel->tx_check);

                musb_dma = &cppi41_channel->channel;
                musb_dma->private_data = cppi41_channel;
                musb_dma->status = MUSB_DMA_STATUS_FREE;
                musb_dma->max_len = SZ_4M;

                dc = dma_request_slave_channel(dev->parent, str);
                if (!dc) {
                        dev_err(dev, "Failed to request %s.\n", str);
                        ret = -EPROBE_DEFER;
                        goto err;
                }
                cppi41_channel->dc = dc;
        }
        return 0;
err:
        cppi41_release_all_dma_chans(controller);
        return ret;
}

void cppi41_dma_controller_destroy(struct dma_controller *c)
{
        struct cppi41_dma_controller *controller = container_of(c,
                        struct cppi41_dma_controller, controller);

        hrtimer_cancel(&controller->early_tx);
        cppi41_dma_controller_stop(controller);
        kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);

struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
        struct cppi41_dma_controller *controller;
        int ret = 0;

        if (!musb->controller->parent->of_node) {
                dev_err(musb->controller, "Need DT for the DMA engine.\n");
                return NULL;
        }

        controller = kzalloc(sizeof(*controller), GFP_KERNEL);
        if (!controller)
                goto kzalloc_fail;

        hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        controller->early_tx.function = cppi41_recheck_tx_req;
        INIT_LIST_HEAD(&controller->early_tx_list);
        controller->musb = musb;

        controller->controller.channel_alloc = cppi41_dma_channel_allocate;
        controller->controller.channel_release = cppi41_dma_channel_release;
        controller->controller.channel_program = cppi41_dma_channel_program;
        controller->controller.channel_abort = cppi41_dma_channel_abort;
        controller->controller.is_compatible = cppi41_is_compatible;

        ret = cppi41_dma_controller_start(controller);
        if (ret)
                goto plat_get_fail;
        return &controller->controller;

plat_get_fail:
        kfree(controller);
kzalloc_fail:
        if (ret == -EPROBE_DEFER)
                return ERR_PTR(ret);
        return NULL;
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_create);
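
/*
 * Usage sketch, not taken verbatim from any glue driver: a MUSB platform
 * glue (e.g. musb_dsps.c on AM335x) is expected to hand the two exported
 * entry points above to the core through its musb_platform_ops, roughly:
 *
 *      static const struct musb_platform_ops dsps_ops = {
 *              ...
 *              .dma_init       = cppi41_dma_controller_create,
 *              .dma_exit       = cppi41_dma_controller_destroy,
 *      };
 *
 * The core then calls dma_init() during probe and dma_exit() on removal;
 * consult the actual glue driver for the authoritative hookup.
 */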