 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
/* DMA descriptor control */
enum shdma_desc_status {
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.

static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;
/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	if (ops->channel_busy(schan))

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx);
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;

	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		 * All chunks are on the global ld_free, so we have to find
		 * the end of the chain ourselves
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
		chunk->mark = DESC_SUBMITTED;
		if (chunk->chunks == 1) {
			chunk->async_tx.callback = callback;
			chunk->async_tx.callback_param = tx->callback_param;
			/* Callback goes to the last chunk */
			chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);

		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
			pm_runtime_put(schan->dev);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
			 * TODO: .setup_xfer() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		 * Tell .device_issue_pending() not to run the queue, interrupts
		schan->pm_state = SHDMA_PM_PENDING;

	spin_unlock_irq(&schan->chan_lock);
/* Called with desc_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;

	if (schan->dev->of_node) {
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		match = schan->real_slave_id;

	if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)

	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))

	ret = ops->set_slave(schan, match, slave_addr, false);
		clear_bit(schan->real_slave_id, shdma_slave_used);

	schan->slave_id = schan->real_slave_id;
static int shdma_alloc_chan_resources(struct dma_chan *chan)
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;

	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
		/* Legacy mode: .private is set in filter */
		schan->real_slave_id = slave->slave_id;
		ret = shdma_setup_slave(schan, 0);
		/* Normal mode: real_slave_id was set by filter */
		schan->slave_id = -EINVAL;

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &schan->ld_free);

	return NR_DESCS_PER_CHANNEL;

		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
 * This is the standard shdma filter function to be used as a replacement to
 * the "old" method, using the .private pointer.
 * You always have to pass a valid slave ID as the argument. Old drivers that
 * pass ERR_PTR(-EINVAL) as the filter parameter and set the slave ID up in
 * dma_slave_config need to be updated, so that the slave_id field can be
 * removed from dma_slave_config. If this filter is used, the slave driver,
 * after calling dma_request_channel(), will also have to call
 * dmaengine_slave_config() with .direction, and either .src_addr or .dst_addr
 * set.
 *
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers using this
 * service would have to provide their own filters, which first would check
 * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
 * this, and only then, in case of a match, call this common filter.
 * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
 * In that case the MID-RID value is used for slave channel filtering and is
 * passed to this function in the "arg" parameter.
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
	struct shdma_chan *schan;
	struct shdma_dev *sdev;
	int slave_id = (long)arg;

	/* Only support channels handled by this driver. */
	if (chan->device->device_alloc_chan_resources !=
	    shdma_alloc_chan_resources)

	schan = to_shdma_chan(chan);
	sdev = to_shdma_dev(chan->device);

	 * For DT, the schan->slave_id field is generated by the
	 * set_slave function from the slave ID that is passed in
	 * from xlate. For the non-DT case, the slave ID is
	 * directly passed into the filter function by the driver
	if (schan->dev->of_node) {
		ret = sdev->ops->set_slave(schan, slave_id, 0, true);

		schan->real_slave_id = schan->slave_id;

		/* No slave requested - arbitrary channel */
		dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");

	if (slave_id >= slave_num)

	ret = sdev->ops->set_slave(schan, slave_id, 0, true);

	schan->real_slave_id = slave_id;

EXPORT_SYMBOL(shdma_chan_filter);
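
/*
 * Illustrative sketch, not part of this library: one way a slave driver might
 * use shdma_chan_filter() together with dmaengine_slave_config(), as the
 * comment above describes.  The slave ID value and the FIFO address below are
 * made-up example values; only the dmaengine calls and shdma_chan_filter()
 * itself are real.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= 0xfe001000,	// example FIFO address
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
 *	};
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter, (void *)(long)0x21);
 *	if (chan)
 *		dmaengine_slave_config(chan, &cfg);
 */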
static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(cyclic_list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
					"Completing cookie %d, expected %d\n",
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			dmaengine_desc_get_callback(tx, &cb);
			callback = tx->callback;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			head_acked = async_tx_test_ack(tx);
			switch (desc->mark) {
				desc->mark = DESC_WAITING;
					async_tx_ack(&desc->async_tx);

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {

			if (all || !desc->cyclic) {
				/* Remove from ld_queue list */
				desc->mark = DESC_IDLE;
				list_move(&desc->node, &schan->ld_free);
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			} else if (schan->pm_state == SHDMA_PM_PENDING) {
				shdma_chan_xfer_ld_queue(schan);

	if (all && !callback)
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of the DMA channel.
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
	while (__ld_cleanup(schan, all))
 * shdma_free_chan_resources - Free all resources of the channel.
static void shdma_free_chan_resources(struct dma_chan *chan)
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	schan->real_slave_id = 0;

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);

	spin_unlock_irq(&schan->chan_lock);
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the prepared descriptor on success or NULL on failure
 * Locks: called with desc_lock held
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
		dev_err(schan->dev, "No free link descriptor available\n");

	ops->desc_setup(schan, new, *src, *dst, &copy_size);

		new->async_tx.cookie = -EBUSY;
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;

		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
		copy_size, *len, src, dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element and points at the
 * source buffer.
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	unsigned long irq_flags;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);
	 * The first descriptor is what the user is dealing with in all API
	 * calls; its cookie is at first set to -EBUSY, and at tx-submit to a
	 * positive number.
	 * If more than one chunk is needed, further chunks have cookie = -EINVAL;
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC.
	 * All chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced
	 * back onto the free list in the form of a chain.
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
				i, sg, len, &sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,

			new->cyclic = cyclic;
				new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);

		new->async_tx.cookie = -ENOSPC;
	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, false);
#define SHDMA_MAX_SG_LEN 32

static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	struct dma_async_tx_descriptor *desc;
	const struct shdma_ops *ops = sdev->ops;
	unsigned int sg_len = buf_len / period_len;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;
	struct scatterlist *sgl;

	BUG_ON(!schan->desc_num);

	if (sg_len > SHDMA_MAX_SG_LEN) {
		dev_err(schan->dev, "sg length %d exceeds limit %d",
			sg_len, SHDMA_MAX_SG_LEN);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || (buf_len < period_len)) {
			"%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			__func__, buf_len, period_len, slave_id);

	slave_addr = ops->slave_addr(schan);

	 * Allocate the sg list dynamically as it would consume too much stack
	sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL);

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; i++) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;

	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, true);
static int shdma_terminate_all(struct dma_chan *chan)
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;

	spin_lock_irqsave(&schan->chan_lock, flags);
	ops->halt_channel(schan);

	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
		/* Record partial transfer */
		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
							   struct shdma_desc, node);
		desc->partial = ops->get_partial(schan, desc);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	shdma_chan_ld_cleanup(schan, true);
static int shdma_config(struct dma_chan *chan,
			struct dma_slave_config *config)
	struct shdma_chan *schan = to_shdma_chan(chan);

	 * So far only .slave_id is used, but the slave drivers are
	 * encouraged to also set a transfer direction and an address.

	 * Overriding the slave_id through dma_slave_config is deprecated,
	 * but possibly some out-of-tree drivers still do it.
	if (WARN_ON_ONCE(config->slave_id &&
			 config->slave_id != schan->real_slave_id))
		schan->real_slave_id = config->slave_id;

	 * We could lock this, but you shouldn't be configuring the
	 * channel while using it...
	return shdma_setup_slave(schan,
				 config->direction == DMA_DEV_TO_MEM ?
				 config->src_addr : config->dst_addr);
static void shdma_issue_pending(struct dma_chan *chan)
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
static enum dma_status shdma_tx_status(struct dma_chan *chan,
				       struct dma_tx_state *txstate)
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	 * If we don't find the cookie on the queue, it has been aborted and we have
	if (status != DMA_COMPLETE) {
		struct shdma_desc *sdesc;

		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;

	spin_unlock_irqrestore(&schan->chan_lock, flags);
/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;

			sdesc->mark = DESC_IDLE;
			dmaengine_desc_get_callback_invoke(tx, NULL);

		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

EXPORT_SYMBOL(shdma_reset);
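
/*
 * Illustrative sketch, not part of this library: shdma_reset() is meant to be
 * called by the hardware glue driver from its error interrupt (or NMI)
 * handler, roughly as below.  The handler name and the way the shdma_dev
 * pointer is obtained are made up for the example.
 *
 *	static irqreturn_t my_dmac_err_irq(int irq, void *data)
 *	{
 *		struct shdma_dev *sdev = data;
 *
 *		// quiesce controller-specific state here, then let the base
 *		// library complete and recycle all queued descriptors
 *		return IRQ_RETVAL(shdma_reset(sdev));
 *	}
 */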
static irqreturn_t chan_irq(int irq, void *dev)
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;

	spin_lock(&schan->chan_lock);

	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;

	spin_unlock(&schan->chan_lock);

static irqreturn_t chan_irqt(int irq, void *dev)
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;

	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);

int shdma_request_irq(struct shdma_chan *schan, int irq,
		      unsigned long flags, const char *name)
	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
					    chan_irqt, flags, name, schan);

	schan->irq = ret < 0 ? ret : irq;

EXPORT_SYMBOL(shdma_request_irq);
void shdma_chan_probe(struct shdma_dev *sdev,
		      struct shdma_chan *schan, int id)
	schan->pm_state = SHDMA_PM_ESTABLISHED;

	/* reference struct dma_device */
	schan->dma_chan.device = &sdev->dma_dev;
	dma_cookie_init(&schan->dma_chan);

	schan->dev = sdev->dma_dev.dev;

	if (!schan->max_xfer_len)
		schan->max_xfer_len = PAGE_SIZE;

	spin_lock_init(&schan->chan_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&schan->dma_chan.device_node,
		      &sdev->dma_dev.channels);
	sdev->schan[id] = schan;

EXPORT_SYMBOL(shdma_chan_probe);
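
/*
 * Illustrative sketch, not part of this library: how a hardware glue driver
 * might wire up one channel during probe using shdma_chan_probe() and
 * shdma_request_irq().  The my_chan structure, the function name and the IRQ
 * flags are made-up examples.
 *
 *	struct my_chan {
 *		struct shdma_chan shdma_chan;	// embedded base-library channel
 *		// controller-specific register base, etc.
 *	};
 *
 *	static int my_chan_setup(struct shdma_dev *sdev, struct my_chan *mchan,
 *				 int id, int irq)
 *	{
 *		shdma_chan_probe(sdev, &mchan->shdma_chan, id);
 *		return shdma_request_irq(&mchan->shdma_chan, irq, IRQF_SHARED,
 *					 "my-dmac-chan");
 *	}
 */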
void shdma_chan_remove(struct shdma_chan *schan)
	list_del(&schan->dma_chan.device_node);

EXPORT_SYMBOL(shdma_chan_remove);
int shdma_init(struct device *dev, struct shdma_dev *sdev,
	struct dma_device *dma_dev = &sdev->dma_dev;

	 * Require all callbacks for now; they can trivially be made optional
	    !sdev->ops->embedded_desc ||
	    !sdev->ops->start_xfer ||
	    !sdev->ops->setup_xfer ||
	    !sdev->ops->set_slave ||
	    !sdev->ops->desc_setup ||
	    !sdev->ops->slave_addr ||
	    !sdev->ops->channel_busy ||
	    !sdev->ops->halt_channel ||
	    !sdev->ops->desc_completed)

	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= shdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
	dma_dev->device_tx_status = shdma_tx_status;
	dma_dev->device_issue_pending = shdma_issue_pending;

	/* Fields compulsory for DMA_SLAVE */
	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
	dma_dev->device_config = shdma_config;
	dma_dev->device_terminate_all = shdma_terminate_all;

EXPORT_SYMBOL(shdma_init);
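
/*
 * Illustrative sketch, not part of this library: the registration flow
 * expected of a hardware glue driver.  All my_* names are made up; struct
 * shdma_ops, struct shdma_dev, shdma_init() and dma_async_device_register()
 * are the real interfaces.
 *
 *	static const struct shdma_ops my_shdma_ops = {
 *		.embedded_desc	= my_embedded_desc,
 *		.start_xfer	= my_start_xfer,
 *		.setup_xfer	= my_setup_xfer,
 *		.set_slave	= my_set_slave,
 *		.desc_setup	= my_desc_setup,
 *		.slave_addr	= my_slave_addr,
 *		.channel_busy	= my_channel_busy,
 *		.halt_channel	= my_halt_channel,
 *		.desc_completed	= my_desc_completed,
 *	};
 *
 *	// in the glue driver's probe():
 *	my_dev->shdma_dev.ops = &my_shdma_ops;
 *	my_dev->shdma_dev.desc_size = sizeof(struct my_desc);
 *	err = shdma_init(&pdev->dev, &my_dev->shdma_dev, chan_num);
 *	if (err < 0)
 *		return err;
 *	// ...set up channels with shdma_chan_probe(), then register:
 *	err = dma_async_device_register(&my_dev->shdma_dev.dma_dev);
 */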
void shdma_cleanup(struct shdma_dev *sdev)
EXPORT_SYMBOL(shdma_cleanup);

static int __init shdma_enter(void)
	shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
				   sizeof(long), GFP_KERNEL);
	if (!shdma_slave_used)
module_init(shdma_enter);

static void __exit shdma_exit(void)
	kfree(shdma_slave_used);
module_exit(shdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");