/*
 * Keystone Queue Manager subsystem driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soc/ti/knav_qmss.h>

#include "knav_qmss.h"

static struct knav_device *kdev;
static DEFINE_MUTEX(knav_dev_lock);

/* Queue manager register indices in DTS */
#define KNAV_QUEUE_PEEK_REG_INDEX	0
#define KNAV_QUEUE_STATUS_REG_INDEX	1
#define KNAV_QUEUE_CONFIG_REG_INDEX	2
#define KNAV_QUEUE_REGION_REG_INDEX	3
#define KNAV_QUEUE_PUSH_REG_INDEX	4
#define KNAV_QUEUE_POP_REG_INDEX	5

/* PDSP register indices in DTS */
#define KNAV_QUEUE_PDSP_IRAM_REG_INDEX	0
#define KNAV_QUEUE_PDSP_REGS_REG_INDEX	1
#define KNAV_QUEUE_PDSP_INTD_REG_INDEX	2
#define KNAV_QUEUE_PDSP_CMD_REG_INDEX	3

#define knav_queue_idx_to_inst(kdev, idx)			\
	(kdev->instances + (idx << kdev->inst_shift))

#define for_each_handle_rcu(qh, inst)				\
	list_for_each_entry_rcu(qh, &inst->handles, list)

#define for_each_instance(idx, inst, kdev)			\
	for (idx = 0, inst = kdev->instances;			\
	     idx < (kdev)->num_queues_in_use;			\
	     idx++, inst = knav_queue_idx_to_inst(kdev, idx))

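/*
 * A worked example of the index-to-instance arithmetic above (numbers are
 * illustrative only): knav_queue_init_queues() rounds the per-instance
 * size up to a power of two and stores the exponent in kdev->inst_shift,
 * so the lookup is a shift and an add instead of a multiply. If the
 * instance size rounds up to 128 bytes, inst_shift is 7 and instance idx
 * sits (idx << 7) bytes into the kdev->instances allocation.
 */
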
/* All firmware file names end up here. List the firmware file names below.
 * Newest followed by older ones. Search is done from start of the array
 * until a firmware file is found.
 */
const char *knav_acc_firmwares[] = {"/*(DEBLOBBED)*/"};

/**
 * knav_queue_notify: qmss queue notifier call
 *
 * @inst:		qmss queue instance like accumulator
 */
void knav_queue_notify(struct knav_queue_inst *inst)
{
	struct knav_queue *qh;

	if (!inst)
		return;

	rcu_read_lock();
	for_each_handle_rcu(qh, inst) {
		if (atomic_read(&qh->notifier_enabled) <= 0)
			continue;
		if (WARN_ON(!qh->notifier_fn))
			continue;
		atomic_inc(&qh->stats.notifies);
		qh->notifier_fn(qh->notifier_fn_arg);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(knav_queue_notify);

static irqreturn_t knav_queue_int_handler(int irq, void *_instdata)
{
	struct knav_queue_inst *inst = _instdata;

	knav_queue_notify(inst);
	return IRQ_HANDLED;
}

static int knav_queue_setup_irq(struct knav_range_info *range,
			  struct knav_queue_inst *inst)
{
	unsigned queue = inst->id - range->queue_base;
	int ret = 0, irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		ret = request_irq(irq, knav_queue_int_handler, 0,
				  inst->irq_name, inst);
		if (ret)
			return ret;
		disable_irq(irq);
		if (range->irqs[queue].cpu_mask) {
			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
			if (ret) {
				dev_warn(range->kdev->dev,
					 "Failed to set IRQ affinity\n");
				return ret;
			}
		}
	}
	return ret;
}

static void knav_queue_free_irq(struct knav_queue_inst *inst)
{
	struct knav_range_info *range = inst->range;
	unsigned queue = inst->id - inst->range->queue_base;
	int irq;

	if (range->flags & RANGE_HAS_IRQ) {
		irq = range->irqs[queue].irq;
		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, inst);
	}
}

static inline bool knav_queue_is_busy(struct knav_queue_inst *inst)
{
	return !list_empty(&inst->handles);
}

static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst)
{
	return inst->range->flags & RANGE_RESERVED;
}

static inline bool knav_queue_is_shared(struct knav_queue_inst *inst)
{
	struct knav_queue *tmp;
	bool ret = false;

	rcu_read_lock();
	for_each_handle_rcu(tmp, inst) {
		if (tmp->flags & KNAV_QUEUE_SHARED) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

static inline bool knav_queue_match_type(struct knav_queue_inst *inst,
					 unsigned type)
{
	if ((type == KNAV_QUEUE_QPEND) &&
	    (inst->range->flags & RANGE_HAS_IRQ)) {
		return true;
	} else if ((type == KNAV_QUEUE_ACC) &&
		(inst->range->flags & RANGE_HAS_ACCUMULATOR)) {
		return true;
	} else if ((type == KNAV_QUEUE_GP) &&
		!(inst->range->flags &
			(RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) {
		return true;
	}
	return false;
}

static inline struct knav_queue_inst *
knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id)
{
	struct knav_queue_inst *inst;
	int idx;

	for_each_instance(idx, inst, kdev) {
		if (inst->id == id)
			return inst;
	}
	return NULL;
}

static inline struct knav_queue_inst *knav_queue_find_by_id(int id)
{
	if (kdev->base_id <= id &&
	    kdev->base_id + kdev->num_queues > id) {
		id -= kdev->base_id;
		return knav_queue_match_id_to_inst(kdev, id);
	}
	return NULL;
}

static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst,
				      const char *name, unsigned flags)
{
	struct knav_queue *qh;
	unsigned id;
	int ret = 0;

	qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL);
	if (!qh)
		return ERR_PTR(-ENOMEM);

	qh->flags = flags;
	qh->inst = inst;
	id = inst->id - inst->qmgr->start_queue;
	qh->reg_push = &inst->qmgr->reg_push[id];
	qh->reg_pop = &inst->qmgr->reg_pop[id];
	qh->reg_peek = &inst->qmgr->reg_peek[id];

	/* first opener? */
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
		if (range->ops && range->ops->open_queue)
			ret = range->ops->open_queue(range, inst, flags);

		if (ret) {
			devm_kfree(inst->kdev->dev, qh);
			return ERR_PTR(ret);
		}
	}
	list_add_tail_rcu(&qh->list, &inst->handles);
	return qh;
}

static struct knav_queue *
knav_queue_open_by_id(const char *name, unsigned id, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh;

	mutex_lock(&knav_dev_lock);

	qh = ERR_PTR(-ENODEV);
	inst = knav_queue_find_by_id(id);
	if (!inst)
		goto unlock_ret;

	qh = ERR_PTR(-EEXIST);
	if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst))
		goto unlock_ret;

	qh = ERR_PTR(-EBUSY);
	if ((flags & KNAV_QUEUE_SHARED) &&
	    (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst)))
		goto unlock_ret;

	qh = __knav_queue_open(inst, name, flags);

unlock_ret:
	mutex_unlock(&knav_dev_lock);

	return qh;
}

static struct knav_queue *knav_queue_open_by_type(const char *name,
						unsigned type, unsigned flags)
{
	struct knav_queue_inst *inst;
	struct knav_queue *qh = ERR_PTR(-EINVAL);
	int idx;

	mutex_lock(&knav_dev_lock);

	for_each_instance(idx, inst, kdev) {
		if (knav_queue_is_reserved(inst))
			continue;
		if (!knav_queue_match_type(inst, type))
			continue;
		if (knav_queue_is_busy(inst))
			continue;
		qh = __knav_queue_open(inst, name, flags);
		break;
	}

	mutex_unlock(&knav_dev_lock);
	return qh;
}

static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled)
{
	struct knav_range_info *range = inst->range;

	if (range->ops && range->ops->set_notify)
		range->ops->set_notify(range, inst, enabled);
}

static int knav_queue_enable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool first;

	if (WARN_ON(!qh->notifier_fn))
		return -EINVAL;

	/* Adjust the per handle notifier count */
	first = (atomic_inc_return(&qh->notifier_enabled) == 1);
	if (!first)
		return 0; /* nothing to do */

	/* Now adjust the per instance notifier count */
	first = (atomic_inc_return(&inst->num_notifiers) == 1);
	if (first)
		knav_queue_set_notify(inst, true);

	return 0;
}

static int knav_queue_disable_notifier(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	bool last;

	last = (atomic_dec_return(&qh->notifier_enabled) == 0);
	if (!last)
		return 0; /* nothing to do */

	last = (atomic_dec_return(&inst->num_notifiers) == 0);
	if (last)
		knav_queue_set_notify(inst, false);

	return 0;
}

static int knav_queue_set_notifier(struct knav_queue *qh,
				struct knav_queue_notify_config *cfg)
{
	knav_queue_notify_fn old_fn = qh->notifier_fn;

	if (!cfg)
		return -EINVAL;

	if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ)))
		return -ENOTSUPP;

	if (!cfg->fn && old_fn)
		knav_queue_disable_notifier(qh);

	qh->notifier_fn = cfg->fn;
	qh->notifier_fn_arg = cfg->fn_arg;

	if (cfg->fn && !old_fn)
		knav_queue_enable_notifier(qh);

	return 0;
}

static int knav_gp_set_notify(struct knav_range_info *range,
			       struct knav_queue_inst *inst,
			       bool enabled)
{
	unsigned queue;

	if (range->flags & RANGE_HAS_IRQ) {
		queue = inst->id - range->queue_base;
		if (enabled)
			enable_irq(range->irqs[queue].irq);
		else
			disable_irq_nosync(range->irqs[queue].irq);
	}
	return 0;
}

static int knav_gp_open_queue(struct knav_range_info *range,
				struct knav_queue_inst *inst, unsigned flags)
{
	return knav_queue_setup_irq(range, inst);
}

static int knav_gp_close_queue(struct knav_range_info *range,
				struct knav_queue_inst *inst)
{
	knav_queue_free_irq(inst);
	return 0;
}

struct knav_range_ops knav_gp_range_ops = {
	.set_notify	= knav_gp_set_notify,
	.open_queue	= knav_gp_open_queue,
	.close_queue	= knav_gp_close_queue,
};

static int knav_queue_get_count(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	return readl_relaxed(&qh->reg_peek[0].entry_count) +
		atomic_read(&inst->desc_count);
}

static void knav_queue_debug_show_instance(struct seq_file *s,
					struct knav_queue_inst *inst)
{
	struct knav_device *kdev = inst->kdev;
	struct knav_queue *qh;

	if (!knav_queue_is_busy(inst))
		return;

	seq_printf(s, "\tqueue id %d (%s)\n",
		   kdev->base_id + inst->id, inst->name);
	for_each_handle_rcu(qh, inst) {
		seq_printf(s, "\t\thandle %p: ", qh);
		seq_printf(s, "pushes %8d, ",
			   atomic_read(&qh->stats.pushes));
		seq_printf(s, "pops %8d, ",
			   atomic_read(&qh->stats.pops));
		seq_printf(s, "count %8d, ",
			   knav_queue_get_count(qh));
		seq_printf(s, "notifies %8d, ",
			   atomic_read(&qh->stats.notifies));
		seq_printf(s, "push errors %8d, ",
			   atomic_read(&qh->stats.push_errors));
		seq_printf(s, "pop errors %8d\n",
			   atomic_read(&qh->stats.pop_errors));
	}
}

static int knav_queue_debug_show(struct seq_file *s, void *v)
{
	struct knav_queue_inst *inst;
	int idx;

	mutex_lock(&knav_dev_lock);
	seq_printf(s, "%s: %u-%u\n",
		   dev_name(kdev->dev), kdev->base_id,
		   kdev->base_id + kdev->num_queues - 1);
	for_each_instance(idx, inst, kdev)
		knav_queue_debug_show_instance(s, inst);
	mutex_unlock(&knav_dev_lock);

	return 0;
}

static int knav_queue_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, knav_queue_debug_show, NULL);
}

static const struct file_operations knav_queue_debug_ops = {
	.open		= knav_queue_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout,
					u32 flags)
{
	unsigned long end;
	u32 val = 0;

	end = jiffies + msecs_to_jiffies(timeout);
	while (time_after(end, jiffies)) {
		val = readl_relaxed(addr);
		if (flags)
			val &= flags;
		if (!val)
			break;
		cpu_relax();
	}
	return val ? -ETIMEDOUT : 0;
}

static int knav_queue_flush(struct knav_queue *qh)
{
	struct knav_queue_inst *inst = qh->inst;
	unsigned id = inst->id - inst->qmgr->start_queue;

	atomic_set(&inst->desc_count, 0);
	writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh);
	return 0;
}

/**
 * knav_queue_open()	- open a hardware queue
 * @name		- name to give the queue handle
 * @id			- desired queue number if any or specifies the type
 *			  of queue
 * @flags		- the following flags are applicable to queues:
 *	KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
 *			     exclusive by default.
 *			     Subsequent attempts to open a shared queue should
 *			     also have this flag.
 *
 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
 * to check the returned value for error codes.
 */
void *knav_queue_open(const char *name, unsigned id,
					unsigned flags)
{
	struct knav_queue *qh = ERR_PTR(-EINVAL);

	switch (id) {
	case KNAV_QUEUE_QPEND:
	case KNAV_QUEUE_ACC:
	case KNAV_QUEUE_GP:
		qh = knav_queue_open_by_type(name, id, flags);
		break;

	default:
		qh = knav_queue_open_by_id(name, id, flags);
		break;
	}
	return qh;
}
EXPORT_SYMBOL_GPL(knav_queue_open);

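/*
 * Example usage (illustrative only; the queue name and number below are
 * hypothetical, and valid ids depend on the DT queue ranges):
 *
 *	void *qh;
 *
 *	qh = knav_queue_open("my-rx-queue", 8704, KNAV_QUEUE_SHARED);
 *	if (IS_ERR(qh))
 *		return PTR_ERR(qh);
 *	...
 *	knav_queue_close(qh);
 */
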
/**
 * knav_queue_close()	- close a hardware queue handle
 * @qh			- handle to close
 */
void knav_queue_close(void *qhandle)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;

	while (atomic_read(&qh->notifier_enabled) > 0)
		knav_queue_disable_notifier(qh);

	mutex_lock(&knav_dev_lock);
	list_del_rcu(&qh->list);
	mutex_unlock(&knav_dev_lock);
	synchronize_rcu();
	if (!knav_queue_is_busy(inst)) {
		struct knav_range_info *range = inst->range;

		if (range->ops && range->ops->close_queue)
			range->ops->close_queue(range, inst);
	}
	devm_kfree(inst->kdev->dev, qh);
}
EXPORT_SYMBOL_GPL(knav_queue_close);

/**
 * knav_queue_device_control()	- Perform control operations on a queue
 * @qh				- queue handle
 * @cmd				- control commands
 * @arg				- command argument
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd,
				unsigned long arg)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_notify_config *cfg;
	int ret;

	switch ((int)cmd) {
	case KNAV_QUEUE_GET_ID:
		ret = qh->inst->kdev->base_id + qh->inst->id;
		break;

	case KNAV_QUEUE_FLUSH:
		ret = knav_queue_flush(qh);
		break;

	case KNAV_QUEUE_SET_NOTIFIER:
		cfg = (void *)arg;
		ret = knav_queue_set_notifier(qh, cfg);
		break;

	case KNAV_QUEUE_ENABLE_NOTIFY:
		ret = knav_queue_enable_notifier(qh);
		break;

	case KNAV_QUEUE_DISABLE_NOTIFY:
		ret = knav_queue_disable_notifier(qh);
		break;

	case KNAV_QUEUE_GET_COUNT:
		ret = knav_queue_get_count(qh);
		break;

	default:
		ret = -ENOTSUPP;
		break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(knav_queue_device_control);

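/*
 * Example of attaching a notifier through the control interface (sketch
 * only; "my_queue_notify" and its argument are hypothetical):
 *
 *	struct knav_queue_notify_config cfg = {
 *		.fn	= my_queue_notify,
 *		.fn_arg	= my_ctx,
 *	};
 *	int ret;
 *
 *	ret = knav_queue_device_control(qh, KNAV_QUEUE_SET_NOTIFIER,
 *					(unsigned long)&cfg);
 */
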
/**
 * knav_queue_push()	- push data (or descriptor) to the tail of a queue
 * @qh			- hardware queue handle
 * @dma			- DMA address of the data to push
 * @size		- size of data to push
 * @flags		- can be used to pass additional information
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_queue_push(void *qhandle, dma_addr_t dma,
					unsigned size, unsigned flags)
{
	struct knav_queue *qh = qhandle;
	u32 val;

	val = (u32)dma | ((size / 16) - 1);
	writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh);

	atomic_inc(&qh->stats.pushes);
	return 0;
}
EXPORT_SYMBOL_GPL(knav_queue_push);

/**
 * knav_queue_pop()	- pop data (or descriptor) from the head of a queue
 * @qh			- hardware queue handle
 * @size		- (optional) size of the data popped.
 *
 * Returns a DMA address on success, 0 on failure.
 */
dma_addr_t knav_queue_pop(void *qhandle, unsigned *size)
{
	struct knav_queue *qh = qhandle;
	struct knav_queue_inst *inst = qh->inst;
	dma_addr_t dma;
	u32 val, idx;

	/* are we accumulated? */
	if (inst->descs) {
		if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) {
			atomic_inc(&inst->desc_count);
			return 0;
		}
		idx  = atomic_inc_return(&inst->desc_head);
		idx &= ACC_DESCS_MASK;
		val = inst->descs[idx];
	} else {
		val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh);
		if (unlikely(!val))
			return 0;
	}

	dma = val & DESC_PTR_MASK;
	if (size)
		*size = ((val & DESC_SIZE_MASK) + 1) * 16;

	atomic_inc(&qh->stats.pops);
	return dma;
}
EXPORT_SYMBOL_GPL(knav_queue_pop);

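/*
 * Push/pop encoding, as a worked example: pushing a 64-byte descriptor at
 * DMA address 0x80000000 writes 0x80000003 to the push register
 * (address | (64 / 16 - 1)), and the matching pop recovers the size as
 * ((val & DESC_SIZE_MASK) + 1) * 16 = 64. A minimal sketch (error
 * handling elided; "dma" is a hypothetical 16-byte aligned descriptor
 * address):
 *
 *	knav_queue_push(qh, dma, 64, 0);
 *	...
 *	unsigned size;
 *	dma_addr_t ret = knav_queue_pop(qh, &size);
 *	// on success, ret == dma and size == 64
 */
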
/* carve out descriptors and push into queue */
static void kdesc_fill_pool(struct knav_pool *pool)
{
	struct knav_region *region;
	int i;

	region = pool->region;
	pool->desc_size = region->desc_size;
	for (i = 0; i < pool->num_desc; i++) {
		int index = pool->region_offset + i;
		dma_addr_t dma_addr;
		unsigned dma_size;

		dma_addr = region->dma_start + (region->desc_size * index);
		dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
		dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
					   DMA_TO_DEVICE);
		knav_queue_push(pool->queue, dma_addr, dma_size, 0);
	}
}

/* pop out descriptors and close the queue */
static void kdesc_empty_pool(struct knav_pool *pool)
{
	dma_addr_t dma;
	unsigned size;
	void *desc;
	int i;

	if (!pool->queue)
		return;

	for (i = 0;; i++) {
		dma = knav_queue_pop(pool->queue, &size);
		if (!dma)
			break;
		desc = knav_pool_desc_dma_to_virt(pool, dma);
		if (!desc) {
			dev_dbg(pool->kdev->dev,
				"couldn't unmap desc, continuing\n");
			continue;
		}
	}
	WARN_ON(i != pool->num_desc);
	knav_queue_close(pool->queue);
}

/* Get the DMA address of a descriptor */
dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt)
{
	struct knav_pool *pool = ph;

	return pool->region->dma_start + (virt - pool->region->virt_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);

void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
{
	struct knav_pool *pool = ph;

	return pool->region->virt_start + (dma - pool->region->dma_start);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);

/**
 * knav_pool_create()	- Create a pool of descriptors
 * @name		- name to give the pool handle
 * @num_desc		- numbers of descriptors in the pool
 * @region_id		- QMSS region id from which the descriptors are to be
 *			  allocated.
 *
 * Returns a pool handle on success.
 * Use IS_ERR_OR_NULL() to identify error values on return.
 */
void *knav_pool_create(const char *name,
					int num_desc, int region_id)
{
	struct knav_region *reg_itr, *region = NULL;
	struct knav_pool *pool, *pi;
	struct list_head *node;
	unsigned last_offset;
	bool slot_found;
	int ret;

	if (!kdev)
		return ERR_PTR(-EPROBE_DEFER);

	if (!kdev->dev)
		return ERR_PTR(-ENODEV);

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating pool\n");
		return ERR_PTR(-ENOMEM);
	}

	for_each_region(kdev, reg_itr) {
		if (reg_itr->id != region_id)
			continue;
		region = reg_itr;
		break;
	}

	if (!region) {
		dev_err(kdev->dev, "region-id(%d) not found\n", region_id);
		ret = -EINVAL;
		goto err;
	}

	pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
	if (IS_ERR_OR_NULL(pool->queue)) {
		dev_err(kdev->dev,
			"failed to open queue for pool(%s), error %ld\n",
			name, PTR_ERR(pool->queue));
		ret = PTR_ERR(pool->queue);
		goto err;
	}

	pool->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL);
	pool->kdev = kdev;
	pool->dev = kdev->dev;

	mutex_lock(&knav_dev_lock);

	if (num_desc > (region->num_desc - region->used_desc)) {
		dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n",
			region_id, name);
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* Region maintains a sorted (by region offset) list of pools
	 * use the first free slot which is large enough to accommodate
	 * the request
	 */
	last_offset = 0;
	slot_found = false;
	node = &region->pools;
	list_for_each_entry(pi, &region->pools, region_inst) {
		if ((pi->region_offset - last_offset) >= num_desc) {
			slot_found = true;
			break;
		}
		last_offset = pi->region_offset + pi->num_desc;
	}
	node = &pi->region_inst;

	if (slot_found) {
		pool->region = region;
		pool->num_desc = num_desc;
		pool->region_offset = last_offset;
		region->used_desc += num_desc;
		list_add_tail(&pool->list, &kdev->pools);
		list_add_tail(&pool->region_inst, node);
	} else {
		dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n",
			name, region_id);
		ret = -ENOMEM;
		goto err_unlock;
	}

	mutex_unlock(&knav_dev_lock);
	kdesc_fill_pool(pool);
	return pool;

err_unlock:
	mutex_unlock(&knav_dev_lock);
err:
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(knav_pool_create);

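/*
 * Example (illustrative only; pool name, descriptor count and region id
 * are hypothetical and must match the DT descriptor-regions):
 *
 *	void *pool;
 *
 *	pool = knav_pool_create("my-pool", 1024, 0);
 *	if (IS_ERR_OR_NULL(pool))
 *		return PTR_ERR(pool);
 */
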
/**
 * knav_pool_destroy()	- Free a pool of descriptors
 * @pool		- pool handle
 */
void knav_pool_destroy(void *ph)
{
	struct knav_pool *pool = ph;

	if (!pool)
		return;

	if (!pool->region)
		return;

	kdesc_empty_pool(pool);
	mutex_lock(&knav_dev_lock);

	pool->region->used_desc -= pool->num_desc;
	list_del(&pool->region_inst);
	list_del(&pool->list);

	mutex_unlock(&knav_dev_lock);
	kfree(pool->name);
	devm_kfree(kdev->dev, pool);
}
EXPORT_SYMBOL_GPL(knav_pool_destroy);

/**
 * knav_pool_desc_get()	- Get a descriptor from the pool
 * @pool			- pool handle
 *
 * Returns descriptor from the pool.
 */
void *knav_pool_desc_get(void *ph)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;
	unsigned size;
	void *data;

	dma = knav_queue_pop(pool->queue, &size);
	if (unlikely(!dma))
		return ERR_PTR(-ENOMEM);
	data = knav_pool_desc_dma_to_virt(pool, dma);
	return data;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_get);

/**
 * knav_pool_desc_put()	- return a descriptor to the pool
 * @pool			- pool handle
 */
void knav_pool_desc_put(void *ph, void *desc)
{
	struct knav_pool *pool = ph;
	dma_addr_t dma;

	dma = knav_pool_desc_virt_to_dma(pool, desc);
	knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
}
EXPORT_SYMBOL_GPL(knav_pool_desc_put);

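/*
 * Typical get/put cycle (sketch only):
 *
 *	void *desc = knav_pool_desc_get(pool);
 *
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);	// pool exhausted
 *	...				// fill in the descriptor
 *	knav_pool_desc_put(pool, desc);	// return it when done
 */
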
/**
 * knav_pool_desc_map()	- Map descriptor for DMA transfer
 * @pool			- pool handle
 * @desc			- address of descriptor to map
 * @size			- size of descriptor to map
 * @dma				- DMA address return pointer
 * @dma_sz			- adjusted return pointer
 *
 * Returns 0 on success, errno otherwise.
 */
int knav_pool_desc_map(void *ph, void *desc, unsigned size,
					dma_addr_t *dma, unsigned *dma_sz)
{
	struct knav_pool *pool = ph;

	*dma = knav_pool_desc_virt_to_dma(pool, desc);
	size = min(size, pool->region->desc_size);
	size = ALIGN(size, SMP_CACHE_BYTES);
	*dma_sz = size;
	dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE);

	/* Ensure the descriptor reaches memory */
	__iowmb();

	return 0;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_map);

/**
 * knav_pool_desc_unmap()	- Unmap descriptor after DMA transfer
 * @pool			- pool handle
 * @dma				- DMA address of descriptor to unmap
 * @dma_sz			- size of descriptor to unmap
 *
 * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
 * error values on return.
 */
void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz)
{
	struct knav_pool *pool = ph;
	unsigned desc_sz;
	void *desc;

	desc_sz = min(dma_sz, pool->region->desc_size);
	desc = knav_pool_desc_dma_to_virt(pool, dma);
	dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE);
	prefetch(desc);
	return desc;
}
EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);

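/*
 * Map/unmap round trip as a typical client would use it (sketch only;
 * the 128-byte write size is hypothetical):
 *
 *	void *desc = knav_pool_desc_get(pool);
 *	dma_addr_t dma;
 *	unsigned dma_sz;
 *
 *	if (!IS_ERR(desc)) {
 *		knav_pool_desc_map(pool, desc, 128, &dma, &dma_sz);
 *		...		// hand (dma, dma_sz) to the hardware queue
 *		desc = knav_pool_desc_unmap(pool, dma, dma_sz);
 *	}
 */
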
/**
 * knav_pool_count()	- Get the number of descriptors in pool.
 * @pool			- pool handle
 * Returns number of elements in the pool.
 */
int knav_pool_count(void *ph)
{
	struct knav_pool *pool = ph;

	return knav_queue_get_count(pool->queue);
}
EXPORT_SYMBOL_GPL(knav_pool_count);

static void knav_queue_setup_region(struct knav_device *kdev,
					struct knav_region *region)
{
	unsigned hw_num_desc, hw_desc_size, size;
	struct knav_reg_region __iomem  *regs;
	struct knav_qmgr_info *qmgr;
	struct knav_pool *pool;
	int id = region->id;
	struct page *page;

	/* unused region? */
	if (!region->num_desc) {
		dev_warn(kdev->dev, "unused region %s\n", region->name);
		return;
	}

	/* get hardware descriptor value */
	hw_num_desc = ilog2(region->num_desc - 1) + 1;

	/* did we force fit ourselves into nothingness? */
	if (region->num_desc < 32) {
		region->num_desc = 0;
		dev_warn(kdev->dev, "too few descriptors in region %s\n",
			 region->name);
		return;
	}

	size = region->num_desc * region->desc_size;
	region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA |
						GFP_DMA32);
	if (!region->virt_start) {
		region->num_desc = 0;
		dev_err(kdev->dev, "memory alloc failed for region %s\n",
			region->name);
		return;
	}
	region->virt_end = region->virt_start + size;
	page = virt_to_page(region->virt_start);

	region->dma_start = dma_map_page(kdev->dev, page, 0, size,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev->dev, region->dma_start)) {
		dev_err(kdev->dev, "dma map failed for region %s\n",
			region->name);
		goto fail;
	}
	region->dma_end = region->dma_start + size;

	pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		dev_err(kdev->dev, "out of memory allocating dummy pool\n");
		goto fail;
	}
	pool->num_desc = 0;
	pool->region_offset = region->num_desc;
	list_add(&pool->region_inst, &region->pools);

	dev_dbg(kdev->dev,
		"region %s (%d): size:%d, link:%d@%d, dma:%pad-%pad, virt:%p-%p\n",
		region->name, id, region->desc_size, region->num_desc,
		region->link_index, &region->dma_start, &region->dma_end,
		region->virt_start, region->virt_end);

	hw_desc_size = (region->desc_size / 16) - 1;
	hw_num_desc -= 5;

	for_each_qmgr(kdev, qmgr) {
		regs = qmgr->reg_region + id;
		writel_relaxed((u32)region->dma_start, &regs->base);
		writel_relaxed(region->link_index, &regs->start_index);
		writel_relaxed(hw_desc_size << 16 | hw_num_desc,
			       &regs->size_count);
	}
	return;

fail:
	if (region->dma_start)
		dma_unmap_page(kdev->dev, region->dma_start, size,
				DMA_BIDIRECTIONAL);
	if (region->virt_start)
		free_pages_exact(region->virt_start, size);
	region->num_desc = 0;
	return;
}

static const char *knav_queue_find_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}

static int knav_queue_setup_regions(struct knav_device *kdev,
					struct device_node *regions)
{
	struct device *dev = kdev->dev;
	struct knav_region *region;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(regions, child) {
		region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL);
		if (!region) {
			dev_err(dev, "out of memory allocating region\n");
			return -ENOMEM;
		}

		region->name = knav_queue_find_name(child);
		of_property_read_u32(child, "id", &region->id);
		ret = of_property_read_u32_array(child, "region-spec", temp, 2);
		if (!ret) {
			region->num_desc  = temp[0];
			region->desc_size = temp[1];
		} else {
			dev_err(dev, "invalid region info %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}

		if (!of_get_property(child, "link-index", NULL)) {
			dev_err(dev, "No link info for %s\n", region->name);
			devm_kfree(dev, region);
			continue;
		}
		ret = of_property_read_u32(child, "link-index",
					   &region->link_index);
		if (ret) {
			dev_err(dev, "link index not found for %s\n",
				region->name);
			devm_kfree(dev, region);
			continue;
		}

		INIT_LIST_HEAD(&region->pools);
		list_add_tail(&region->list, &kdev->regions);
	}
	if (list_empty(&kdev->regions)) {
		dev_err(dev, "no valid region information found\n");
		return -ENODEV;
	}

	/* Next, we run through the regions and set things up */
	for_each_region(kdev, region)
		knav_queue_setup_region(kdev, region);

	return 0;
}

static int knav_get_link_ram(struct knav_device *kdev,
				       const char *name,
				       struct knav_link_ram_block *block)
{
	struct platform_device *pdev = to_platform_device(kdev->dev);
	struct device_node *node = pdev->dev.of_node;
	u32 temp[2];

	/*
	 * Note: link ram resources are specified in "entry" sized units. In
	 * reality, although entries are ~40bits in hardware, we treat them as
	 * 64-bit entities here.
	 *
	 * For example, to specify the internal link ram for Keystone-I class
	 * devices, we would set the linkram0 resource to 0x80000-0x83fff.
	 *
	 * This gets a bit weird when other link rams are used. For example,
	 * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries
	 * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000,
	 * which accounts for 64-bits per entry, for 16K entries.
	 */
	if (!of_property_read_u32_array(node, name, temp, 2)) {
		if (temp[0]) {
			/*
			 * queue_base specified => using internal or onchip
			 * link ram WARNING - we do not "reserve" this block
			 */
			block->dma = (dma_addr_t)temp[0];
			block->virt = NULL;
			block->size = temp[1];
		} else {
			block->size = temp[1];
			/* queue_base not specified => allocate requested size */
			block->virt = dmam_alloc_coherent(kdev->dev,
						  8 * block->size, &block->dma,
						  GFP_KERNEL);
			if (!block->virt) {
				dev_err(kdev->dev, "failed to alloc linkram\n");
				return -ENOMEM;
			}
		}
	} else {
		return -ENODEV;
	}
	return 0;
}

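/*
 * Example DT usage (hypothetical values): a property of
 *
 *	linkram0 = <0x00080000 0x3fff>;
 *
 * points the driver at on-chip link RAM without reserving it, while a
 * base of zero, e.g.
 *
 *	linkram1 = <0x0 0x10000>;
 *
 * makes the driver allocate the requested number of entries (at 8 bytes
 * per entry) from coherent memory itself.
 */
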
static int knav_queue_setup_link_ram(struct knav_device *kdev)
{
	struct knav_link_ram_block *block;
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		block = &kdev->link_rams[0];
		dev_dbg(kdev->dev, "linkram0: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed((u32)block->dma, &qmgr->reg_config->link_ram_base0);
		writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0);

		block++;
		if (!block->size)
			continue;

		dev_dbg(kdev->dev, "linkram1: dma:%pad, virt:%p, size:%x\n",
			&block->dma, block->virt, block->size);
		writel_relaxed(block->dma, &qmgr->reg_config->link_ram_base1);
	}

	return 0;
}

static int knav_setup_queue_range(struct knav_device *kdev,
					struct device_node *node)
{
	struct device *dev = kdev->dev;
	struct knav_range_info *range;
	struct knav_qmgr_info *qmgr;
	u32 temp[2], start, end, id, index;
	int ret, i;

	range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL);
	if (!range) {
		dev_err(dev, "out of memory allocating range\n");
		return -ENOMEM;
	}

	range->kdev = kdev;
	range->name = knav_queue_find_name(node);
	ret = of_property_read_u32_array(node, "qrange", temp, 2);
	if (!ret) {
		range->queue_base = temp[0] - kdev->base_id;
		range->num_queues = temp[1];
	} else {
		dev_err(dev, "invalid queue range %s\n", range->name);
		devm_kfree(dev, range);
		return -EINVAL;
	}

	for (i = 0; i < RANGE_MAX_IRQS; i++) {
		struct of_phandle_args oirq;

		if (of_irq_parse_one(node, i, &oirq))
			break;

		range->irqs[i].irq = irq_create_of_mapping(&oirq);
		if (range->irqs[i].irq == IRQ_NONE)
			break;

		range->num_irqs++;

		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
			unsigned long mask;
			int bit;

			range->irqs[i].cpu_mask = devm_kzalloc(dev,
							       cpumask_size(), GFP_KERNEL);
			if (!range->irqs[i].cpu_mask)
				return -ENOMEM;

			mask = (oirq.args[2] & 0x0000ff00) >> 8;
			for_each_set_bit(bit, &mask, BITS_PER_LONG)
				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
		}
	}

	range->num_irqs = min(range->num_irqs, range->num_queues);
	if (range->num_irqs)
		range->flags |= RANGE_HAS_IRQ;

	if (of_get_property(node, "qalloc-by-id", NULL))
		range->flags |= RANGE_RESERVED;

	if (of_get_property(node, "accumulator", NULL)) {
		ret = knav_init_acc_range(kdev, node, range);
		if (ret < 0) {
			devm_kfree(dev, range);
			return ret;
		}
	} else {
		range->ops = &knav_gp_range_ops;
	}

	/* set threshold to 1, and flush out the queues */
	for_each_qmgr(kdev, qmgr) {
		start = max(qmgr->start_queue, range->queue_base);
		end   = min(qmgr->start_queue + qmgr->num_queues,
			    range->queue_base + range->num_queues);
		for (id = start; id < end; id++) {
			index = id - qmgr->start_queue;
			writel_relaxed(THRESH_GTE | 1,
				       &qmgr->reg_peek[index].ptr_size_thresh);
			writel_relaxed(0,
				       &qmgr->reg_push[index].ptr_size_thresh);
		}
	}

	list_add_tail(&range->list, &kdev->queue_ranges);
	dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n",
		range->name, range->queue_base,
		range->queue_base + range->num_queues - 1,
		range->num_irqs,
		(range->flags & RANGE_HAS_IRQ) ? ", has irq" : "",
		(range->flags & RANGE_RESERVED) ? ", reserved" : "",
		(range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : "");
	kdev->num_queues_in_use += range->num_queues;
	return 0;
}

static int knav_setup_queue_pools(struct knav_device *kdev,
					struct device_node *queue_pools)
{
	struct device_node *type, *range;
	int ret;

	for_each_child_of_node(queue_pools, type) {
		for_each_child_of_node(type, range) {
			ret = knav_setup_queue_range(kdev, range);
			/* return value ignored, we init the rest... */
		}
	}

	/* ... and barf if they all failed! */
	if (list_empty(&kdev->queue_ranges)) {
		dev_err(kdev->dev, "no valid queue range found\n");
		return -ENODEV;
	}
	return 0;
}

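/*
 * The queue-pools node is expected to carry two levels of children: pool
 * types containing the actual ranges, which is why the function above
 * nests two for_each_child_of_node() loops. A hypothetical DT sketch
 * (names and values illustrative only):
 *
 *	queue-pools {
 *		qpend {
 *			qpend-0 {
 *				qrange = <658 8>;
 *				interrupts = <0 40 0xf04>;
 *			};
 *		};
 *		general-purpose {
 *			gp-0 {
 *				qrange = <4000 64>;
 *			};
 *		};
 *	};
 */
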
static void knav_free_queue_range(struct knav_device *kdev,
					struct knav_range_info *range)
{
	if (range->ops && range->ops->free_range)
		range->ops->free_range(range);
	list_del(&range->list);
	devm_kfree(kdev->dev, range);
}

static void knav_free_queue_ranges(struct knav_device *kdev)
{
	struct knav_range_info *range;

	for (;;) {
		range = first_queue_range(kdev);
		if (!range)
			break;
		knav_free_queue_range(kdev, range);
	}
}

static void knav_queue_free_regions(struct knav_device *kdev)
{
	struct knav_region *region;
	struct knav_pool *pool, *tmp;
	unsigned size;

	for (;;) {
		region = first_region(kdev);
		if (!region)
			break;
		list_for_each_entry_safe(pool, tmp, &region->pools, region_inst)
			knav_pool_destroy(pool);

		size = region->virt_end - region->virt_start;
		if (size)
			free_pages_exact(region->virt_start, size);
		list_del(&region->list);
		devm_kfree(kdev->dev, region);
	}
}

static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
					struct device_node *node, int index)
{
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
			node->name, index);
		return ERR_PTR(ret);
	}

	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
			index, node->name);
	return regs;
}

static int knav_queue_init_qmgrs(struct knav_device *kdev,
					struct device_node *qmgrs)
{
	struct device *dev = kdev->dev;
	struct knav_qmgr_info *qmgr;
	struct device_node *child;
	u32 temp[2];
	int ret;

	for_each_child_of_node(qmgrs, child) {
		qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL);
		if (!qmgr) {
			dev_err(dev, "out of memory allocating qmgr\n");
			return -ENOMEM;
		}

		ret = of_property_read_u32_array(child, "managed-queues",
						 temp, 2);
		if (!ret) {
			qmgr->start_queue = temp[0];
			qmgr->num_queues = temp[1];
		} else {
			dev_err(dev, "invalid qmgr queue range\n");
			devm_kfree(dev, qmgr);
			continue;
		}

		dev_info(dev, "qmgr start queue %d, number of queues %d\n",
			 qmgr->start_queue, qmgr->num_queues);

		qmgr->reg_peek =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PEEK_REG_INDEX);
		qmgr->reg_status =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_STATUS_REG_INDEX);
		qmgr->reg_config =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_CONFIG_REG_INDEX);
		qmgr->reg_region =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_REGION_REG_INDEX);
		qmgr->reg_push =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PUSH_REG_INDEX);
		qmgr->reg_pop =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_POP_REG_INDEX);

		if (IS_ERR(qmgr->reg_peek) || IS_ERR(qmgr->reg_status) ||
		    IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) ||
		    IS_ERR(qmgr->reg_push) || IS_ERR(qmgr->reg_pop)) {
			dev_err(dev, "failed to map qmgr regs\n");
			if (!IS_ERR(qmgr->reg_peek))
				devm_iounmap(dev, qmgr->reg_peek);
			if (!IS_ERR(qmgr->reg_status))
				devm_iounmap(dev, qmgr->reg_status);
			if (!IS_ERR(qmgr->reg_config))
				devm_iounmap(dev, qmgr->reg_config);
			if (!IS_ERR(qmgr->reg_region))
				devm_iounmap(dev, qmgr->reg_region);
			if (!IS_ERR(qmgr->reg_push))
				devm_iounmap(dev, qmgr->reg_push);
			if (!IS_ERR(qmgr->reg_pop))
				devm_iounmap(dev, qmgr->reg_pop);
			devm_kfree(dev, qmgr);
			continue;
		}

		list_add_tail(&qmgr->list, &kdev->qmgrs);
		dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n",
			 qmgr->start_queue, qmgr->num_queues,
			 qmgr->reg_peek, qmgr->reg_status,
			 qmgr->reg_config, qmgr->reg_region,
			 qmgr->reg_push, qmgr->reg_pop);
	}
	return 0;
}

static int knav_queue_init_pdsps(struct knav_device *kdev,
					struct device_node *pdsps)
{
	struct device *dev = kdev->dev;
	struct knav_pdsp_info *pdsp;
	struct device_node *child;

	for_each_child_of_node(pdsps, child) {
		pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL);
		if (!pdsp) {
			dev_err(dev, "out of memory allocating pdsp\n");
			return -ENOMEM;
		}
		pdsp->name = knav_queue_find_name(child);
		pdsp->iram =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_IRAM_REG_INDEX);
		pdsp->regs =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_REGS_REG_INDEX);
		pdsp->intd =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_INTD_REG_INDEX);
		pdsp->command =
			knav_queue_map_reg(kdev, child,
					   KNAV_QUEUE_PDSP_CMD_REG_INDEX);

		if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) ||
		    IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) {
			dev_err(dev, "failed to map pdsp %s regs\n",
				pdsp->name);
			if (!IS_ERR(pdsp->command))
				devm_iounmap(dev, pdsp->command);
			if (!IS_ERR(pdsp->iram))
				devm_iounmap(dev, pdsp->iram);
			if (!IS_ERR(pdsp->regs))
				devm_iounmap(dev, pdsp->regs);
			if (!IS_ERR(pdsp->intd))
				devm_iounmap(dev, pdsp->intd);
			devm_kfree(dev, pdsp);
			continue;
		}
		of_property_read_u32(child, "id", &pdsp->id);
		list_add_tail(&pdsp->list, &kdev->pdsps);
		dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p\n",
			pdsp->name, pdsp->command, pdsp->iram, pdsp->regs,
			pdsp->intd);
	}
	return 0;
}

static int knav_queue_stop_pdsp(struct knav_device *kdev,
			  struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);
	ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout,
				   PDSP_CTRL_RUNNING);
	if (ret < 0) {
		dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name);
		return ret;
	}
	pdsp->loaded = false;
	pdsp->started = false;
	return 0;
}

static int knav_queue_load_pdsp(struct knav_device *kdev,
			  struct knav_pdsp_info *pdsp)
{
	int i, ret, fwlen;
	const struct firmware *fw;
	bool found = false;
	u32 *fwdata;

	for (i = 0; i < ARRAY_SIZE(knav_acc_firmwares); i++) {
		if (knav_acc_firmwares[i]) {
			ret = reject_firmware_direct(&fw,
						     knav_acc_firmwares[i],
						     kdev->dev);
			if (!ret) {
				found = true;
				break;
			}
		}
	}

	if (!found) {
		dev_err(kdev->dev, "failed to get firmware for pdsp\n");
		return -ENODEV;
	}

	dev_info(kdev->dev, "firmware file %s downloaded for PDSP\n",
		 knav_acc_firmwares[i]);

	writel_relaxed(pdsp->id + 1, pdsp->command + 0x18);
	/* download the firmware */
	fwdata = (u32 *)fw->data;
	fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32);
	for (i = 0; i < fwlen; i++)
		writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i);

	release_firmware(fw);
	return 0;
}

static int knav_queue_start_pdsp(struct knav_device *kdev,
			   struct knav_pdsp_info *pdsp)
{
	u32 val, timeout = 1000;
	int ret;

	/* write a command for sync */
	writel_relaxed(0xffffffff, pdsp->command);
	while (readl_relaxed(pdsp->command) != 0xffffffff)
		cpu_relax();

	/* soft reset the PDSP */
	val  = readl_relaxed(&pdsp->regs->control);
	val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET);
	writel_relaxed(val, &pdsp->regs->control);

	/* enable pdsp */
	val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE;
	writel_relaxed(val, &pdsp->regs->control);

	/* wait for command register to clear */
	ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0);
	if (ret < 0) {
		dev_err(kdev->dev,
			"timed out on pdsp %s command register wait\n",
			pdsp->name);
		return ret;
	}
	return 0;
}

static void knav_queue_stop_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;

	/* disable all pdsps */
	for_each_pdsp(kdev, pdsp)
		knav_queue_stop_pdsp(kdev, pdsp);
}

static int knav_queue_start_pdsps(struct knav_device *kdev)
{
	struct knav_pdsp_info *pdsp;
	int ret;

	knav_queue_stop_pdsps(kdev);
	/* now load them all. We return success even if a pdsp
	 * is not loaded, as acc channels are optional depending on
	 * firmware availability in the system. We set the loaded
	 * and started flags, and when initializing the acc range we
	 * check them and init the range only if the pdsp is started.
	 */
	for_each_pdsp(kdev, pdsp) {
		ret = knav_queue_load_pdsp(kdev, pdsp);
		if (!ret)
			pdsp->loaded = true;
	}

	for_each_pdsp(kdev, pdsp) {
		if (pdsp->loaded) {
			ret = knav_queue_start_pdsp(kdev, pdsp);
			if (!ret)
				pdsp->started = true;
		}
	}
	return 0;
}

static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id)
{
	struct knav_qmgr_info *qmgr;

	for_each_qmgr(kdev, qmgr) {
		if ((id >= qmgr->start_queue) &&
		    (id < qmgr->start_queue + qmgr->num_queues))
			return qmgr;
	}
	return NULL;
}

static int knav_queue_init_queue(struct knav_device *kdev,
					struct knav_range_info *range,
					struct knav_queue_inst *inst,
					unsigned id)
{
	char irq_name[KNAV_NAME_SIZE];

	inst->qmgr = knav_find_qmgr(id);
	if (!inst->qmgr)
		return -1;

	INIT_LIST_HEAD(&inst->handles);
	inst->kdev = kdev;
	inst->range = range;
	inst->irq_num = -1;
	inst->id = id;
	scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id);
	inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL);

	if (range->ops && range->ops->init_queue)
		return range->ops->init_queue(range, inst);
	else
		return 0;
}

static int knav_queue_init_queues(struct knav_device *kdev)
{
	struct knav_range_info *range;
	int size, id, base_idx;
	int idx = 0, ret = 0;

	/* how much do we need for instance data? */
	size = sizeof(struct knav_queue_inst);

	/* round this up to a power of 2, keep the index to instance
	 * arithmetic fast.
	 */
	kdev->inst_shift = order_base_2(size);
	size = (1 << kdev->inst_shift) * kdev->num_queues_in_use;
	kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL);
	if (!kdev->instances)
		return -ENOMEM;

	for_each_queue_range(kdev, range) {
		if (range->ops && range->ops->init_range)
			range->ops->init_range(range);
		base_idx = idx;
		for (id = range->queue_base;
		     id < range->queue_base + range->num_queues; id++, idx++) {
			ret = knav_queue_init_queue(kdev, range,
					knav_queue_idx_to_inst(kdev, idx), id);
			if (ret < 0)
				return ret;
		}
		range->queue_base_inst =
			knav_queue_idx_to_inst(kdev, base_idx);
	}
	return 0;
}

static int knav_queue_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *qmgrs, *queue_pools, *regions, *pdsps;
	struct device *dev = &pdev->dev;
	u32 temp[2];
	int ret;

	if (!node) {
		dev_err(dev, "device tree info unavailable\n");
		return -ENODEV;
	}

	kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "memory allocation failed\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, kdev);
	kdev->dev = dev;
	INIT_LIST_HEAD(&kdev->queue_ranges);
	INIT_LIST_HEAD(&kdev->qmgrs);
	INIT_LIST_HEAD(&kdev->pools);
	INIT_LIST_HEAD(&kdev->regions);
	INIT_LIST_HEAD(&kdev->pdsps);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		dev_err(dev, "Failed to enable QMSS\n");
		return ret;
	}

	if (of_property_read_u32_array(node, "queue-range", temp, 2)) {
		dev_err(dev, "queue-range not specified\n");
		ret = -ENODEV;
		goto err;
	}
	kdev->base_id    = temp[0];
	kdev->num_queues = temp[1];

	/* Initialize queue managers using device tree configuration */
	qmgrs = of_get_child_by_name(node, "qmgrs");
	if (!qmgrs) {
		dev_err(dev, "queue manager info not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_init_qmgrs(kdev, qmgrs);
	of_node_put(qmgrs);
	if (ret)
		goto err;

	/* get pdsp configuration values from device tree */
	pdsps = of_get_child_by_name(node, "pdsps");
	if (pdsps) {
		ret = knav_queue_init_pdsps(kdev, pdsps);
		if (ret)
			goto err;

		ret = knav_queue_start_pdsps(kdev);
		if (ret)
			goto err;
	}
	of_node_put(pdsps);

	/* get usable queue range values from device tree */
	queue_pools = of_get_child_by_name(node, "queue-pools");
	if (!queue_pools) {
		dev_err(dev, "queue-pools not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_setup_queue_pools(kdev, queue_pools);
	of_node_put(queue_pools);
	if (ret)
		goto err;

	ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]);
	if (ret) {
		dev_err(kdev->dev, "could not setup linking ram\n");
		goto err;
	}

	ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]);
	if (ret) {
		/*
		 * nothing really, we have one linking ram already, so we just
		 * live within our means
		 */
	}

	ret = knav_queue_setup_link_ram(kdev);
	if (ret)
		goto err;

	regions = of_get_child_by_name(node, "descriptor-regions");
	if (!regions) {
		dev_err(dev, "descriptor-regions not specified\n");
		ret = -ENODEV;
		goto err;
	}
	ret = knav_queue_setup_regions(kdev, regions);
	of_node_put(regions);
	if (ret)
		goto err;

	ret = knav_queue_init_queues(kdev);
	if (ret < 0) {
		dev_err(dev, "hwqueue initialization failed\n");
		goto err;
	}

	debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_queue_debug_ops);
	return 0;

err:
	knav_queue_stop_pdsps(kdev);
	knav_queue_free_regions(kdev);
	knav_free_queue_ranges(kdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int knav_queue_remove(struct platform_device *pdev)
{
	/* TODO: Free resources */
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

/* Match table for of_platform binding */
static struct of_device_id keystone_qmss_of_match[] = {
	{ .compatible = "ti,keystone-navigator-qmss", },
	{},
};
MODULE_DEVICE_TABLE(of, keystone_qmss_of_match);

static struct platform_driver keystone_qmss_driver = {
	.probe		= knav_queue_probe,
	.remove		= knav_queue_remove,
	.driver		= {
		.name	= "keystone-navigator-qmss",
		.of_match_table = keystone_qmss_of_match,
	},
};
module_platform_driver(keystone_qmss_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");