1 // SPDX-License-Identifier: GPL-2.0
3 * core.c - Implementation of core module of MOST Linux driver stack
5 * Copyright (C) 2013-2015 Microchip Technology Germany II GmbH & Co. KG
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/device.h>
14 #include <linux/list.h>
15 #include <linux/poll.h>
16 #include <linux/wait.h>
17 #include <linux/kobject.h>
18 #include <linux/mutex.h>
19 #include <linux/completion.h>
20 #include <linux/sysfs.h>
21 #include <linux/kthread.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/idr.h>
24 #include <most/core.h>
26 #define MAX_CHANNELS 64
27 #define STRING_SIZE 80
29 static struct ida mdev_id;
30 static int dummy_num_buffers;
32 static struct mostcore {
34 struct device_driver drv;
36 struct list_head comp_list;
39 #define to_driver(d) container_of(d, struct mostcore, drv)
42 struct core_component *comp;
49 struct completion cleanup;
51 atomic_t mbo_nq_level;
53 char name[STRING_SIZE];
55 struct mutex start_mutex;
56 struct mutex nq_mutex; /* nq thread synchronization */
58 struct most_interface *iface;
59 struct most_channel_config cfg;
62 struct list_head fifo;
64 struct list_head halt_fifo;
65 struct list_head list;
68 struct list_head trash_fifo;
69 struct task_struct *hdm_enqueue_task;
70 wait_queue_head_t hdm_fifo_wq;
74 #define to_channel(d) container_of(d, struct most_channel, dev)
76 struct interface_private {
78 char name[STRING_SIZE];
79 struct most_channel *channel[MAX_CHANNELS];
80 struct list_head channel_list;
84 int most_ch_data_type;
87 { MOST_CH_CONTROL, "control\n" },
88 { MOST_CH_ASYNC, "async\n" },
89 { MOST_CH_SYNC, "sync\n" },
90 { MOST_CH_ISOC, "isoc\n"},
91 { MOST_CH_ISOC, "isoc_avp\n"},
95 * list_pop_mbo - retrieves the first MBO of the list and removes it
96 * @ptr: the list head to grab the MBO from.
/*
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
 *
 * Caller must hold the fifo lock protecting @ptr.
 */
#define list_pop_mbo(ptr)						\
({									\
	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
	list_del(&_mbo->list);						\
	_mbo;								\
})
106 * most_free_mbo_coherent - free an MBO and its coherent buffer
109 static void most_free_mbo_coherent(struct mbo *mbo)
111 struct most_channel *c = mbo->context;
112 u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
114 if (c->iface->dma_free)
115 c->iface->dma_free(mbo, coherent_buf_size);
117 kfree(mbo->virt_address);
119 if (atomic_sub_and_test(1, &c->mbo_ref))
120 complete(&c->cleanup);
124 * flush_channel_fifos - clear the channel fifos
125 * @c: pointer to channel object
127 static void flush_channel_fifos(struct most_channel *c)
129 unsigned long flags, hf_flags;
130 struct mbo *mbo, *tmp;
132 if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
135 spin_lock_irqsave(&c->fifo_lock, flags);
136 list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
137 list_del(&mbo->list);
138 spin_unlock_irqrestore(&c->fifo_lock, flags);
139 most_free_mbo_coherent(mbo);
140 spin_lock_irqsave(&c->fifo_lock, flags);
142 spin_unlock_irqrestore(&c->fifo_lock, flags);
144 spin_lock_irqsave(&c->fifo_lock, hf_flags);
145 list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
146 list_del(&mbo->list);
147 spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
148 most_free_mbo_coherent(mbo);
149 spin_lock_irqsave(&c->fifo_lock, hf_flags);
151 spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
153 if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
154 pr_info("WARN: fifo | trash fifo not empty\n");
158 * flush_trash_fifo - clear the trash fifo
159 * @c: pointer to channel object
161 static int flush_trash_fifo(struct most_channel *c)
163 struct mbo *mbo, *tmp;
166 spin_lock_irqsave(&c->fifo_lock, flags);
167 list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
168 list_del(&mbo->list);
169 spin_unlock_irqrestore(&c->fifo_lock, flags);
170 most_free_mbo_coherent(mbo);
171 spin_lock_irqsave(&c->fifo_lock, flags);
173 spin_unlock_irqrestore(&c->fifo_lock, flags);
177 static ssize_t available_directions_show(struct device *dev,
178 struct device_attribute *attr,
181 struct most_channel *c = to_channel(dev);
182 unsigned int i = c->channel_id;
185 if (c->iface->channel_vector[i].direction & MOST_CH_RX)
187 if (c->iface->channel_vector[i].direction & MOST_CH_TX)
193 static ssize_t available_datatypes_show(struct device *dev,
194 struct device_attribute *attr,
197 struct most_channel *c = to_channel(dev);
198 unsigned int i = c->channel_id;
201 if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
202 strcat(buf, "control ");
203 if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
204 strcat(buf, "async ");
205 if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
206 strcat(buf, "sync ");
207 if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
208 strcat(buf, "isoc ");
213 static ssize_t number_of_packet_buffers_show(struct device *dev,
214 struct device_attribute *attr,
217 struct most_channel *c = to_channel(dev);
218 unsigned int i = c->channel_id;
220 return snprintf(buf, PAGE_SIZE, "%d\n",
221 c->iface->channel_vector[i].num_buffers_packet);
224 static ssize_t number_of_stream_buffers_show(struct device *dev,
225 struct device_attribute *attr,
228 struct most_channel *c = to_channel(dev);
229 unsigned int i = c->channel_id;
231 return snprintf(buf, PAGE_SIZE, "%d\n",
232 c->iface->channel_vector[i].num_buffers_streaming);
235 static ssize_t size_of_packet_buffer_show(struct device *dev,
236 struct device_attribute *attr,
239 struct most_channel *c = to_channel(dev);
240 unsigned int i = c->channel_id;
242 return snprintf(buf, PAGE_SIZE, "%d\n",
243 c->iface->channel_vector[i].buffer_size_packet);
246 static ssize_t size_of_stream_buffer_show(struct device *dev,
247 struct device_attribute *attr,
250 struct most_channel *c = to_channel(dev);
251 unsigned int i = c->channel_id;
253 return snprintf(buf, PAGE_SIZE, "%d\n",
254 c->iface->channel_vector[i].buffer_size_streaming);
257 static ssize_t channel_starving_show(struct device *dev,
258 struct device_attribute *attr,
261 struct most_channel *c = to_channel(dev);
263 return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
266 static ssize_t set_number_of_buffers_show(struct device *dev,
267 struct device_attribute *attr,
270 struct most_channel *c = to_channel(dev);
272 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
275 static ssize_t set_number_of_buffers_store(struct device *dev,
276 struct device_attribute *attr,
280 struct most_channel *c = to_channel(dev);
281 int ret = kstrtou16(buf, 0, &c->cfg.num_buffers);
288 static ssize_t set_buffer_size_show(struct device *dev,
289 struct device_attribute *attr,
292 struct most_channel *c = to_channel(dev);
294 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
297 static ssize_t set_buffer_size_store(struct device *dev,
298 struct device_attribute *attr,
302 struct most_channel *c = to_channel(dev);
303 int ret = kstrtou16(buf, 0, &c->cfg.buffer_size);
310 static ssize_t set_direction_show(struct device *dev,
311 struct device_attribute *attr,
314 struct most_channel *c = to_channel(dev);
316 if (c->cfg.direction & MOST_CH_TX)
317 return snprintf(buf, PAGE_SIZE, "tx\n");
318 else if (c->cfg.direction & MOST_CH_RX)
319 return snprintf(buf, PAGE_SIZE, "rx\n");
320 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
323 static ssize_t set_direction_store(struct device *dev,
324 struct device_attribute *attr,
328 struct most_channel *c = to_channel(dev);
330 if (!strcmp(buf, "dir_rx\n")) {
331 c->cfg.direction = MOST_CH_RX;
332 } else if (!strcmp(buf, "rx\n")) {
333 c->cfg.direction = MOST_CH_RX;
334 } else if (!strcmp(buf, "dir_tx\n")) {
335 c->cfg.direction = MOST_CH_TX;
336 } else if (!strcmp(buf, "tx\n")) {
337 c->cfg.direction = MOST_CH_TX;
339 pr_info("WARN: invalid attribute settings\n");
345 static ssize_t set_datatype_show(struct device *dev,
346 struct device_attribute *attr,
350 struct most_channel *c = to_channel(dev);
352 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
353 if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
354 return snprintf(buf, PAGE_SIZE, "%s", ch_data_type[i].name);
356 return snprintf(buf, PAGE_SIZE, "unconfigured\n");
359 static ssize_t set_datatype_store(struct device *dev,
360 struct device_attribute *attr,
365 struct most_channel *c = to_channel(dev);
367 for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
368 if (!strcmp(buf, ch_data_type[i].name)) {
369 c->cfg.data_type = ch_data_type[i].most_ch_data_type;
374 if (i == ARRAY_SIZE(ch_data_type)) {
375 pr_info("WARN: invalid attribute settings\n");
381 static ssize_t set_subbuffer_size_show(struct device *dev,
382 struct device_attribute *attr,
385 struct most_channel *c = to_channel(dev);
387 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
390 static ssize_t set_subbuffer_size_store(struct device *dev,
391 struct device_attribute *attr,
395 struct most_channel *c = to_channel(dev);
396 int ret = kstrtou16(buf, 0, &c->cfg.subbuffer_size);
403 static ssize_t set_packets_per_xact_show(struct device *dev,
404 struct device_attribute *attr,
407 struct most_channel *c = to_channel(dev);
409 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
412 static ssize_t set_packets_per_xact_store(struct device *dev,
413 struct device_attribute *attr,
417 struct most_channel *c = to_channel(dev);
418 int ret = kstrtou16(buf, 0, &c->cfg.packets_per_xact);
425 static ssize_t set_dbr_size_show(struct device *dev,
426 struct device_attribute *attr, char *buf)
428 struct most_channel *c = to_channel(dev);
430 return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);
433 static ssize_t set_dbr_size_store(struct device *dev,
434 struct device_attribute *attr,
435 const char *buf, size_t count)
437 struct most_channel *c = to_channel(dev);
438 int ret = kstrtou16(buf, 0, &c->cfg.dbr_size);
445 #define DEV_ATTR(_name) (&dev_attr_##_name.attr)
447 static DEVICE_ATTR_RO(available_directions);
448 static DEVICE_ATTR_RO(available_datatypes);
449 static DEVICE_ATTR_RO(number_of_packet_buffers);
450 static DEVICE_ATTR_RO(number_of_stream_buffers);
451 static DEVICE_ATTR_RO(size_of_stream_buffer);
452 static DEVICE_ATTR_RO(size_of_packet_buffer);
453 static DEVICE_ATTR_RO(channel_starving);
454 static DEVICE_ATTR_RW(set_buffer_size);
455 static DEVICE_ATTR_RW(set_number_of_buffers);
456 static DEVICE_ATTR_RW(set_direction);
457 static DEVICE_ATTR_RW(set_datatype);
458 static DEVICE_ATTR_RW(set_subbuffer_size);
459 static DEVICE_ATTR_RW(set_packets_per_xact);
460 static DEVICE_ATTR_RW(set_dbr_size);
462 static struct attribute *channel_attrs[] = {
463 DEV_ATTR(available_directions),
464 DEV_ATTR(available_datatypes),
465 DEV_ATTR(number_of_packet_buffers),
466 DEV_ATTR(number_of_stream_buffers),
467 DEV_ATTR(size_of_stream_buffer),
468 DEV_ATTR(size_of_packet_buffer),
469 DEV_ATTR(channel_starving),
470 DEV_ATTR(set_buffer_size),
471 DEV_ATTR(set_number_of_buffers),
472 DEV_ATTR(set_direction),
473 DEV_ATTR(set_datatype),
474 DEV_ATTR(set_subbuffer_size),
475 DEV_ATTR(set_packets_per_xact),
476 DEV_ATTR(set_dbr_size),
480 static struct attribute_group channel_attr_group = {
481 .attrs = channel_attrs,
484 static const struct attribute_group *channel_attr_groups[] = {
489 static ssize_t description_show(struct device *dev,
490 struct device_attribute *attr,
493 struct most_interface *iface = to_most_interface(dev);
495 return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
498 static ssize_t interface_show(struct device *dev,
499 struct device_attribute *attr,
502 struct most_interface *iface = to_most_interface(dev);
504 switch (iface->interface) {
506 return snprintf(buf, PAGE_SIZE, "loopback\n");
508 return snprintf(buf, PAGE_SIZE, "i2c\n");
510 return snprintf(buf, PAGE_SIZE, "i2s\n");
512 return snprintf(buf, PAGE_SIZE, "tsi\n");
514 return snprintf(buf, PAGE_SIZE, "hbi\n");
515 case ITYPE_MEDIALB_DIM:
516 return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
517 case ITYPE_MEDIALB_DIM2:
518 return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
520 return snprintf(buf, PAGE_SIZE, "usb\n");
522 return snprintf(buf, PAGE_SIZE, "pcie\n");
524 return snprintf(buf, PAGE_SIZE, "unknown\n");
527 static DEVICE_ATTR_RO(description);
528 static DEVICE_ATTR_RO(interface);
530 static struct attribute *interface_attrs[] = {
531 DEV_ATTR(description),
536 static struct attribute_group interface_attr_group = {
537 .attrs = interface_attrs,
540 static const struct attribute_group *interface_attr_groups[] = {
541 &interface_attr_group,
545 static struct core_component *match_component(char *name)
547 struct core_component *comp;
549 list_for_each_entry(comp, &mc.comp_list, list) {
550 if (!strcmp(comp->name, name))
556 struct show_links_data {
561 static int print_links(struct device *dev, void *data)
563 struct show_links_data *d = data;
566 struct most_channel *c;
567 struct most_interface *iface = to_most_interface(dev);
569 list_for_each_entry(c, &iface->p->channel_list, list) {
571 offs += snprintf(buf + offs,
575 dev_name(&iface->dev),
579 offs += snprintf(buf + offs,
583 dev_name(&iface->dev),
591 static ssize_t links_show(struct device_driver *drv, char *buf)
593 struct show_links_data d = { .buf = buf };
595 bus_for_each_dev(&mc.bus, NULL, &d, print_links);
599 static ssize_t components_show(struct device_driver *drv, char *buf)
601 struct core_component *comp;
604 list_for_each_entry(comp, &mc.comp_list, list) {
605 offs += snprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
612 * split_string - parses buf and extracts ':' separated substrings.
614 * @buf: complete string from attribute 'add_channel'
615 * @a: storage for 1st substring (=interface name)
616 * @b: storage for 2nd substring (=channel name)
617 * @c: storage for 3rd substring (=component name)
618 * @d: storage optional 4th substring (=user defined name)
622 * Input: "mdev0:ch6:cdev:my_channel\n" or
623 * "mdev0:ch6:cdev:my_channel"
625 * Output: *a -> "mdev0", *b -> "ch6", *c -> "cdev" *d -> "my_channel"
627 * Input: "mdev1:ep81:cdev\n"
628 * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d -> ""
630 * Input: "mdev1:ep81"
631 * Output: *a -> "mdev1", *b -> "ep81", *c -> "cdev" *d == NULL
633 static int split_string(char *buf, char **a, char **b, char **c, char **d)
635 *a = strsep(&buf, ":");
639 *b = strsep(&buf, ":\n");
643 *c = strsep(&buf, ":\n");
648 *d = strsep(&buf, ":\n");
653 static int match_bus_dev(struct device *dev, void *data)
655 char *mdev_name = data;
657 return !strcmp(dev_name(dev), mdev_name);
661 * get_channel - get pointer to channel
662 * @mdev: name of the device interface
663 * @mdev_ch: name of channel
665 static struct most_channel *get_channel(char *mdev, char *mdev_ch)
667 struct device *dev = NULL;
668 struct most_interface *iface;
669 struct most_channel *c, *tmp;
671 dev = bus_find_device(&mc.bus, NULL, mdev, match_bus_dev);
674 iface = to_most_interface(dev);
675 list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
676 if (!strcmp(dev_name(&c->dev), mdev_ch))
683 inline int link_channel_to_component(struct most_channel *c,
684 struct core_component *comp,
688 struct core_component **comp_ptr;
691 comp_ptr = &c->pipe0.comp;
692 else if (!c->pipe1.comp)
693 comp_ptr = &c->pipe1.comp;
698 ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, comp_param);
707 * add_link_store - store function for add_link attribute
708 * @drv: device driver
710 * @len: buffer length
712 * This parses the string given by buf and splits it into
713 * four substrings. Note: last substring is optional. In case a cdev
714 * component is loaded the optional 4th substring will make up the name of
715 * device node in the /dev directory. If omitted, the device node will
716 * inherit the channel's name within sysfs.
718 * Searches for (device, channel) pair and probes the component
721 * (1) echo "mdev0:ch6:cdev:my_rxchannel" >add_link
722 * (2) echo "mdev1:ep81:cdev" >add_link
724 * (1) would create the device node /dev/my_rxchannel
725 * (2) would create the device node /dev/mdev1-ep81
727 static ssize_t add_link_store(struct device_driver *drv,
731 struct most_channel *c;
732 struct core_component *comp;
733 char buffer[STRING_SIZE];
738 char devnod_buf[STRING_SIZE];
740 size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
742 strlcpy(buffer, buf, max_len);
743 ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, &comp_param);
746 comp = match_component(comp_name);
749 if (!comp_param || *comp_param == 0) {
750 snprintf(devnod_buf, sizeof(devnod_buf), "%s-%s", mdev,
752 comp_param = devnod_buf;
755 c = get_channel(mdev, mdev_ch);
759 ret = link_channel_to_component(c, comp, comp_param);
766 * remove_link_store - store function for remove_link attribute
767 * @drv: device driver
769 * @len: buffer length
772 * echo "mdev0:ep81" >remove_link
774 static ssize_t remove_link_store(struct device_driver *drv,
778 struct most_channel *c;
779 struct core_component *comp;
780 char buffer[STRING_SIZE];
785 size_t max_len = min_t(size_t, len + 1, STRING_SIZE);
787 strlcpy(buffer, buf, max_len);
788 ret = split_string(buffer, &mdev, &mdev_ch, &comp_name, NULL);
791 comp = match_component(comp_name);
794 c = get_channel(mdev, mdev_ch);
798 if (comp->disconnect_channel(c->iface, c->channel_id))
800 if (c->pipe0.comp == comp)
801 c->pipe0.comp = NULL;
802 if (c->pipe1.comp == comp)
803 c->pipe1.comp = NULL;
807 #define DRV_ATTR(_name) (&driver_attr_##_name.attr)
809 static DRIVER_ATTR_RO(links);
810 static DRIVER_ATTR_RO(components);
811 static DRIVER_ATTR_WO(add_link);
812 static DRIVER_ATTR_WO(remove_link);
814 static struct attribute *mc_attrs[] = {
816 DRV_ATTR(components),
818 DRV_ATTR(remove_link),
822 static struct attribute_group mc_attr_group = {
826 static const struct attribute_group *mc_attr_groups[] = {
831 static int most_match(struct device *dev, struct device_driver *drv)
833 if (!strcmp(dev_name(dev), "most"))
839 static inline void trash_mbo(struct mbo *mbo)
842 struct most_channel *c = mbo->context;
844 spin_lock_irqsave(&c->fifo_lock, flags);
845 list_add(&mbo->list, &c->trash_fifo);
846 spin_unlock_irqrestore(&c->fifo_lock, flags);
849 static bool hdm_mbo_ready(struct most_channel *c)
856 spin_lock_irq(&c->fifo_lock);
857 empty = list_empty(&c->halt_fifo);
858 spin_unlock_irq(&c->fifo_lock);
863 static void nq_hdm_mbo(struct mbo *mbo)
866 struct most_channel *c = mbo->context;
868 spin_lock_irqsave(&c->fifo_lock, flags);
869 list_add_tail(&mbo->list, &c->halt_fifo);
870 spin_unlock_irqrestore(&c->fifo_lock, flags);
871 wake_up_interruptible(&c->hdm_fifo_wq);
874 static int hdm_enqueue_thread(void *data)
876 struct most_channel *c = data;
879 typeof(c->iface->enqueue) enqueue = c->iface->enqueue;
881 while (likely(!kthread_should_stop())) {
882 wait_event_interruptible(c->hdm_fifo_wq,
884 kthread_should_stop());
886 mutex_lock(&c->nq_mutex);
887 spin_lock_irq(&c->fifo_lock);
888 if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
889 spin_unlock_irq(&c->fifo_lock);
890 mutex_unlock(&c->nq_mutex);
894 mbo = list_pop_mbo(&c->halt_fifo);
895 spin_unlock_irq(&c->fifo_lock);
897 if (c->cfg.direction == MOST_CH_RX)
898 mbo->buffer_length = c->cfg.buffer_size;
900 ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
901 mutex_unlock(&c->nq_mutex);
904 pr_err("hdm enqueue failed\n");
906 c->hdm_enqueue_task = NULL;
914 static int run_enqueue_thread(struct most_channel *c, int channel_id)
916 struct task_struct *task =
917 kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
921 return PTR_ERR(task);
923 c->hdm_enqueue_task = task;
928 * arm_mbo - recycle MBO for further usage
931 * This puts an MBO back to the list to have it ready for up coming
934 * In case the MBO belongs to a channel that recently has been
935 * poisoned, the MBO is scheduled to be trashed.
936 * Calls the completion handler of an attached component.
938 static void arm_mbo(struct mbo *mbo)
941 struct most_channel *c;
945 if (c->is_poisoned) {
950 spin_lock_irqsave(&c->fifo_lock, flags);
951 ++*mbo->num_buffers_ptr;
952 list_add_tail(&mbo->list, &c->fifo);
953 spin_unlock_irqrestore(&c->fifo_lock, flags);
955 if (c->pipe0.refs && c->pipe0.comp->tx_completion)
956 c->pipe0.comp->tx_completion(c->iface, c->channel_id);
958 if (c->pipe1.refs && c->pipe1.comp->tx_completion)
959 c->pipe1.comp->tx_completion(c->iface, c->channel_id);
963 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
964 * @c: pointer to interface channel
965 * @dir: direction of the channel
966 * @compl: pointer to completion function
968 * This allocates buffer objects including the containing DMA coherent
969 * buffer and puts them in the fifo.
970 * Buffers of Rx channels are put in the kthread fifo, hence immediately
971 * submitted to the HDM.
973 * Returns the number of allocated and enqueued MBOs.
975 static int arm_mbo_chain(struct most_channel *c, int dir,
976 void (*compl)(struct mbo *))
981 u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
983 atomic_set(&c->mbo_nq_level, 0);
985 for (i = 0; i < c->cfg.num_buffers; i++) {
986 mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
992 mbo->hdm_channel_id = c->channel_id;
993 if (c->iface->dma_alloc) {
995 c->iface->dma_alloc(mbo, coherent_buf_size);
998 kzalloc(coherent_buf_size, GFP_KERNEL);
1000 if (!mbo->virt_address)
1003 mbo->complete = compl;
1004 mbo->num_buffers_ptr = &dummy_num_buffers;
1005 if (dir == MOST_CH_RX) {
1007 atomic_inc(&c->mbo_nq_level);
1009 spin_lock_irqsave(&c->fifo_lock, flags);
1010 list_add_tail(&mbo->list, &c->fifo);
1011 spin_unlock_irqrestore(&c->fifo_lock, flags);
1014 return c->cfg.num_buffers;
1020 flush_channel_fifos(c);
1025 * most_submit_mbo - submits an MBO to fifo
1028 void most_submit_mbo(struct mbo *mbo)
1030 if (WARN_ONCE(!mbo || !mbo->context,
1031 "bad mbo or missing channel reference\n"))
1036 EXPORT_SYMBOL_GPL(most_submit_mbo);
1039 * most_write_completion - write completion handler
1042 * This recycles the MBO for further usage. In case the channel has been
1043 * poisoned, the MBO is scheduled to be trashed.
1045 static void most_write_completion(struct mbo *mbo)
1047 struct most_channel *c;
1050 if (mbo->status == MBO_E_INVAL)
1051 pr_info("WARN: Tx MBO status: invalid\n");
1052 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
1058 int channel_has_mbo(struct most_interface *iface, int id,
1059 struct core_component *comp)
1061 struct most_channel *c = iface->p->channel[id];
1062 unsigned long flags;
1068 if (c->pipe0.refs && c->pipe1.refs &&
1069 ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
1070 (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
1073 spin_lock_irqsave(&c->fifo_lock, flags);
1074 empty = list_empty(&c->fifo);
1075 spin_unlock_irqrestore(&c->fifo_lock, flags);
1078 EXPORT_SYMBOL_GPL(channel_has_mbo);
1081 * most_get_mbo - get pointer to an MBO of pool
1082 * @iface: pointer to interface instance
1084 * @comp: driver component
1086 * This attempts to get a free buffer out of the channel fifo.
1087 * Returns a pointer to MBO on success or NULL otherwise.
1089 struct mbo *most_get_mbo(struct most_interface *iface, int id,
1090 struct core_component *comp)
1093 struct most_channel *c;
1094 unsigned long flags;
1095 int *num_buffers_ptr;
1097 c = iface->p->channel[id];
1101 if (c->pipe0.refs && c->pipe1.refs &&
1102 ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
1103 (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
1106 if (comp == c->pipe0.comp)
1107 num_buffers_ptr = &c->pipe0.num_buffers;
1108 else if (comp == c->pipe1.comp)
1109 num_buffers_ptr = &c->pipe1.num_buffers;
1111 num_buffers_ptr = &dummy_num_buffers;
1113 spin_lock_irqsave(&c->fifo_lock, flags);
1114 if (list_empty(&c->fifo)) {
1115 spin_unlock_irqrestore(&c->fifo_lock, flags);
1118 mbo = list_pop_mbo(&c->fifo);
1120 spin_unlock_irqrestore(&c->fifo_lock, flags);
1122 mbo->num_buffers_ptr = num_buffers_ptr;
1123 mbo->buffer_length = c->cfg.buffer_size;
1126 EXPORT_SYMBOL_GPL(most_get_mbo);
1129 * most_put_mbo - return buffer to pool
1132 void most_put_mbo(struct mbo *mbo)
1134 struct most_channel *c = mbo->context;
1136 if (c->cfg.direction == MOST_CH_TX) {
1141 atomic_inc(&c->mbo_nq_level);
1143 EXPORT_SYMBOL_GPL(most_put_mbo);
1146 * most_read_completion - read completion handler
1149 * This function is called by the HDM when data has been received from the
1150 * hardware and copied to the buffer of the MBO.
1152 * In case the channel has been poisoned it puts the buffer in the trash queue.
1153 * Otherwise, it passes the buffer to an component for further processing.
1155 static void most_read_completion(struct mbo *mbo)
1157 struct most_channel *c = mbo->context;
1159 if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
1164 if (mbo->status == MBO_E_INVAL) {
1166 atomic_inc(&c->mbo_nq_level);
1170 if (atomic_sub_and_test(1, &c->mbo_nq_level))
1173 if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
1174 c->pipe0.comp->rx_completion(mbo) == 0)
1177 if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
1178 c->pipe1.comp->rx_completion(mbo) == 0)
1185 * most_start_channel - prepares a channel for communication
1186 * @iface: pointer to interface instance
1188 * @comp: driver component
1190 * This prepares the channel for usage. Cross-checks whether the
1191 * channel's been properly configured.
1193 * Returns 0 on success or error code otherwise.
1195 int most_start_channel(struct most_interface *iface, int id,
1196 struct core_component *comp)
1200 struct most_channel *c = iface->p->channel[id];
1205 mutex_lock(&c->start_mutex);
1206 if (c->pipe0.refs + c->pipe1.refs > 0)
1207 goto out; /* already started by another component */
1209 if (!try_module_get(iface->mod)) {
1210 pr_info("failed to acquire HDM lock\n");
1211 mutex_unlock(&c->start_mutex);
1215 c->cfg.extra_len = 0;
1216 if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
1217 pr_info("channel configuration failed. Go check settings...\n");
1222 init_waitqueue_head(&c->hdm_fifo_wq);
1224 if (c->cfg.direction == MOST_CH_RX)
1225 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1226 most_read_completion);
1228 num_buffer = arm_mbo_chain(c, c->cfg.direction,
1229 most_write_completion);
1230 if (unlikely(!num_buffer)) {
1235 ret = run_enqueue_thread(c, id);
1240 c->pipe0.num_buffers = c->cfg.num_buffers / 2;
1241 c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
1242 atomic_set(&c->mbo_ref, num_buffer);
1245 if (comp == c->pipe0.comp)
1247 if (comp == c->pipe1.comp)
1249 mutex_unlock(&c->start_mutex);
1253 module_put(iface->mod);
1254 mutex_unlock(&c->start_mutex);
1257 EXPORT_SYMBOL_GPL(most_start_channel);
1260 * most_stop_channel - stops a running channel
1261 * @iface: pointer to interface instance
1263 * @comp: driver component
1265 int most_stop_channel(struct most_interface *iface, int id,
1266 struct core_component *comp)
1268 struct most_channel *c;
1270 if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
1271 pr_err("Bad interface or index out of range\n");
1274 c = iface->p->channel[id];
1278 mutex_lock(&c->start_mutex);
1279 if (c->pipe0.refs + c->pipe1.refs >= 2)
1282 if (c->hdm_enqueue_task)
1283 kthread_stop(c->hdm_enqueue_task);
1284 c->hdm_enqueue_task = NULL;
1287 module_put(iface->mod);
1289 c->is_poisoned = true;
1290 if (c->iface->poison_channel(c->iface, c->channel_id)) {
1291 pr_err("Cannot stop channel %d of mdev %s\n", c->channel_id,
1292 c->iface->description);
1293 mutex_unlock(&c->start_mutex);
1296 flush_trash_fifo(c);
1297 flush_channel_fifos(c);
1299 #ifdef CMPL_INTERRUPTIBLE
1300 if (wait_for_completion_interruptible(&c->cleanup)) {
1301 pr_info("Interrupted while clean up ch %d\n", c->channel_id);
1302 mutex_unlock(&c->start_mutex);
1306 wait_for_completion(&c->cleanup);
1308 c->is_poisoned = false;
1311 if (comp == c->pipe0.comp)
1313 if (comp == c->pipe1.comp)
1315 mutex_unlock(&c->start_mutex);
1318 EXPORT_SYMBOL_GPL(most_stop_channel);
1321 * most_register_component - registers a driver component with the core
1322 * @comp: driver component
1324 int most_register_component(struct core_component *comp)
1327 pr_err("Bad component\n");
1330 list_add_tail(&comp->list, &mc.comp_list);
1331 pr_info("registered new core component %s\n", comp->name);
1334 EXPORT_SYMBOL_GPL(most_register_component);
1336 static int disconnect_channels(struct device *dev, void *data)
1338 struct most_interface *iface;
1339 struct most_channel *c, *tmp;
1340 struct core_component *comp = data;
1342 iface = to_most_interface(dev);
1343 list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
1344 if (c->pipe0.comp == comp || c->pipe1.comp == comp)
1345 comp->disconnect_channel(c->iface, c->channel_id);
1346 if (c->pipe0.comp == comp)
1347 c->pipe0.comp = NULL;
1348 if (c->pipe1.comp == comp)
1349 c->pipe1.comp = NULL;
1355 * most_deregister_component - deregisters a driver component with the core
1356 * @comp: driver component
1358 int most_deregister_component(struct core_component *comp)
1361 pr_err("Bad component\n");
1365 bus_for_each_dev(&mc.bus, NULL, comp, disconnect_channels);
1366 list_del(&comp->list);
1367 pr_info("deregistering component %s\n", comp->name);
1370 EXPORT_SYMBOL_GPL(most_deregister_component);
/* Device-model release callbacks. Freeing of the backing structures is
 * done by the (de)register paths, so these only log.
 * NOTE(review): confirm no kfree() belongs here — ownership assumed to
 * stay with most_register_interface()/most_deregister_interface().
 */
static void release_interface(struct device *dev)
{
	pr_info("releasing interface dev %s...\n", dev_name(dev));
}

static void release_channel(struct device *dev)
{
	pr_info("releasing channel dev %s...\n", dev_name(dev));
}
1383 * most_register_interface - registers an interface with core
1384 * @iface: device interface
1386 * Allocates and initializes a new interface instance and all of its channels.
1387 * Returns a pointer to kobject or an error pointer.
1389 int most_register_interface(struct most_interface *iface)
1393 struct most_channel *c;
1395 if (!iface || !iface->enqueue || !iface->configure ||
1396 !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
1397 pr_err("Bad interface or channel overflow\n");
1401 id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
1403 pr_info("Failed to alloc mdev ID\n");
1407 iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
1409 ida_simple_remove(&mdev_id, id);
1413 INIT_LIST_HEAD(&iface->p->channel_list);
1414 iface->p->dev_id = id;
1415 strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
1416 iface->dev.init_name = iface->p->name;
1417 iface->dev.bus = &mc.bus;
1418 iface->dev.parent = &mc.dev;
1419 iface->dev.groups = interface_attr_groups;
1420 iface->dev.release = release_interface;
1421 if (device_register(&iface->dev)) {
1422 pr_err("registering iface->dev failed\n");
1424 ida_simple_remove(&mdev_id, id);
1428 for (i = 0; i < iface->num_channels; i++) {
1429 const char *name_suffix = iface->channel_vector[i].name_suffix;
1431 c = kzalloc(sizeof(*c), GFP_KERNEL);
1435 snprintf(c->name, STRING_SIZE, "ch%d", i);
1437 snprintf(c->name, STRING_SIZE, "%s", name_suffix);
1438 c->dev.init_name = c->name;
1439 c->dev.parent = &iface->dev;
1440 c->dev.groups = channel_attr_groups;
1441 c->dev.release = release_channel;
1442 if (device_register(&c->dev)) {
1443 pr_err("registering c->dev failed\n");
1444 goto free_instance_nodev;
1446 iface->p->channel[i] = c;
1450 c->keep_mbo = false;
1451 c->enqueue_halt = false;
1452 c->is_poisoned = false;
1453 c->cfg.direction = 0;
1454 c->cfg.data_type = 0;
1455 c->cfg.num_buffers = 0;
1456 c->cfg.buffer_size = 0;
1457 c->cfg.subbuffer_size = 0;
1458 c->cfg.packets_per_xact = 0;
1459 spin_lock_init(&c->fifo_lock);
1460 INIT_LIST_HEAD(&c->fifo);
1461 INIT_LIST_HEAD(&c->trash_fifo);
1462 INIT_LIST_HEAD(&c->halt_fifo);
1463 init_completion(&c->cleanup);
1464 atomic_set(&c->mbo_ref, 0);
1465 mutex_init(&c->start_mutex);
1466 mutex_init(&c->nq_mutex);
1467 list_add_tail(&c->list, &iface->p->channel_list);
1469 pr_info("registered new device mdev%d (%s)\n",
1470 id, iface->description);
1473 free_instance_nodev:
1478 c = iface->p->channel[--i];
1479 device_unregister(&c->dev);
1483 device_unregister(&iface->dev);
1484 ida_simple_remove(&mdev_id, id);
1487 EXPORT_SYMBOL_GPL(most_register_interface);
1490 * most_deregister_interface - deregisters an interface with core
1491 * @iface: device interface
1493 * Before removing an interface instance from the list, all running
1494 * channels are stopped and poisoned.
1496 void most_deregister_interface(struct most_interface *iface)
1499 struct most_channel *c;
1501 pr_info("deregistering device %s (%s)\n", dev_name(&iface->dev),
1502 iface->description);
1503 for (i = 0; i < iface->num_channels; i++) {
1504 c = iface->p->channel[i];
1506 c->pipe0.comp->disconnect_channel(c->iface,
1509 c->pipe1.comp->disconnect_channel(c->iface,
1511 c->pipe0.comp = NULL;
1512 c->pipe1.comp = NULL;
1514 device_unregister(&c->dev);
1518 ida_simple_remove(&mdev_id, iface->p->dev_id);
1520 device_unregister(&iface->dev);
1522 EXPORT_SYMBOL_GPL(most_deregister_interface);
1525 * most_stop_enqueue - prevents core from enqueueing MBOs
1526 * @iface: pointer to interface
1529 * This is called by an HDM that _cannot_ attend to its duties and
1530 * is imminent to get run over by the core. The core is not going to
1531 * enqueue any further packets unless the flagging HDM calls
1532 * most_resume enqueue().
1534 void most_stop_enqueue(struct most_interface *iface, int id)
1536 struct most_channel *c = iface->p->channel[id];
1541 mutex_lock(&c->nq_mutex);
1542 c->enqueue_halt = true;
1543 mutex_unlock(&c->nq_mutex);
1545 EXPORT_SYMBOL_GPL(most_stop_enqueue);
1548 * most_resume_enqueue - allow core to enqueue MBOs again
1549 * @iface: pointer to interface
1552 * This clears the enqueue halt flag and enqueues all MBOs currently
1553 * sitting in the wait fifo.
1555 void most_resume_enqueue(struct most_interface *iface, int id)
1557 struct most_channel *c = iface->p->channel[id];
1562 mutex_lock(&c->nq_mutex);
1563 c->enqueue_halt = false;
1564 mutex_unlock(&c->nq_mutex);
1566 wake_up_interruptible(&c->hdm_fifo_wq);
1568 EXPORT_SYMBOL_GPL(most_resume_enqueue);
/* Release callback for the static mostcore device; nothing to free. */
static void release_most_sub(struct device *dev)
{
	pr_info("releasing most_subsystem\n");
}
1575 static int __init most_init(void)
1579 pr_info("init()\n");
1580 INIT_LIST_HEAD(&mc.comp_list);
1583 mc.bus.name = "most",
1584 mc.bus.match = most_match,
1585 mc.drv.name = "most_core",
1586 mc.drv.bus = &mc.bus,
1587 mc.drv.groups = mc_attr_groups;
1589 err = bus_register(&mc.bus);
1591 pr_info("Cannot register most bus\n");
1594 err = driver_register(&mc.drv);
1596 pr_info("Cannot register core driver\n");
1599 mc.dev.init_name = "most_bus";
1600 mc.dev.release = release_most_sub;
1601 if (device_register(&mc.dev)) {
1609 driver_unregister(&mc.drv);
1611 bus_unregister(&mc.bus);
1615 static void __exit most_exit(void)
1617 pr_info("exit core module\n");
1618 device_unregister(&mc.dev);
1619 driver_unregister(&mc.drv);
1620 bus_unregister(&mc.bus);
1621 ida_destroy(&mdev_id);
1624 module_init(most_init);
1625 module_exit(most_exit);
1626 MODULE_LICENSE("GPL");
1627 MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
1628 MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");