2 * Broadcom BCM2835 V4L2 driver
4 * Copyright © 2013 Raspberry Pi (Trading) Ltd.
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file COPYING in the main directory of this archive
10 * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
11 * Dave Stevenson <dsteve@broadcom.com>
12 * Simon Mellor <simellor@broadcom.com>
13 * Luke Diamand <luked@broadcom.com>
15 * V4L2 driver MMAL vchiq interface code
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/errno.h>
21 #include <linux/kernel.h>
22 #include <linux/mutex.h>
24 #include <linux/slab.h>
25 #include <linux/completion.h>
26 #include <linux/vmalloc.h>
27 #include <linux/btree.h>
28 #include <asm/cacheflush.h>
29 #include <media/videobuf2-vmalloc.h>
31 #include "mmal-common.h"
32 #include "mmal-vchiq.h"
36 #include "interface/vchi/vchi.h"
38 /* maximum number of components supported */
39 #define VCHIQ_MMAL_MAX_COMPONENTS 4
41 /*#define FULL_MSG_DUMP 1*/
/* Human-readable names for MMAL message types, indexed by the
 * mmal_msg header 'type' field; used only for debug logging below.
 */
44 static const char *const msg_type_names[] = {
62 "GET_CORE_STATS_FOR_PORT",
66 "OPAQUE_ALLOCATOR_DESC",
69 "BUFFER_FROM_HOST_ZEROLEN",
/* Human-readable names for port actions, used in port_action debug logs. */
75 static const char *const port_action_type_names[] = {
/* DBG_DUMP_MSG: with FULL_MSG_DUMP defined it logs the message type and
 * then hex-dumps the header and payload separately; otherwise (when only
 * DEBUG-level logging is wanted) it logs just the type/length line, and
 * in the final fallback it expands to nothing.
 */
86 #if defined(FULL_MSG_DUMP)
87 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
89 pr_debug(TITLE" type:%s(%d) length:%d\n", \
90 msg_type_names[(MSG)->h.type], \
91 (MSG)->h.type, (MSG_LEN)); \
92 print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \
94 sizeof(struct mmal_msg_header), 1); \
95 print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \
97 ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
98 (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
101 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
103 pr_debug(TITLE" type:%s(%d) length:%d\n", \
104 msg_type_names[(MSG)->h.type], \
105 (MSG)->h.type, (MSG_LEN)); \
109 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
112 struct vchiq_mmal_instance;
114 /* normal message context */
/* Per-message context: carries either bulk-transfer state (u.bulk) for
 * buffer traffic or a completion/held-message pair (u.sync) for
 * synchronous request/reply exchanges.  Looked up by a u32 handle via
 * the instance's context map.
 */
115 struct mmal_msg_context {
116 struct vchiq_mmal_instance *instance;
121 /* work struct for deferred callback - must come first */
122 struct work_struct work;
124 struct vchiq_mmal_instance *instance;
126 struct vchiq_mmal_port *port;
127 /* actual buffer used to store bulk reply */
128 struct mmal_buffer *buffer;
129 /* amount of buffer used */
130 unsigned long buffer_used;
131 /* MMAL buffer flags */
133 /* Presentation and Decode timestamps */
137 int status; /* context status */
139 } bulk; /* bulk data */
142 /* message handle to release */
143 VCHI_HELD_MSG_T msg_handle;
144 /* pointer to received message */
145 struct mmal_msg *msg;
146 /* received message length */
148 /* completion upon reply */
149 struct completion cmplt;
150 } sync; /* synchronous response */
/* Map from u32 handles (sent over the wire in the message header) to
 * mmal_msg_context pointers, backed by a 32-bit btree.
 */
155 struct vchiq_mmal_context_map {
156 /* ensure serialized access to the btree(contention should be low) */
158 struct btree_head32 btree_head;
/* One open connection to the VideoCore MMAL service. */
162 struct vchiq_mmal_instance {
163 VCHI_SERVICE_HANDLE_T handle;
165 /* ensure serialised access to service */
166 struct mutex vchiq_mutex;
168 /* ensure serialised access to bulk operations */
169 struct mutex bulk_mutex;
171 /* vmalloc page to receive scratch bulk xfers into */
174 /* mapping table between context handles and mmal_msg_contexts */
175 struct vchiq_mmal_context_map context_map;
177 /* component to use next */
179 struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
/* Initialise a context map: reset the handle counter and set up the
 * 32-bit btree.  Returns 0 on success or the error from btree_init32().
 */
182 static int __must_check
183 mmal_context_map_init(struct vchiq_mmal_context_map *context_map)
185 mutex_init(&context_map->lock);
186 context_map->last_handle = 0;
187 return btree_init32(&context_map->btree_head);
/* Tear down the map's btree under the map lock. */
190 static void mmal_context_map_destroy(struct vchiq_mmal_context_map *context_map)
192 mutex_lock(&context_map->lock);
193 btree_destroy32(&context_map->btree_head);
194 mutex_unlock(&context_map->lock);
/* Allocate a new non-zero handle for msg_context and insert it into the
 * btree.  Handles come from a simple incrementing counter (0 is skipped);
 * the loop retries while the candidate handle is already in use.
 */
198 mmal_context_map_create_handle(struct vchiq_mmal_context_map *context_map,
199 struct mmal_msg_context *msg_context,
204 mutex_lock(&context_map->lock);
207 /* just use a simple count for handles, but do not use 0 */
208 context_map->last_handle++;
209 if (!context_map->last_handle)
210 context_map->last_handle++;
212 handle = context_map->last_handle;
214 /* check if the handle is already in use */
215 if (!btree_lookup32(&context_map->btree_head, handle))
219 if (btree_insert32(&context_map->btree_head, handle,
221 /* probably out of memory */
222 mutex_unlock(&context_map->lock);
226 mutex_unlock(&context_map->lock);
/* Return the msg context registered under 'handle', or NULL if absent. */
230 static struct mmal_msg_context *
231 mmal_context_map_lookup_handle(struct vchiq_mmal_context_map *context_map,
234 struct mmal_msg_context *msg_context;
239 mutex_lock(&context_map->lock);
241 msg_context = btree_lookup32(&context_map->btree_head, handle);
243 mutex_unlock(&context_map->lock);
/* Remove 'handle' from the map (the context itself is freed elsewhere). */
248 mmal_context_map_destroy_handle(struct vchiq_mmal_context_map *context_map,
251 mutex_lock(&context_map->lock);
252 btree_remove32(&context_map->btree_head, handle);
253 mutex_unlock(&context_map->lock);
/* Allocate a zeroed message context bound to 'instance' and register it
 * in the instance's context map.  Returns the context or ERR_PTR(-ENOMEM)
 * on allocation/handle failure.
 */
256 static struct mmal_msg_context *
257 get_msg_context(struct vchiq_mmal_instance *instance)
259 struct mmal_msg_context *msg_context;
261 /* todo: should this be allocated from a pool to avoid kzalloc */
262 msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
265 return ERR_PTR(-ENOMEM);
267 msg_context->instance = instance;
268 msg_context->handle =
269 mmal_context_map_create_handle(&instance->context_map,
273 if (!msg_context->handle) {
275 return ERR_PTR(-ENOMEM);
/* Resolve a wire handle back to its msg context (NULL if unknown). */
281 static struct mmal_msg_context *
282 lookup_msg_context(struct vchiq_mmal_instance *instance, u32 handle)
284 return mmal_context_map_lookup_handle(&instance->context_map,
/* Unregister the context's handle from the owning instance's map. */
289 release_msg_context(struct mmal_msg_context *msg_context)
291 mmal_context_map_destroy_handle(&msg_context->instance->context_map,
292 msg_context->handle);
296 /* deals with receipt of event to host message */
/* Events are currently unhandled; this just logs the event details. */
297 static void event_to_host_cb(struct vchiq_mmal_instance *instance,
298 struct mmal_msg *msg, u32 msg_len)
300 pr_debug("unhandled event\n");
301 pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
302 msg->u.event_to_host.client_component,
303 msg->u.event_to_host.port_type,
304 msg->u.event_to_host.port_num,
305 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
308 /* workqueue scheduled callback
310 * we do this because it is important we do not call any other vchiq
311 * sync calls from within the message delivery thread
/* Deliver the completed buffer to the port's buffer_cb, then drop the
 * message context's map handle.
 */
313 static void buffer_work_cb(struct work_struct *work)
315 struct mmal_msg_context *msg_context =
316 container_of(work, struct mmal_msg_context, u.bulk.work);
318 msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
319 msg_context->u.bulk.port,
320 msg_context->u.bulk.status,
321 msg_context->u.bulk.buffer,
322 msg_context->u.bulk.buffer_used,
323 msg_context->u.bulk.mmal_flags,
324 msg_context->u.bulk.dts,
325 msg_context->u.bulk.pts);
327 /* release message context */
328 release_msg_context(msg_context);
331 /* enqueue a bulk receive for a given message context */
/* Takes the bulk mutex (released by the bulk-complete/abort callback on
 * success), dequeues a host buffer from the port, records transfer
 * metadata in the context and queues a VCHI bulk receive into that
 * buffer.  On any failure the bulk mutex is dropped here instead.
 */
332 static int bulk_receive(struct vchiq_mmal_instance *instance,
333 struct mmal_msg *msg,
334 struct mmal_msg_context *msg_context)
336 unsigned long rd_len;
337 unsigned long flags = 0;
340 /* bulk mutex stops other bulk operations while we have a
341 * receive in progress - released in callback
343 ret = mutex_lock_interruptible(&instance->bulk_mutex);
347 rd_len = msg->u.buffer_from_host.buffer_header.length;
349 /* take buffer from queue */
350 spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
351 if (list_empty(&msg_context->u.bulk.port->buffers)) {
352 spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
353 pr_err("buffer list empty trying to submit bulk receive\n");
355 /* todo: this is a serious error, we should never have
356 * committed a buffer_to_host operation to the mmal
357 * port without the buffer to back it up (underflow
358 * handling) and there is no obvious way to deal with
359 * this - how is the mmal service going to react when
360 * we fail to do the xfer and reschedule a buffer when
361 * it arrives? perhaps a starved flag to indicate a
362 * waiting bulk receive?
365 mutex_unlock(&instance->bulk_mutex);
370 msg_context->u.bulk.buffer =
371 list_entry(msg_context->u.bulk.port->buffers.next,
372 struct mmal_buffer, list);
373 list_del(&msg_context->u.bulk.buffer->list);
375 spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
377 /* ensure we do not overrun the available buffer */
378 if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
379 rd_len = msg_context->u.bulk.buffer->buffer_size;
380 pr_warn("short read as not enough receive buffer space\n");
381 /* todo: is this the correct response, what happens to
382 * the rest of the message data?
/* stash metadata for buffer_work_cb to report to the port callback */
387 msg_context->u.bulk.buffer_used = rd_len;
388 msg_context->u.bulk.mmal_flags =
389 msg->u.buffer_from_host.buffer_header.flags;
390 msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
391 msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
393 /* queue the bulk submission */
394 vchi_service_use(instance->handle);
395 ret = vchi_bulk_queue_receive(instance->handle,
396 msg_context->u.bulk.buffer->buffer,
397 /* Actual receive needs to be a multiple
401 VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
402 VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
405 vchi_service_release(instance->handle);
408 /* callback will not be clearing the mutex */
409 mutex_unlock(&instance->bulk_mutex);
415 /* enqueue a dummy bulk receive for a given message context */
/* Queues a zero-length bulk receive into the instance's scratch page so
 * that the normal bulk-complete callback path still fires (e.g. for an
 * EOS buffer with no payload).  Takes the bulk mutex like bulk_receive;
 * it is released by the completion callback, or here on failure.
 */
416 static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
417 struct mmal_msg_context *msg_context)
421 /* bulk mutex stops other bulk operations while we have a
422 * receive in progress - released in callback
424 ret = mutex_lock_interruptible(&instance->bulk_mutex);
428 /* zero length indicates this was a dummy transfer */
429 msg_context->u.bulk.buffer_used = 0;
431 /* queue the bulk submission */
432 vchi_service_use(instance->handle);
434 ret = vchi_bulk_queue_receive(instance->handle,
435 instance->bulk_scratch,
437 VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
438 VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
441 vchi_service_release(instance->handle);
444 /* callback will not be clearing the mutex */
445 mutex_unlock(&instance->bulk_mutex);
451 /* data in message, memcpy from packet into output buffer */
/* Short-payload path: the data arrived inside the control message
 * (payload_in_message bytes in short_data), so dequeue a host buffer
 * from the port and copy the payload straight into it — no bulk
 * transfer and no bulk mutex involvement.
 */
452 static int inline_receive(struct vchiq_mmal_instance *instance,
453 struct mmal_msg *msg,
454 struct mmal_msg_context *msg_context)
456 unsigned long flags = 0;
458 /* take buffer from queue */
459 spin_lock_irqsave(&msg_context->u.bulk.port->slock, flags);
460 if (list_empty(&msg_context->u.bulk.port->buffers)) {
461 spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
462 pr_err("buffer list empty trying to receive inline\n");
464 /* todo: this is a serious error, we should never have
465 * committed a buffer_to_host operation to the mmal
466 * port without the buffer to back it up (with
467 * underflow handling) and there is no obvious way to
468 * deal with this. Less bad than the bulk case as we
469 * can just drop this on the floor but...unhelpful
474 msg_context->u.bulk.buffer =
475 list_entry(msg_context->u.bulk.port->buffers.next,
476 struct mmal_buffer, list);
477 list_del(&msg_context->u.bulk.buffer->list);
479 spin_unlock_irqrestore(&msg_context->u.bulk.port->slock, flags);
481 memcpy(msg_context->u.bulk.buffer->buffer,
482 msg->u.buffer_from_host.short_data,
483 msg->u.buffer_from_host.payload_in_message);
485 msg_context->u.bulk.buffer_used =
486 msg->u.buffer_from_host.payload_in_message;
491 /* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
/* Build and send a BUFFER_FROM_HOST message advertising 'buf' to the
 * VPU for 'port'.  A fresh msg context (carried in the message header
 * and drvbuf.client_context) records where the eventual reply data
 * should land; its work item is primed for the deferred port callback.
 */
493 buffer_from_host(struct vchiq_mmal_instance *instance,
494 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
496 struct mmal_msg_context *msg_context;
500 pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
502 /* bulk mutex stops other bulk operations while we
503 * have a receive in progress
505 if (mutex_lock_interruptible(&instance->bulk_mutex))
509 msg_context = get_msg_context(instance);
510 if (IS_ERR(msg_context)) {
511 ret = PTR_ERR(msg_context);
515 /* store bulk message context for when data arrives */
516 msg_context->u.bulk.instance = instance;
517 msg_context->u.bulk.port = port;
518 msg_context->u.bulk.buffer = NULL; /* not valid until bulk xfer */
519 msg_context->u.bulk.buffer_used = 0;
521 /* initialise work structure ready to schedule callback */
522 INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
524 /* prep the buffer from host message */
525 memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
527 m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
528 m.h.magic = MMAL_MAGIC;
529 m.h.context = msg_context->handle;
532 /* drvbuf is our private data passed back */
533 m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
534 m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
535 m.u.buffer_from_host.drvbuf.port_handle = port->handle;
536 m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;
/* buffer header describes the empty host buffer being offered */
539 m.u.buffer_from_host.buffer_header.cmd = 0;
540 m.u.buffer_from_host.buffer_header.data =
541 (u32)(unsigned long)buf->buffer;
542 m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
543 m.u.buffer_from_host.buffer_header.length = 0; /* nothing used yet */
544 m.u.buffer_from_host.buffer_header.offset = 0; /* no offset */
545 m.u.buffer_from_host.buffer_header.flags = 0; /* no flags */
546 m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
547 m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
549 /* clear buffer type specific data */
550 memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
551 sizeof(m.u.buffer_from_host.buffer_header_type_specific));
553 /* no payload in message */
554 m.u.buffer_from_host.payload_in_message = 0;
556 vchi_service_use(instance->handle);
558 ret = vchi_queue_kernel_message(instance->handle,
560 sizeof(struct mmal_msg_header) +
561 sizeof(m.u.buffer_from_host));
564 release_msg_context(msg_context);
565 /* todo: is this correct error value? */
568 vchi_service_release(instance->handle);
571 mutex_unlock(&instance->bulk_mutex);
576 /* submit a buffer to the mmal service
578 * the buffer_from_host uses size data from the ports next available
579 * mmal_buffer and deals with there being no buffer available by
580 * incrementing the underflow for later
/* Peek (without dequeuing) the next buffer on the port and advertise it
 * to the VPU via buffer_from_host(); if the port's list is empty the
 * underflow counter is bumped so the buffer can be resubmitted later.
 */
582 static int port_buffer_from_host(struct vchiq_mmal_instance *instance,
583 struct vchiq_mmal_port *port)
586 struct mmal_buffer *buf;
587 unsigned long flags = 0;
592 /* peek buffer from queue */
593 spin_lock_irqsave(&port->slock, flags);
594 if (list_empty(&port->buffers)) {
595 port->buffer_underflow++;
596 spin_unlock_irqrestore(&port->slock, flags);
600 buf = list_entry(port->buffers.next, struct mmal_buffer, list);
602 spin_unlock_irqrestore(&port->slock, flags);
604 /* issue buffer to mmal service */
605 ret = buffer_from_host(instance, port, buf);
607 pr_err("adding buffer header failed\n");
608 /* todo: how should this be dealt with */
614 /* deals with receipt of buffer to host message */
/* Dispatch a BUFFER_TO_HOST reply: recover the msg context via the
 * drvbuf client_context handle, then choose the delivery path —
 * error status, zero-length (with a dummy bulk receive on EOS), real
 * bulk receive, inline short payload, or invalid payload size.  On
 * any non-bulk path, a replacement buffer is offered to the port and
 * the deferred port callback is scheduled directly; on successful
 * bulk submission those steps happen in the bulk completion callback.
 */
615 static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
616 struct mmal_msg *msg, u32 msg_len)
618 struct mmal_msg_context *msg_context;
621 pr_debug("buffer_to_host_cb: instance:%p msg:%p msg_len:%d\n",
622 instance, msg, msg_len);
624 if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
625 handle = msg->u.buffer_from_host.drvbuf.client_context;
626 msg_context = lookup_msg_context(instance, handle);
629 pr_err("drvbuf.client_context(%u) is invalid\n",
634 pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
638 if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
639 /* message reception had an error */
640 pr_warn("error %d in reply\n", msg->h.status);
642 msg_context->u.bulk.status = msg->h.status;
644 } else if (msg->u.buffer_from_host.buffer_header.length == 0) {
646 if (msg->u.buffer_from_host.buffer_header.flags &
647 MMAL_BUFFER_HEADER_FLAG_EOS) {
648 msg_context->u.bulk.status =
649 dummy_bulk_receive(instance, msg_context);
650 if (msg_context->u.bulk.status == 0)
651 return; /* successful bulk submission, bulk
652 * completion will trigger callback
655 /* do callback with empty buffer - not EOS though */
656 msg_context->u.bulk.status = 0;
657 msg_context->u.bulk.buffer_used = 0;
659 } else if (msg->u.buffer_from_host.payload_in_message == 0) {
660 /* data is not in message, queue a bulk receive */
661 msg_context->u.bulk.status =
662 bulk_receive(instance, msg, msg_context);
663 if (msg_context->u.bulk.status == 0)
664 return; /* successful bulk submission, bulk
665 * completion will trigger callback
668 /* failed to submit buffer, this will end badly */
669 pr_err("error %d on bulk submission\n",
670 msg_context->u.bulk.status);
672 } else if (msg->u.buffer_from_host.payload_in_message <=
673 MMAL_VC_SHORT_DATA) {
674 /* data payload within message */
675 msg_context->u.bulk.status = inline_receive(instance, msg,
678 pr_err("message with invalid short payload\n");
681 msg_context->u.bulk.status = -EINVAL;
682 msg_context->u.bulk.buffer_used =
683 msg->u.buffer_from_host.payload_in_message;
686 /* replace the buffer header */
687 port_buffer_from_host(instance, msg_context->u.bulk.port);
689 /* schedule the port callback */
690 schedule_work(&msg_context->u.bulk.work);
/* Bulk receive completed: release the bulk mutex taken by
 * bulk_receive()/dummy_bulk_receive(), offer a replacement buffer to
 * the port, and schedule the deferred port callback with status 0.
 */
693 static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
694 struct mmal_msg_context *msg_context)
696 /* bulk receive operation complete */
697 mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);
699 /* replace the buffer header */
700 port_buffer_from_host(msg_context->u.bulk.instance,
701 msg_context->u.bulk.port);
703 msg_context->u.bulk.status = 0;
705 /* schedule the port callback */
706 schedule_work(&msg_context->u.bulk.work);
/* As bulk_receive_cb but for an aborted transfer: status is -EINTR. */
709 static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
710 struct mmal_msg_context *msg_context)
712 pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
714 /* bulk receive operation complete */
715 mutex_unlock(&msg_context->u.bulk.instance->bulk_mutex);
717 /* replace the buffer header */
718 port_buffer_from_host(msg_context->u.bulk.instance,
719 msg_context->u.bulk.port);
721 msg_context->u.bulk.status = -EINTR;
723 schedule_work(&msg_context->u.bulk.work);
726 /* incoming event service callback */
/* VCHI service callback: demultiplexes incoming events.  Message
 * arrival holds the message and routes it by type (buffer traffic and
 * events are handled and released here; everything else is a reply to
 * a synchronous request and is handed to the waiter via its msg
 * context's completion).  Bulk completion/abort events are forwarded
 * to the bulk callbacks above.
 */
727 static void service_callback(void *param,
728 const VCHI_CALLBACK_REASON_T reason,
731 struct vchiq_mmal_instance *instance = param;
734 struct mmal_msg *msg;
735 VCHI_HELD_MSG_T msg_handle;
736 struct mmal_msg_context *msg_context;
739 pr_err("Message callback passed NULL instance\n");
744 case VCHI_CALLBACK_MSG_AVAILABLE:
745 status = vchi_msg_hold(instance->handle, (void **)&msg,
746 &msg_len, VCHI_FLAGS_NONE, &msg_handle);
748 pr_err("Unable to dequeue a message (%d)\n", status);
752 DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
754 /* handling is different for buffer messages */
755 switch (msg->h.type) {
756 case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
757 vchi_held_msg_release(&msg_handle);
760 case MMAL_MSG_TYPE_EVENT_TO_HOST:
761 event_to_host_cb(instance, msg, msg_len);
762 vchi_held_msg_release(&msg_handle);
766 case MMAL_MSG_TYPE_BUFFER_TO_HOST:
767 buffer_to_host_cb(instance, msg, msg_len);
768 vchi_held_msg_release(&msg_handle);
772 /* messages dependent on header context to complete */
773 if (!msg->h.context) {
774 pr_err("received message context was null!\n");
775 vchi_held_msg_release(&msg_handle);
779 msg_context = lookup_msg_context(instance,
782 pr_err("received invalid message context %u!\n",
784 vchi_held_msg_release(&msg_handle);
788 /* fill in context values */
789 msg_context->u.sync.msg_handle = msg_handle;
790 msg_context->u.sync.msg = msg;
791 msg_context->u.sync.msg_len = msg_len;
793 /* todo: should this check (completion_done()
794 * == 1) for no one waiting? or do we need a
795 * flag to tell us the completion has been
796 * interrupted so we can free the message and
797 * its context. This probably also solves the
798 * message arriving after interruption todo
802 /* complete message so caller knows it happened */
803 complete(&msg_context->u.sync.cmplt);
809 case VCHI_CALLBACK_BULK_RECEIVED:
810 bulk_receive_cb(instance, bulk_ctx);
813 case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
814 bulk_abort_cb(instance, bulk_ctx);
817 case VCHI_CALLBACK_SERVICE_CLOSED:
818 /* TODO: consider if this requires action if received when
819 * driver is not explicitly closing the service
824 pr_err("Received unhandled message reason %d\n", reason);
/* Send 'msg' (header + payload_len bytes) to the VPU and wait (with a
 * timeout) for the matching reply, which service_callback delivers via
 * the msg context completion.  On success *msg_out/*msg_handle_out
 * return the held reply — the caller must vchi_held_msg_release() it.
 * Returns 0 on success or a negative errno on size/queue/timeout
 * failure.
 */
829 static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
830 struct mmal_msg *msg,
831 unsigned int payload_len,
832 struct mmal_msg **msg_out,
833 VCHI_HELD_MSG_T *msg_handle_out)
835 struct mmal_msg_context *msg_context;
837 unsigned long timeout;
839 /* payload size must not cause message to exceed max size */
841 (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
842 pr_err("payload length %d exceeds max:%d\n", payload_len,
843 (int)(MMAL_MSG_MAX_SIZE -
844 sizeof(struct mmal_msg_header)));
848 msg_context = get_msg_context(instance);
849 if (IS_ERR(msg_context))
850 return PTR_ERR(msg_context);
852 init_completion(&msg_context->u.sync.cmplt);
854 msg->h.magic = MMAL_MAGIC;
855 msg->h.context = msg_context->handle;
858 DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
861 vchi_service_use(instance->handle);
863 ret = vchi_queue_kernel_message(instance->handle,
865 sizeof(struct mmal_msg_header) +
868 vchi_service_release(instance->handle);
871 pr_err("error %d queuing message\n", ret);
872 release_msg_context(msg_context);
876 timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
879 pr_err("timed out waiting for sync completion\n");
881 /* todo: what happens if the message arrives after aborting */
882 release_msg_context(msg_context);
886 *msg_out = msg_context->u.sync.msg;
887 *msg_handle_out = msg_context->u.sync.msg_handle;
888 release_msg_context(msg_context);
/* Debug helper: pr_debug all interesting fields of a port — handle,
 * buffer requirements, elementary stream format, and (for video)
 * geometry, crop, framerate and aspect ratio.  No side effects.
 */
893 static void dump_port_info(struct vchiq_mmal_port *port)
895 pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
897 pr_debug("buffer minimum num:%d size:%d align:%d\n",
898 port->minimum_buffer.num,
899 port->minimum_buffer.size, port->minimum_buffer.alignment);
901 pr_debug("buffer recommended num:%d size:%d align:%d\n",
902 port->recommended_buffer.num,
903 port->recommended_buffer.size,
904 port->recommended_buffer.alignment);
906 pr_debug("buffer current values num:%d size:%d align:%d\n",
907 port->current_buffer.num,
908 port->current_buffer.size, port->current_buffer.alignment);
910 pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
912 port->format.encoding, port->format.encoding_variant);
914 pr_debug(" bitrate:%d flags:0x%x\n",
915 port->format.bitrate, port->format.flags);
917 if (port->format.type == MMAL_ES_TYPE_VIDEO) {
919 ("es video format: width:%d height:%d colourspace:0x%x\n",
920 port->es.video.width, port->es.video.height,
921 port->es.video.color_space);
923 pr_debug(" : crop xywh %d,%d,%d,%d\n",
924 port->es.video.crop.x,
925 port->es.video.crop.y,
926 port->es.video.crop.width, port->es.video.crop.height);
927 pr_debug(" : framerate %d/%d aspect %d/%d\n",
928 port->es.video.frame_rate.num,
929 port->es.video.frame_rate.den,
930 port->es.video.par.num, port->es.video.par.den);
/* Serialise the driver-side port state into the wire-format struct
 * mmal_port 'p' for inclusion in an outgoing message.  Only buffer_num,
 * buffer_size and userdata are writable on the VPU side; the rest are
 * informational.
 */
934 static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
936 /* todo do readonly fields need setting at all? */
937 p->type = port->type;
938 p->index = port->index;
940 p->is_enabled = port->enabled;
941 p->buffer_num_min = port->minimum_buffer.num;
942 p->buffer_size_min = port->minimum_buffer.size;
943 p->buffer_alignment_min = port->minimum_buffer.alignment;
944 p->buffer_num_recommended = port->recommended_buffer.num;
945 p->buffer_size_recommended = port->recommended_buffer.size;
947 /* only three writable fields in a port */
948 p->buffer_num = port->current_buffer.num;
949 p->buffer_size = port->current_buffer.size;
950 p->userdata = (u32)(unsigned long)port;
/* Push the driver's port configuration (buffer counts/sizes, ES format,
 * extradata) to the VPU with a synchronous PORT_INFO_SET message.
 * Returns 0 on success, a negative errno on transport failure, or the
 * negated MMAL status from the reply.
 */
953 static int port_info_set(struct vchiq_mmal_instance *instance,
954 struct vchiq_mmal_port *port)
958 struct mmal_msg *rmsg;
959 VCHI_HELD_MSG_T rmsg_handle;
961 pr_debug("setting port info port %p\n", port);
964 dump_port_info(port);
966 m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
968 m.u.port_info_set.component_handle = port->component->handle;
969 m.u.port_info_set.port_type = port->type;
970 m.u.port_info_set.port_index = port->index;
972 port_to_mmal_msg(port, &m.u.port_info_set.port);
974 /* elementary stream format setup */
975 m.u.port_info_set.format.type = port->format.type;
976 m.u.port_info_set.format.encoding = port->format.encoding;
977 m.u.port_info_set.format.encoding_variant =
978 port->format.encoding_variant;
979 m.u.port_info_set.format.bitrate = port->format.bitrate;
980 m.u.port_info_set.format.flags = port->format.flags;
982 memcpy(&m.u.port_info_set.es, &port->es,
983 sizeof(union mmal_es_specific_format));
985 m.u.port_info_set.format.extradata_size = port->format.extradata_size;
986 memcpy(&m.u.port_info_set.extradata, port->format.extradata,
987 port->format.extradata_size);
989 ret = send_synchronous_mmal_msg(instance, &m,
990 sizeof(m.u.port_info_set),
991 &rmsg, &rmsg_handle);
995 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
996 /* got an unexpected message type in reply */
1001 /* return operation status */
1002 ret = -rmsg->u.port_info_get_reply.status;
1004 pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
1005 port->component->handle, port->handle);
1008 vchi_held_msg_release(&rmsg_handle);
1013 /* use port info get message to retrieve port information */
/* Query the VPU for a port's current state with a synchronous
 * PORT_INFO_GET, then copy the reply into the driver-side port struct:
 * enabled flag, port handle, cached type/index, buffer requirements,
 * ES format, and extradata.  Returns 0 on success, a negative errno on
 * transport failure, or the negated MMAL status from the reply.
 */
1014 static int port_info_get(struct vchiq_mmal_instance *instance,
1015 struct vchiq_mmal_port *port)
1019 struct mmal_msg *rmsg;
1020 VCHI_HELD_MSG_T rmsg_handle;
1022 /* port info time */
1023 m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
1024 m.u.port_info_get.component_handle = port->component->handle;
1025 m.u.port_info_get.port_type = port->type;
1026 m.u.port_info_get.index = port->index;
1028 ret = send_synchronous_mmal_msg(instance, &m,
1029 sizeof(m.u.port_info_get),
1030 &rmsg, &rmsg_handle);
1034 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
1035 /* got an unexpected message type in reply */
1040 /* return operation status */
1041 ret = -rmsg->u.port_info_get_reply.status;
1042 if (ret != MMAL_MSG_STATUS_SUCCESS)
1045 if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
1046 port->enabled = false;
1048 port->enabled = true;
1050 /* copy the values out of the message */
1051 port->handle = rmsg->u.port_info_get_reply.port_handle;
1053 /* port type and index cached to use on port info set because
1054 * it does not use a port handle
1056 port->type = rmsg->u.port_info_get_reply.port_type;
1057 port->index = rmsg->u.port_info_get_reply.port_index;
1059 port->minimum_buffer.num =
1060 rmsg->u.port_info_get_reply.port.buffer_num_min;
1061 port->minimum_buffer.size =
1062 rmsg->u.port_info_get_reply.port.buffer_size_min;
1063 port->minimum_buffer.alignment =
1064 rmsg->u.port_info_get_reply.port.buffer_alignment_min;
1066 port->recommended_buffer.alignment =
1067 rmsg->u.port_info_get_reply.port.buffer_alignment_min;
1068 port->recommended_buffer.num =
1069 rmsg->u.port_info_get_reply.port.buffer_num_recommended;
1071 port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
1072 port->current_buffer.size =
1073 rmsg->u.port_info_get_reply.port.buffer_size;
/* elementary stream format description */
1076 port->format.type = rmsg->u.port_info_get_reply.format.type;
1077 port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
1078 port->format.encoding_variant =
1079 rmsg->u.port_info_get_reply.format.encoding_variant;
1080 port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
1081 port->format.flags = rmsg->u.port_info_get_reply.format.flags;
1083 /* elementary stream format */
1085 &rmsg->u.port_info_get_reply.es,
1086 sizeof(union mmal_es_specific_format));
1087 port->format.es = &port->es;
1089 port->format.extradata_size =
1090 rmsg->u.port_info_get_reply.format.extradata_size;
1091 memcpy(port->format.extradata,
1092 rmsg->u.port_info_get_reply.extradata,
1093 port->format.extradata_size);
1095 pr_debug("received port info\n");
1096 dump_port_info(port);
1100 pr_debug("%s:result:%d component:0x%x port:%d\n",
1101 __func__, ret, port->component->handle, port->handle);
1103 vchi_held_msg_release(&rmsg_handle);
1108 /* create component on vc */
/* Synchronously create a named component on the VPU and record the
 * returned handle and input/output/clock port counts in 'component'.
 * Returns 0 on success, a negative errno on transport failure, or the
 * negated MMAL status from the reply.
 */
1109 static int create_component(struct vchiq_mmal_instance *instance,
1110 struct vchiq_mmal_component *component,
1115 struct mmal_msg *rmsg;
1116 VCHI_HELD_MSG_T rmsg_handle;
1118 /* build component create message */
1119 m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
1120 m.u.component_create.client_component = (u32)(unsigned long)component;
/* NOTE(review): strncpy does not NUL-terminate when 'name' fills the
 * destination — presumably names are always shorter; confirm callers.
 */
1121 strncpy(m.u.component_create.name, name,
1122 sizeof(m.u.component_create.name));
1124 ret = send_synchronous_mmal_msg(instance, &m,
1125 sizeof(m.u.component_create),
1126 &rmsg, &rmsg_handle);
1130 if (rmsg->h.type != m.h.type) {
1131 /* got an unexpected message type in reply */
1136 ret = -rmsg->u.component_create_reply.status;
1137 if (ret != MMAL_MSG_STATUS_SUCCESS)
1140 /* a valid component response received */
1141 component->handle = rmsg->u.component_create_reply.component_handle;
1142 component->inputs = rmsg->u.component_create_reply.input_num;
1143 component->outputs = rmsg->u.component_create_reply.output_num;
1144 component->clocks = rmsg->u.component_create_reply.clock_num;
1146 pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
1148 component->inputs, component->outputs, component->clocks);
1151 vchi_held_msg_release(&rmsg_handle);
1156 /* destroys a component on vc */
/* Synchronous COMPONENT_DESTROY by handle; returns 0, negative errno,
 * or the negated MMAL status from the reply.
 */
1157 static int destroy_component(struct vchiq_mmal_instance *instance,
1158 struct vchiq_mmal_component *component)
1162 struct mmal_msg *rmsg;
1163 VCHI_HELD_MSG_T rmsg_handle;
1165 m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
1166 m.u.component_destroy.component_handle = component->handle;
1168 ret = send_synchronous_mmal_msg(instance, &m,
1169 sizeof(m.u.component_destroy),
1170 &rmsg, &rmsg_handle);
1174 if (rmsg->h.type != m.h.type) {
1175 /* got an unexpected message type in reply */
1180 ret = -rmsg->u.component_destroy_reply.status;
1184 vchi_held_msg_release(&rmsg_handle);
1189 /* enable a component on vc */
/* Synchronous COMPONENT_ENABLE; same return convention as above. */
1190 static int enable_component(struct vchiq_mmal_instance *instance,
1191 struct vchiq_mmal_component *component)
1195 struct mmal_msg *rmsg;
1196 VCHI_HELD_MSG_T rmsg_handle;
1198 m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1199 m.u.component_enable.component_handle = component->handle;
1201 ret = send_synchronous_mmal_msg(instance, &m,
1202 sizeof(m.u.component_enable),
1203 &rmsg, &rmsg_handle);
1207 if (rmsg->h.type != m.h.type) {
1208 /* got an unexpected message type in reply */
1213 ret = -rmsg->u.component_enable_reply.status;
1216 vchi_held_msg_release(&rmsg_handle);
1221 /* disable a component on vc */
/* Synchronous COMPONENT_DISABLE; same return convention as above. */
1222 static int disable_component(struct vchiq_mmal_instance *instance,
1223 struct vchiq_mmal_component *component)
1227 struct mmal_msg *rmsg;
1228 VCHI_HELD_MSG_T rmsg_handle;
1230 m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1231 m.u.component_disable.component_handle = component->handle;
1233 ret = send_synchronous_mmal_msg(instance, &m,
1234 sizeof(m.u.component_disable),
1235 &rmsg, &rmsg_handle);
1239 if (rmsg->h.type != m.h.type) {
1240 /* got an unexpected message type in reply */
1245 ret = -rmsg->u.component_disable_reply.status;
1249 vchi_held_msg_release(&rmsg_handle);
1254 /* get version of mmal implementation */
/* Synchronous GET_VERSION; on success writes the VPU-side MMAL
 * major/minor version to *major_out/*minor_out.  Returns 0 on success
 * or a negative errno on transport/type-mismatch failure.
 */
1255 static int get_version(struct vchiq_mmal_instance *instance,
1256 u32 *major_out, u32 *minor_out)
1260 struct mmal_msg *rmsg;
1261 VCHI_HELD_MSG_T rmsg_handle;
1263 m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1265 ret = send_synchronous_mmal_msg(instance, &m,
1266 sizeof(m.u.version),
1267 &rmsg, &rmsg_handle);
1271 if (rmsg->h.type != m.h.type) {
1272 /* got an unexpected message type in reply */
1277 *major_out = rmsg->u.version.major;
1278 *minor_out = rmsg->u.version.minor;
1281 vchi_held_msg_release(&rmsg_handle);
/*
 * port_action_port() - perform @action_type (e.g. enable/disable) on @port.
 *
 * Variant of PORT_ACTION that serialises the full port structure into the
 * message (see port_to_mmal_msg()); compare port_action_handle() below,
 * which passes only handles.
 * NOTE(review): some error-check lines are elided in this extract.
 */
1286 /* do a port action with a port as a parameter */
1287 static int port_action_port(struct vchiq_mmal_instance *instance,
1288 struct vchiq_mmal_port *port,
1289 enum mmal_msg_port_action_type action_type)
1293 struct mmal_msg *rmsg;
1294 VCHI_HELD_MSG_T rmsg_handle;
1296 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1297 m.u.port_action_port.component_handle = port->component->handle;
1298 m.u.port_action_port.port_handle = port->handle;
1299 m.u.port_action_port.action = action_type;
/* serialise the kernel-side port state into the wire format */
1301 port_to_mmal_msg(port, &m.u.port_action_port.port);
1303 ret = send_synchronous_mmal_msg(instance, &m,
1304 sizeof(m.u.port_action_port),
1305 &rmsg, &rmsg_handle);
1309 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1310 /* got an unexpected message type in reply */
/* firmware status is positive; negate to follow kernel convention */
1315 ret = -rmsg->u.port_action_reply.status;
1317 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1319 ret, port->component->handle, port->handle,
1320 port_action_type_names[action_type], action_type);
1323 vchi_held_msg_release(&rmsg_handle);
/*
 * port_action_handle() - perform @action_type on @port, identifying the
 * peer by raw handles rather than a serialised port struct.
 *
 * Used for connect/disconnect actions where the target component/port on
 * the VideoCore side is named by @connect_component_handle and
 * @connect_port_handle.
 * NOTE(review): some error-check lines are elided in this extract.
 */
1328 /* do a port action with handles as parameters */
1329 static int port_action_handle(struct vchiq_mmal_instance *instance,
1330 struct vchiq_mmal_port *port,
1331 enum mmal_msg_port_action_type action_type,
1332 u32 connect_component_handle,
1333 u32 connect_port_handle)
1337 struct mmal_msg *rmsg;
1338 VCHI_HELD_MSG_T rmsg_handle;
1340 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1342 m.u.port_action_handle.component_handle = port->component->handle;
1343 m.u.port_action_handle.port_handle = port->handle;
1344 m.u.port_action_handle.action = action_type;
/* peer endpoint is identified purely by firmware handles */
1346 m.u.port_action_handle.connect_component_handle =
1347 connect_component_handle;
1348 m.u.port_action_handle.connect_port_handle = connect_port_handle;
1350 ret = send_synchronous_mmal_msg(instance, &m,
1351 sizeof(m.u.port_action_handle),
1352 &rmsg, &rmsg_handle);
1356 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1357 /* got an unexpected message type in reply */
/* firmware status is positive; negate to follow kernel convention */
1362 ret = -rmsg->u.port_action_reply.status;
1364 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)" \
1365 " connect component:0x%x connect port:%d\n",
1367 ret, port->component->handle, port->handle,
1368 port_action_type_names[action_type],
1369 action_type, connect_component_handle, connect_port_handle);
1372 vchi_held_msg_release(&rmsg_handle);
/*
 * port_parameter_set() - set a MMAL parameter on @port.
 * @parameter_id: MMAL parameter identifier.
 * @value/@value_size: raw parameter payload copied into the message.
 *
 * The wire "size" field includes the 2-u32 parameter header (id + size);
 * the message length sent adds a further 2 u32s for the component/port
 * handles, hence the (4 * sizeof(u32)) below.
 * NOTE(review): some error-check lines are elided in this extract.
 */
1377 static int port_parameter_set(struct vchiq_mmal_instance *instance,
1378 struct vchiq_mmal_port *port,
1379 u32 parameter_id, void *value, u32 value_size)
1383 struct mmal_msg *rmsg;
1384 VCHI_HELD_MSG_T rmsg_handle;
1386 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1388 m.u.port_parameter_set.component_handle = port->component->handle;
1389 m.u.port_parameter_set.port_handle = port->handle;
1390 m.u.port_parameter_set.id = parameter_id;
/* size on the wire includes the id+size header words */
1391 m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1392 memcpy(&m.u.port_parameter_set.value, value, value_size);
1394 ret = send_synchronous_mmal_msg(instance, &m,
1395 (4 * sizeof(u32)) + value_size,
1396 &rmsg, &rmsg_handle);
1400 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1401 /* got an unexpected message type in reply */
/* firmware status is positive; negate to follow kernel convention */
1406 ret = -rmsg->u.port_parameter_set_reply.status;
1408 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1410 ret, port->component->handle, port->handle, parameter_id);
1413 vchi_held_msg_release(&rmsg_handle);
/*
 * port_parameter_get() - read a MMAL parameter from @port.
 * @value: caller buffer; filled with at most the incoming *@value_size bytes.
 * @value_size: in: capacity of @value; out: the parameter's true size as
 *              reported by the firmware (may exceed what was copied when
 *              the caller's buffer was too small).
 * NOTE(review): some error-check lines are elided in this extract.
 */
1418 static int port_parameter_get(struct vchiq_mmal_instance *instance,
1419 struct vchiq_mmal_port *port,
1420 u32 parameter_id, void *value, u32 *value_size)
1424 struct mmal_msg *rmsg;
1425 VCHI_HELD_MSG_T rmsg_handle;
1427 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
1429 m.u.port_parameter_get.component_handle = port->component->handle;
1430 m.u.port_parameter_get.port_handle = port->handle;
1431 m.u.port_parameter_get.id = parameter_id;
/* advertised size includes the 2-u32 id+size header */
1432 m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
1434 ret = send_synchronous_mmal_msg(instance, &m,
1436 mmal_msg_port_parameter_get),
1437 &rmsg, &rmsg_handle);
1441 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
1442 /* got an unexpected message type in reply */
1443 pr_err("Incorrect reply type %d\n", rmsg->h.type);
/* firmware status is positive; negate to follow kernel convention */
1448 ret = -rmsg->u.port_parameter_get_reply.status;
1449 /* port_parameter_get_reply.size includes the header,
1450 * whilst *value_size doesn't.
/* strip the id+size header words so sizes below compare like-for-like */
1452 rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));
/* error, or reply bigger than the caller's buffer: copy only what fits */
1454 if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
1455 /* Copy only as much as we have space for
1456 * but report true size of parameter
1458 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1460 *value_size = rmsg->u.port_parameter_get_reply.size;
1462 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1463 rmsg->u.port_parameter_get_reply.size);
1465 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
1466 ret, port->component->handle, port->handle, parameter_id);
1469 vchi_held_msg_release(&rmsg_handle);
/*
 * port_disable() - disable @port on the firmware and return all queued
 * buffers to their owner.
 *
 * Marks the port disabled first so no new buffers are submitted, issues a
 * PORT_ACTION_TYPE_DISABLE, then drains port->buffers under the port
 * spinlock, handing each buffer back through port->buffer_cb (if set) with
 * zero length. Finishes by re-reading the port info from the firmware.
 * NOTE(review): some error-check lines are elided in this extract.
 */
1474 /* disables a port and drains buffers from it */
1475 static int port_disable(struct vchiq_mmal_instance *instance,
1476 struct vchiq_mmal_port *port)
1479 struct list_head *q, *buf_head;
1480 unsigned long flags = 0;
/* flag first so concurrent submitters see the port as down */
1485 port->enabled = false;
1487 ret = port_action_port(instance, port,
1488 MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
1490 /* drain all queued buffers on port */
1491 spin_lock_irqsave(&port->slock, flags);
/* _safe variant: entries are removed while walking the list */
1493 list_for_each_safe(buf_head, q, &port->buffers) {
1494 struct mmal_buffer *mmalbuf;
1496 mmalbuf = list_entry(buf_head, struct mmal_buffer,
/* return the unprocessed buffer to the client with zero payload */
1499 if (port->buffer_cb)
1500 port->buffer_cb(instance,
1501 port, 0, mmalbuf, 0, 0,
1506 spin_unlock_irqrestore(&port->slock, flags);
/* refresh cached port state from the firmware after the disable */
1508 ret = port_info_get(instance, port);
/*
 * port_enable() - enable @port on the firmware and prime it with buffers.
 *
 * When a buffer callback is registered, first verifies enough buffers are
 * queued to cover current_buffer.num headers, then enables the port and
 * pushes each queued buffer to the VideoCore via buffer_from_host().
 * Finishes by re-reading the port info from the firmware.
 * NOTE(review): some error-check/early-return lines are elided in this
 * extract.
 */
1515 static int port_enable(struct vchiq_mmal_instance *instance,
1516 struct vchiq_mmal_port *port)
1518 unsigned int hdr_count;
1519 struct list_head *buf_head;
1525 /* ensure there are enough buffers queued to cover the buffer headers */
1526 if (port->buffer_cb) {
1528 list_for_each(buf_head, &port->buffers) {
1531 if (hdr_count < port->current_buffer.num)
1535 ret = port_action_port(instance, port,
1536 MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
1540 port->enabled = true;
1542 if (port->buffer_cb) {
1543 /* send buffer headers to videocore */
1545 list_for_each(buf_head, &port->buffers) {
1546 struct mmal_buffer *mmalbuf;
1548 mmalbuf = list_entry(buf_head, struct mmal_buffer,
1550 ret = buffer_from_host(instance, port, mmalbuf);
/* stop once the firmware has been given its full complement */
1555 if (hdr_count > port->current_buffer.num)
/* refresh cached port state from the firmware after the enable */
1560 ret = port_info_get(instance, port);
1566 /* ------------------------------------------------------------------
1568 *------------------------------------------------------------------
/*
 * vchiq_mmal_port_set_format() - push the port's locally-held format to
 * the firmware, then read back what was actually accepted.
 *
 * Serialised against other instance operations by vchiq_mutex; the lock
 * acquisition is interruptible, so callers may see an early error return.
 */
1571 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1572 struct vchiq_mmal_port *port)
1576 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1579 ret = port_info_set(instance, port);
1581 goto release_unlock;
1583 /* read what has actually been set */
1584 ret = port_info_get(instance, port);
1587 mutex_unlock(&instance->vchiq_mutex);
/*
 * vchiq_mmal_port_parameter_set() - public wrapper around
 * port_parameter_set(), serialised by the instance mutex.
 */
1592 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1593 struct vchiq_mmal_port *port,
1594 u32 parameter, void *value, u32 value_size)
1598 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1601 ret = port_parameter_set(instance, port, parameter, value, value_size);
1603 mutex_unlock(&instance->vchiq_mutex);
/*
 * vchiq_mmal_port_parameter_get() - public wrapper around
 * port_parameter_get(), serialised by the instance mutex.
 * @value_size: in/out — see port_parameter_get().
 */
1608 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1609 struct vchiq_mmal_port *port,
1610 u32 parameter, void *value, u32 *value_size)
1614 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1617 ret = port_parameter_get(instance, port, parameter, value, value_size);
1619 mutex_unlock(&instance->vchiq_mutex);
/*
 * vchiq_mmal_port_enable() - enable @port, registering @buffer_cb to
 * receive completed buffers. Enabling an already-enabled port is a no-op.
 */
1626 * enables a port and queues buffers for satisfying callbacks if we
1627 * provide a callback handler
1629 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1630 struct vchiq_mmal_port *port,
1631 vchiq_mmal_buffer_cb buffer_cb)
1635 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1638 /* already enabled - noop */
1639 if (port->enabled) {
/* record the callback before priming the port with buffers */
1644 port->buffer_cb = buffer_cb;
1646 ret = port_enable(instance, port);
1649 mutex_unlock(&instance->vchiq_mutex);
/*
 * vchiq_mmal_port_disable() - disable @port and drain its buffers.
 * Disabling an already-disabled port returns without touching the firmware.
 */
1654 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1655 struct vchiq_mmal_port *port)
1659 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1662 if (!port->enabled) {
1663 mutex_unlock(&instance->vchiq_mutex);
1667 ret = port_disable(instance, port);
1669 mutex_unlock(&instance->vchiq_mutex);
/*
 * vchiq_mmal_port_connect_tunnel() - tunnel @src to @dst on the VideoCore.
 *
 * Tears down any existing connection from @src first, then (when a new
 * @dst is given — the NULL-dst early-out line is elided in this extract)
 * copies @src's video format onto @dst, commits it, and issues a CONNECT
 * action so buffers flow firmware-side without host involvement.
 * NOTE(review): some error-check lines are elided in this extract.
 */
1674 /* ports will be connected in a tunneled manner so data buffers
1675 * are not handled by client.
1677 int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
1678 struct vchiq_mmal_port *src,
1679 struct vchiq_mmal_port *dst)
1683 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1686 /* disconnect ports if connected */
1687 if (src->connected) {
/* the source must be quiesced before it can be disconnected */
1688 ret = port_disable(instance, src);
1690 pr_err("failed disabling src port(%d)\n", ret);
1691 goto release_unlock;
1694 /* do not need to disable the destination port as they
1695 * are connected and it is done automatically
1698 ret = port_action_handle(instance, src,
1699 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
1700 src->connected->component->handle,
1701 src->connected->handle);
1703 pr_err("failed disconnecting src port\n");
1704 goto release_unlock;
/* old peer is now implicitly disabled; forget the link */
1706 src->connected->enabled = false;
1707 src->connected = NULL;
1711 /* do not make new connection */
1713 pr_debug("not making new connection\n");
1714 goto release_unlock;
1717 /* copy src port format to dst */
1718 dst->format.encoding = src->format.encoding;
1719 dst->es.video.width = src->es.video.width;
1720 dst->es.video.height = src->es.video.height;
1721 dst->es.video.crop.x = src->es.video.crop.x;
1722 dst->es.video.crop.y = src->es.video.crop.y;
1723 dst->es.video.crop.width = src->es.video.crop.width;
1724 dst->es.video.crop.height = src->es.video.crop.height;
1725 dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
1726 dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
1728 /* set new format */
1729 ret = port_info_set(instance, dst);
1731 pr_debug("setting port info failed\n");
1732 goto release_unlock;
1735 /* read what has actually been set */
1736 ret = port_info_get(instance, dst);
1738 pr_debug("read back port info failed\n");
1739 goto release_unlock;
1742 /* connect two ports together */
1743 ret = port_action_handle(instance, src,
1744 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
1745 dst->component->handle, dst->handle);
1747 pr_debug("connecting port %d:%d to %d:%d failed\n",
1748 src->component->handle, src->handle,
1749 dst->component->handle, dst->handle);
1750 goto release_unlock;
/* remember the peer so a later call can disconnect it */
1752 src->connected = dst;
1756 mutex_unlock(&instance->vchiq_mutex);
/*
 * vchiq_mmal_submit_buffer() - queue @buffer on @port for the firmware.
 *
 * Adds the buffer to the port's list under the port spinlock. If the port
 * previously ran out of buffers (buffer_underflow), immediately pushes one
 * to the mmal service to satisfy the outstanding request.
 */
1761 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1762 struct vchiq_mmal_port *port,
1763 struct mmal_buffer *buffer)
1765 unsigned long flags = 0;
1767 spin_lock_irqsave(&port->slock, flags);
1768 list_add_tail(&buffer->list, &port->buffers);
1769 spin_unlock_irqrestore(&port->slock, flags);
1771 /* the port previously underflowed because it was missing a
1772 * mmal_buffer which has just been added, submit that buffer
1773 * to the mmal service.
1775 if (port->buffer_underflow) {
1776 port_buffer_from_host(instance, port);
1777 port->buffer_underflow--;
/*
 * vchiq_mmal_component_init() - create a firmware component by @name and
 * gather info for all of its ports.
 * @component_out: on success, points at the instance-owned component slot.
 *
 * Components live in a fixed instance array (VCHIQ_MMAL_MAX_COMPONENTS);
 * -EINVAL is returned when the array is exhausted. For the control port
 * and each input/output/clock port, the spinlock and buffer list are
 * initialised and the port info is fetched from the firmware; any failure
 * unwinds via release_component (destroy_component).
 */
1783 /* Initialise a mmal component and its ports
1786 int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
1788 struct vchiq_mmal_component **component_out)
1791 int idx; /* port index */
1792 struct vchiq_mmal_component *component;
1794 if (mutex_lock_interruptible(&instance->vchiq_mutex))
/* all component slots in use */
1797 if (instance->component_idx == VCHIQ_MMAL_MAX_COMPONENTS) {
1798 ret = -EINVAL; /* todo is this correct error? */
1802 component = &instance->component[instance->component_idx];
1804 ret = create_component(instance, component, name);
1808 /* ports info needs gathering */
1809 component->control.type = MMAL_PORT_TYPE_CONTROL;
1810 component->control.index = 0;
1811 component->control.component = component;
1812 spin_lock_init(&component->control.slock);
1813 INIT_LIST_HEAD(&component->control.buffers);
1814 ret = port_info_get(instance, &component->control);
1816 goto release_component;
/* component->inputs/outputs/clocks counts come from create_component() */
1818 for (idx = 0; idx < component->inputs; idx++) {
1819 component->input[idx].type = MMAL_PORT_TYPE_INPUT;
1820 component->input[idx].index = idx;
1821 component->input[idx].component = component;
1822 spin_lock_init(&component->input[idx].slock);
1823 INIT_LIST_HEAD(&component->input[idx].buffers);
1824 ret = port_info_get(instance, &component->input[idx]);
1826 goto release_component;
1829 for (idx = 0; idx < component->outputs; idx++) {
1830 component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
1831 component->output[idx].index = idx;
1832 component->output[idx].component = component;
1833 spin_lock_init(&component->output[idx].slock);
1834 INIT_LIST_HEAD(&component->output[idx].buffers);
1835 ret = port_info_get(instance, &component->output[idx]);
1837 goto release_component;
1840 for (idx = 0; idx < component->clocks; idx++) {
1841 component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
1842 component->clock[idx].index = idx;
1843 component->clock[idx].component = component;
1844 spin_lock_init(&component->clock[idx].slock);
1845 INIT_LIST_HEAD(&component->clock[idx].buffers);
1846 ret = port_info_get(instance, &component->clock[idx]);
1848 goto release_component;
/* slot consumed only after everything above succeeded */
1851 instance->component_idx++;
1853 *component_out = component;
1855 mutex_unlock(&instance->vchiq_mutex);
/* error path: tear down the firmware-side component */
1860 destroy_component(instance, component);
1862 mutex_unlock(&instance->vchiq_mutex);
/*
 * vchiq_mmal_component_finalise() - destroy @component on the firmware,
 * disabling it first when it is still enabled.
 */
1868 * cause a mmal component to be destroyed
1870 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1871 struct vchiq_mmal_component *component)
1875 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1878 if (component->enabled)
1879 ret = disable_component(instance, component);
/* note: destroy result overwrites any disable failure in ret */
1881 ret = destroy_component(instance, component);
1883 mutex_unlock(&instance->vchiq_mutex);
/*
 * vchiq_mmal_component_enable() - enable @component on the firmware.
 * A no-op (early return) when the component is already enabled.
 */
1889 * cause a mmal component to be enabled
1891 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1892 struct vchiq_mmal_component *component)
1896 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1899 if (component->enabled) {
1900 mutex_unlock(&instance->vchiq_mutex);
1904 ret = enable_component(instance, component);
/* track local state only when the firmware accepted the enable */
1906 component->enabled = true;
1908 mutex_unlock(&instance->vchiq_mutex);
/*
 * vchiq_mmal_component_disable() - disable @component on the firmware.
 * A no-op (early return) when the component is already disabled.
 * (The original comment below says "enabled" — copy-paste from the
 * function above; this function disables.)
 */
1914 * cause a mmal component to be enabled
1916 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1917 struct vchiq_mmal_component *component)
1921 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1924 if (!component->enabled) {
1925 mutex_unlock(&instance->vchiq_mutex);
1929 ret = disable_component(instance, component);
/* track local state only when the firmware accepted the disable */
1931 component->enabled = false;
1933 mutex_unlock(&instance->vchiq_mutex);
/*
 * vchiq_mmal_version() - public wrapper around get_version(), serialised
 * by the instance mutex.
 */
1938 int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
1939 u32 *major_out, u32 *minor_out)
1943 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1946 ret = get_version(instance, major_out, minor_out);
1948 mutex_unlock(&instance->vchiq_mutex);
/*
 * vchiq_mmal_finalise() - tear down an instance: close the VCHI service,
 * free the bulk scratch buffer and destroy the message-context map.
 * NOTE(review): trailing lines of this function (final free/return) are
 * elided in this extract.
 */
1953 int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
1960 if (mutex_lock_interruptible(&instance->vchiq_mutex))
/* take a use-count before closing so the service is active for close */
1963 vchi_service_use(instance->handle);
1965 status = vchi_service_close(instance->handle);
1967 pr_err("mmal-vchiq: VCHIQ close failed");
1969 mutex_unlock(&instance->vchiq_mutex);
1971 vfree(instance->bulk_scratch);
1973 mmal_context_map_destroy(&instance->context_map);
1980 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
1983 struct vchiq_mmal_instance *instance;
1984 static VCHI_CONNECTION_T *vchi_connection;
1985 static VCHI_INSTANCE_T vchi_instance;
1986 SERVICE_CREATION_T params = {
1987 .version = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
1988 .service_id = VC_MMAL_SERVER_NAME,
1989 .connection = vchi_connection,
1992 .callback = service_callback,
1993 .callback_param = NULL,
1994 .want_unaligned_bulk_rx = 1,
1995 .want_unaligned_bulk_tx = 1,
1999 /* compile time checks to ensure structure size as they are
2000 * directly (de)serialised from memory.
2003 /* ensure the header structure has packed to the correct size */
2004 BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
2006 /* ensure message structure does not exceed maximum length */
2007 BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
2009 /* mmal port struct is correct size */
2010 BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
2012 /* create a vchi instance */
2013 status = vchi_initialise(&vchi_instance);
2015 pr_err("Failed to initialise VCHI instance (status=%d)\n",
2020 status = vchi_connect(NULL, 0, vchi_instance);
2022 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
2026 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
2031 mutex_init(&instance->vchiq_mutex);
2032 mutex_init(&instance->bulk_mutex);
2034 instance->bulk_scratch = vmalloc(PAGE_SIZE);
2036 status = mmal_context_map_init(&instance->context_map);
2038 pr_err("Failed to init context map (status=%d)\n", status);
2043 params.callback_param = instance;
2045 status = vchi_service_open(vchi_instance, ¶ms, &instance->handle);
2047 pr_err("Failed to open VCHI service connection (status=%d)\n",
2049 goto err_close_services;
2052 vchi_service_release(instance->handle);
2054 *out_instance = instance;
2060 vchi_service_close(instance->handle);
2061 vfree(instance->bulk_scratch);