// SPDX-License-Identifier: GPL-2.0
/*
 * Broadcom BM2835 V4L2 driver
 *
 * Copyright © 2013 Raspberry Pi (Trading) Ltd.
 *
 * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
 *          Dave Stevenson <dsteve@broadcom.com>
 *          Simon Mellor <simellor@broadcom.com>
 *          Luke Diamand <luked@broadcom.com>
 *
 * V4L2 driver MMAL vchiq interface code
 */
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/mutex.h>
21 #include <linux/slab.h>
22 #include <linux/completion.h>
23 #include <linux/vmalloc.h>
24 #include <asm/cacheflush.h>
25 #include <media/videobuf2-vmalloc.h>
27 #include "mmal-common.h"
28 #include "mmal-vchiq.h"
32 #include "interface/vchi/vchi.h"
34 /* maximum number of components supported */
35 #define VCHIQ_MMAL_MAX_COMPONENTS 4
37 /*#define FULL_MSG_DUMP 1*/
/* Human-readable names for enum mmal_msg_type, indexed by message type.
 * Used only by DBG_DUMP_MSG; order must match the enum in mmal-msg.h.
 */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LHS32",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
71 static const char *const port_action_type_names[] = {
/* Message dump helper.  Three variants:
 *  - FULL_MSG_DUMP: header print plus full hex dump of header and payload
 *  - DEBUG:         header print only
 *  - otherwise:     compiled out entirely
 */
#if defined(FULL_MSG_DUMP)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET,	\
			       16, 4, (MSG),				\
			       sizeof(struct mmal_msg_header), 1);	\
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET,	\
			       16, 4,					\
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#elif defined(DEBUG)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	pr_debug(TITLE" type:%s(%d) length:%d\n",			\
		 msg_type_names[(MSG)->h.type],				\
		 (MSG)->h.type, (MSG_LEN))
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
108 struct vchiq_mmal_instance;
110 /* normal message context */
111 struct mmal_msg_context {
112 struct vchiq_mmal_instance *instance;
114 /* Index in the context_map idr so that we can find the
115 * mmal_msg_context again when servicing the VCHI reply.
121 /* work struct for defered callback - must come first */
122 struct work_struct work;
124 struct vchiq_mmal_instance *instance;
126 struct vchiq_mmal_port *port;
127 /* actual buffer used to store bulk reply */
128 struct mmal_buffer *buffer;
129 /* amount of buffer used */
130 unsigned long buffer_used;
131 /* MMAL buffer flags */
133 /* Presentation and Decode timestamps */
137 int status; /* context status */
139 } bulk; /* bulk data */
142 /* message handle to release */
143 VCHI_HELD_MSG_T msg_handle;
144 /* pointer to received message */
145 struct mmal_msg *msg;
146 /* received message length */
148 /* completion upon reply */
149 struct completion cmplt;
150 } sync; /* synchronous response */
155 struct vchiq_mmal_instance {
156 VCHI_SERVICE_HANDLE_T handle;
158 /* ensure serialised access to service */
159 struct mutex vchiq_mutex;
161 /* vmalloc page to receive scratch bulk xfers into */
164 struct idr context_map;
165 /* protect accesses to context_map */
166 struct mutex context_map_lock;
168 /* component to use next */
170 struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
173 static struct mmal_msg_context *
174 get_msg_context(struct vchiq_mmal_instance *instance)
176 struct mmal_msg_context *msg_context;
179 /* todo: should this be allocated from a pool to avoid kzalloc */
180 msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
183 return ERR_PTR(-ENOMEM);
185 /* Create an ID that will be passed along with our message so
186 * that when we service the VCHI reply, we can look up what
187 * message is being replied to.
189 mutex_lock(&instance->context_map_lock);
190 handle = idr_alloc(&instance->context_map, msg_context,
192 mutex_unlock(&instance->context_map_lock);
196 return ERR_PTR(handle);
199 msg_context->instance = instance;
200 msg_context->handle = handle;
205 static struct mmal_msg_context *
206 lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
208 return idr_find(&instance->context_map, handle);
212 release_msg_context(struct mmal_msg_context *msg_context)
214 struct vchiq_mmal_instance *instance = msg_context->instance;
216 mutex_lock(&instance->context_map_lock);
217 idr_remove(&instance->context_map, msg_context->handle);
218 mutex_unlock(&instance->context_map_lock);
222 /* deals with receipt of event to host message */
223 static void event_to_host_cb(struct vchiq_mmal_instance *instance,
224 struct mmal_msg *msg, u32 msg_len)
226 pr_debug("unhandled event\n");
227 pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
228 msg->u.event_to_host.client_component,
229 msg->u.event_to_host.port_type,
230 msg->u.event_to_host.port_num,
231 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
234 /* workqueue scheduled callback
236 * we do this because it is important we do not call any other vchiq
237 * sync calls from witin the message delivery thread
239 static void buffer_work_cb(struct work_struct *work)
241 struct mmal_msg_context *msg_context =
242 container_of(work, struct mmal_msg_context, u.bulk.work);
244 atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
246 msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
247 msg_context->u.bulk.port,
248 msg_context->u.bulk.status,
249 msg_context->u.bulk.buffer,
250 msg_context->u.bulk.buffer_used,
251 msg_context->u.bulk.mmal_flags,
252 msg_context->u.bulk.dts,
253 msg_context->u.bulk.pts);
257 /* enqueue a bulk receive for a given message context */
258 static int bulk_receive(struct vchiq_mmal_instance *instance,
259 struct mmal_msg *msg,
260 struct mmal_msg_context *msg_context)
262 unsigned long rd_len;
265 rd_len = msg->u.buffer_from_host.buffer_header.length;
267 if (!msg_context->u.bulk.buffer) {
268 pr_err("bulk.buffer not configured - error in buffer_from_host\n");
270 /* todo: this is a serious error, we should never have
271 * committed a buffer_to_host operation to the mmal
272 * port without the buffer to back it up (underflow
273 * handling) and there is no obvious way to deal with
274 * this - how is the mmal servie going to react when
275 * we fail to do the xfer and reschedule a buffer when
276 * it arrives? perhaps a starved flag to indicate a
277 * waiting bulk receive?
283 /* ensure we do not overrun the available buffer */
284 if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
285 rd_len = msg_context->u.bulk.buffer->buffer_size;
286 pr_warn("short read as not enough receive buffer space\n");
287 /* todo: is this the correct response, what happens to
288 * the rest of the message data?
293 msg_context->u.bulk.buffer_used = rd_len;
294 msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
295 msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
297 /* queue the bulk submission */
298 vchi_service_use(instance->handle);
299 ret = vchi_bulk_queue_receive(instance->handle,
300 msg_context->u.bulk.buffer->buffer,
301 /* Actual receive needs to be a multiple
305 VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
306 VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
309 vchi_service_release(instance->handle);
314 /* enque a dummy bulk receive for a given message context */
315 static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
316 struct mmal_msg_context *msg_context)
320 /* zero length indicates this was a dummy transfer */
321 msg_context->u.bulk.buffer_used = 0;
323 /* queue the bulk submission */
324 vchi_service_use(instance->handle);
326 ret = vchi_bulk_queue_receive(instance->handle,
327 instance->bulk_scratch,
329 VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
330 VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
333 vchi_service_release(instance->handle);
338 /* data in message, memcpy from packet into output buffer */
339 static int inline_receive(struct vchiq_mmal_instance *instance,
340 struct mmal_msg *msg,
341 struct mmal_msg_context *msg_context)
343 memcpy(msg_context->u.bulk.buffer->buffer,
344 msg->u.buffer_from_host.short_data,
345 msg->u.buffer_from_host.payload_in_message);
347 msg_context->u.bulk.buffer_used =
348 msg->u.buffer_from_host.payload_in_message;
353 /* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
355 buffer_from_host(struct vchiq_mmal_instance *instance,
356 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
358 struct mmal_msg_context *msg_context;
365 pr_debug("instance:%p buffer:%p\n", instance->handle, buf);
368 if (!buf->msg_context) {
369 pr_err("%s: msg_context not allocated, buf %p\n", __func__,
373 msg_context = buf->msg_context;
375 /* store bulk message context for when data arrives */
376 msg_context->u.bulk.instance = instance;
377 msg_context->u.bulk.port = port;
378 msg_context->u.bulk.buffer = buf;
379 msg_context->u.bulk.buffer_used = 0;
381 /* initialise work structure ready to schedule callback */
382 INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
384 atomic_inc(&port->buffers_with_vpu);
386 /* prep the buffer from host message */
387 memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
389 m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
390 m.h.magic = MMAL_MAGIC;
391 m.h.context = msg_context->handle;
394 /* drvbuf is our private data passed back */
395 m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
396 m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
397 m.u.buffer_from_host.drvbuf.port_handle = port->handle;
398 m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;
401 m.u.buffer_from_host.buffer_header.cmd = 0;
402 m.u.buffer_from_host.buffer_header.data =
403 (u32)(unsigned long)buf->buffer;
404 m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
405 m.u.buffer_from_host.buffer_header.length = 0; /* nothing used yet */
406 m.u.buffer_from_host.buffer_header.offset = 0; /* no offset */
407 m.u.buffer_from_host.buffer_header.flags = 0; /* no flags */
408 m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
409 m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
411 /* clear buffer type sepecific data */
412 memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
413 sizeof(m.u.buffer_from_host.buffer_header_type_specific));
415 /* no payload in message */
416 m.u.buffer_from_host.payload_in_message = 0;
418 vchi_service_use(instance->handle);
420 ret = vchi_queue_kernel_message(instance->handle,
422 sizeof(struct mmal_msg_header) +
423 sizeof(m.u.buffer_from_host));
425 vchi_service_release(instance->handle);
430 /* deals with receipt of buffer to host message */
431 static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
432 struct mmal_msg *msg, u32 msg_len)
434 struct mmal_msg_context *msg_context;
437 pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
438 __func__, instance, msg, msg_len);
440 if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
441 handle = msg->u.buffer_from_host.drvbuf.client_context;
442 msg_context = lookup_msg_context(instance, handle);
445 pr_err("drvbuf.client_context(%u) is invalid\n",
450 pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
454 msg_context->u.bulk.mmal_flags =
455 msg->u.buffer_from_host.buffer_header.flags;
457 if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
458 /* message reception had an error */
459 pr_warn("error %d in reply\n", msg->h.status);
461 msg_context->u.bulk.status = msg->h.status;
463 } else if (msg->u.buffer_from_host.buffer_header.length == 0) {
465 if (msg->u.buffer_from_host.buffer_header.flags &
466 MMAL_BUFFER_HEADER_FLAG_EOS) {
467 msg_context->u.bulk.status =
468 dummy_bulk_receive(instance, msg_context);
469 if (msg_context->u.bulk.status == 0)
470 return; /* successful bulk submission, bulk
471 * completion will trigger callback
474 /* do callback with empty buffer - not EOS though */
475 msg_context->u.bulk.status = 0;
476 msg_context->u.bulk.buffer_used = 0;
478 } else if (msg->u.buffer_from_host.payload_in_message == 0) {
479 /* data is not in message, queue a bulk receive */
480 msg_context->u.bulk.status =
481 bulk_receive(instance, msg, msg_context);
482 if (msg_context->u.bulk.status == 0)
483 return; /* successful bulk submission, bulk
484 * completion will trigger callback
487 /* failed to submit buffer, this will end badly */
488 pr_err("error %d on bulk submission\n",
489 msg_context->u.bulk.status);
491 } else if (msg->u.buffer_from_host.payload_in_message <=
492 MMAL_VC_SHORT_DATA) {
493 /* data payload within message */
494 msg_context->u.bulk.status = inline_receive(instance, msg,
497 pr_err("message with invalid short payload\n");
500 msg_context->u.bulk.status = -EINVAL;
501 msg_context->u.bulk.buffer_used =
502 msg->u.buffer_from_host.payload_in_message;
505 /* schedule the port callback */
506 schedule_work(&msg_context->u.bulk.work);
509 static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
510 struct mmal_msg_context *msg_context)
512 msg_context->u.bulk.status = 0;
514 /* schedule the port callback */
515 schedule_work(&msg_context->u.bulk.work);
518 static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
519 struct mmal_msg_context *msg_context)
521 pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
523 msg_context->u.bulk.status = -EINTR;
525 schedule_work(&msg_context->u.bulk.work);
528 /* incoming event service callback */
529 static void service_callback(void *param,
530 const VCHI_CALLBACK_REASON_T reason,
533 struct vchiq_mmal_instance *instance = param;
536 struct mmal_msg *msg;
537 VCHI_HELD_MSG_T msg_handle;
538 struct mmal_msg_context *msg_context;
541 pr_err("Message callback passed NULL instance\n");
546 case VCHI_CALLBACK_MSG_AVAILABLE:
547 status = vchi_msg_hold(instance->handle, (void **)&msg,
548 &msg_len, VCHI_FLAGS_NONE, &msg_handle);
550 pr_err("Unable to dequeue a message (%d)\n", status);
554 DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
556 /* handling is different for buffer messages */
557 switch (msg->h.type) {
558 case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
559 vchi_held_msg_release(&msg_handle);
562 case MMAL_MSG_TYPE_EVENT_TO_HOST:
563 event_to_host_cb(instance, msg, msg_len);
564 vchi_held_msg_release(&msg_handle);
568 case MMAL_MSG_TYPE_BUFFER_TO_HOST:
569 buffer_to_host_cb(instance, msg, msg_len);
570 vchi_held_msg_release(&msg_handle);
574 /* messages dependent on header context to complete */
575 if (!msg->h.context) {
576 pr_err("received message context was null!\n");
577 vchi_held_msg_release(&msg_handle);
581 msg_context = lookup_msg_context(instance,
584 pr_err("received invalid message context %u!\n",
586 vchi_held_msg_release(&msg_handle);
590 /* fill in context values */
591 msg_context->u.sync.msg_handle = msg_handle;
592 msg_context->u.sync.msg = msg;
593 msg_context->u.sync.msg_len = msg_len;
595 /* todo: should this check (completion_done()
596 * == 1) for no one waiting? or do we need a
597 * flag to tell us the completion has been
598 * interrupted so we can free the message and
599 * its context. This probably also solves the
600 * message arriving after interruption todo
604 /* complete message so caller knows it happened */
605 complete(&msg_context->u.sync.cmplt);
611 case VCHI_CALLBACK_BULK_RECEIVED:
612 bulk_receive_cb(instance, bulk_ctx);
615 case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
616 bulk_abort_cb(instance, bulk_ctx);
619 case VCHI_CALLBACK_SERVICE_CLOSED:
620 /* TODO: consider if this requires action if received when
621 * driver is not explicitly closing the service
626 pr_err("Received unhandled message reason %d\n", reason);
631 static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
632 struct mmal_msg *msg,
633 unsigned int payload_len,
634 struct mmal_msg **msg_out,
635 VCHI_HELD_MSG_T *msg_handle_out)
637 struct mmal_msg_context *msg_context;
639 unsigned long timeout;
641 /* payload size must not cause message to exceed max size */
643 (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
644 pr_err("payload length %d exceeds max:%d\n", payload_len,
645 (int)(MMAL_MSG_MAX_SIZE -
646 sizeof(struct mmal_msg_header)));
650 msg_context = get_msg_context(instance);
651 if (IS_ERR(msg_context))
652 return PTR_ERR(msg_context);
654 init_completion(&msg_context->u.sync.cmplt);
656 msg->h.magic = MMAL_MAGIC;
657 msg->h.context = msg_context->handle;
660 DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
663 vchi_service_use(instance->handle);
665 ret = vchi_queue_kernel_message(instance->handle,
667 sizeof(struct mmal_msg_header) +
670 vchi_service_release(instance->handle);
673 pr_err("error %d queuing message\n", ret);
674 release_msg_context(msg_context);
678 timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
681 pr_err("timed out waiting for sync completion\n");
683 /* todo: what happens if the message arrives after aborting */
684 release_msg_context(msg_context);
688 *msg_out = msg_context->u.sync.msg;
689 *msg_handle_out = msg_context->u.sync.msg_handle;
690 release_msg_context(msg_context);
695 static void dump_port_info(struct vchiq_mmal_port *port)
697 pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
699 pr_debug("buffer minimum num:%d size:%d align:%d\n",
700 port->minimum_buffer.num,
701 port->minimum_buffer.size, port->minimum_buffer.alignment);
703 pr_debug("buffer recommended num:%d size:%d align:%d\n",
704 port->recommended_buffer.num,
705 port->recommended_buffer.size,
706 port->recommended_buffer.alignment);
708 pr_debug("buffer current values num:%d size:%d align:%d\n",
709 port->current_buffer.num,
710 port->current_buffer.size, port->current_buffer.alignment);
712 pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
714 port->format.encoding, port->format.encoding_variant);
716 pr_debug(" bitrate:%d flags:0x%x\n",
717 port->format.bitrate, port->format.flags);
719 if (port->format.type == MMAL_ES_TYPE_VIDEO) {
721 ("es video format: width:%d height:%d colourspace:0x%x\n",
722 port->es.video.width, port->es.video.height,
723 port->es.video.color_space);
725 pr_debug(" : crop xywh %d,%d,%d,%d\n",
726 port->es.video.crop.x,
727 port->es.video.crop.y,
728 port->es.video.crop.width, port->es.video.crop.height);
729 pr_debug(" : framerate %d/%d aspect %d/%d\n",
730 port->es.video.frame_rate.num,
731 port->es.video.frame_rate.den,
732 port->es.video.par.num, port->es.video.par.den);
736 static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
738 /* todo do readonly fields need setting at all? */
739 p->type = port->type;
740 p->index = port->index;
742 p->is_enabled = port->enabled;
743 p->buffer_num_min = port->minimum_buffer.num;
744 p->buffer_size_min = port->minimum_buffer.size;
745 p->buffer_alignment_min = port->minimum_buffer.alignment;
746 p->buffer_num_recommended = port->recommended_buffer.num;
747 p->buffer_size_recommended = port->recommended_buffer.size;
749 /* only three writable fields in a port */
750 p->buffer_num = port->current_buffer.num;
751 p->buffer_size = port->current_buffer.size;
752 p->userdata = (u32)(unsigned long)port;
755 static int port_info_set(struct vchiq_mmal_instance *instance,
756 struct vchiq_mmal_port *port)
760 struct mmal_msg *rmsg;
761 VCHI_HELD_MSG_T rmsg_handle;
763 pr_debug("setting port info port %p\n", port);
766 dump_port_info(port);
768 m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
770 m.u.port_info_set.component_handle = port->component->handle;
771 m.u.port_info_set.port_type = port->type;
772 m.u.port_info_set.port_index = port->index;
774 port_to_mmal_msg(port, &m.u.port_info_set.port);
776 /* elementary stream format setup */
777 m.u.port_info_set.format.type = port->format.type;
778 m.u.port_info_set.format.encoding = port->format.encoding;
779 m.u.port_info_set.format.encoding_variant =
780 port->format.encoding_variant;
781 m.u.port_info_set.format.bitrate = port->format.bitrate;
782 m.u.port_info_set.format.flags = port->format.flags;
784 memcpy(&m.u.port_info_set.es, &port->es,
785 sizeof(union mmal_es_specific_format));
787 m.u.port_info_set.format.extradata_size = port->format.extradata_size;
788 memcpy(&m.u.port_info_set.extradata, port->format.extradata,
789 port->format.extradata_size);
791 ret = send_synchronous_mmal_msg(instance, &m,
792 sizeof(m.u.port_info_set),
793 &rmsg, &rmsg_handle);
797 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
798 /* got an unexpected message type in reply */
803 /* return operation status */
804 ret = -rmsg->u.port_info_get_reply.status;
806 pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
807 port->component->handle, port->handle);
810 vchi_held_msg_release(&rmsg_handle);
815 /* use port info get message to retrieve port information */
816 static int port_info_get(struct vchiq_mmal_instance *instance,
817 struct vchiq_mmal_port *port)
821 struct mmal_msg *rmsg;
822 VCHI_HELD_MSG_T rmsg_handle;
825 m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
826 m.u.port_info_get.component_handle = port->component->handle;
827 m.u.port_info_get.port_type = port->type;
828 m.u.port_info_get.index = port->index;
830 ret = send_synchronous_mmal_msg(instance, &m,
831 sizeof(m.u.port_info_get),
832 &rmsg, &rmsg_handle);
836 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
837 /* got an unexpected message type in reply */
842 /* return operation status */
843 ret = -rmsg->u.port_info_get_reply.status;
844 if (ret != MMAL_MSG_STATUS_SUCCESS)
847 if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
848 port->enabled = false;
850 port->enabled = true;
852 /* copy the values out of the message */
853 port->handle = rmsg->u.port_info_get_reply.port_handle;
855 /* port type and index cached to use on port info set because
856 * it does not use a port handle
858 port->type = rmsg->u.port_info_get_reply.port_type;
859 port->index = rmsg->u.port_info_get_reply.port_index;
861 port->minimum_buffer.num =
862 rmsg->u.port_info_get_reply.port.buffer_num_min;
863 port->minimum_buffer.size =
864 rmsg->u.port_info_get_reply.port.buffer_size_min;
865 port->minimum_buffer.alignment =
866 rmsg->u.port_info_get_reply.port.buffer_alignment_min;
868 port->recommended_buffer.alignment =
869 rmsg->u.port_info_get_reply.port.buffer_alignment_min;
870 port->recommended_buffer.num =
871 rmsg->u.port_info_get_reply.port.buffer_num_recommended;
873 port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
874 port->current_buffer.size =
875 rmsg->u.port_info_get_reply.port.buffer_size;
878 port->format.type = rmsg->u.port_info_get_reply.format.type;
879 port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
880 port->format.encoding_variant =
881 rmsg->u.port_info_get_reply.format.encoding_variant;
882 port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
883 port->format.flags = rmsg->u.port_info_get_reply.format.flags;
885 /* elementary stream format */
887 &rmsg->u.port_info_get_reply.es,
888 sizeof(union mmal_es_specific_format));
889 port->format.es = &port->es;
891 port->format.extradata_size =
892 rmsg->u.port_info_get_reply.format.extradata_size;
893 memcpy(port->format.extradata,
894 rmsg->u.port_info_get_reply.extradata,
895 port->format.extradata_size);
897 pr_debug("received port info\n");
898 dump_port_info(port);
902 pr_debug("%s:result:%d component:0x%x port:%d\n",
903 __func__, ret, port->component->handle, port->handle);
905 vchi_held_msg_release(&rmsg_handle);
910 /* create comonent on vc */
911 static int create_component(struct vchiq_mmal_instance *instance,
912 struct vchiq_mmal_component *component,
917 struct mmal_msg *rmsg;
918 VCHI_HELD_MSG_T rmsg_handle;
920 /* build component create message */
921 m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
922 m.u.component_create.client_component = (u32)(unsigned long)component;
923 strncpy(m.u.component_create.name, name,
924 sizeof(m.u.component_create.name));
926 ret = send_synchronous_mmal_msg(instance, &m,
927 sizeof(m.u.component_create),
928 &rmsg, &rmsg_handle);
932 if (rmsg->h.type != m.h.type) {
933 /* got an unexpected message type in reply */
938 ret = -rmsg->u.component_create_reply.status;
939 if (ret != MMAL_MSG_STATUS_SUCCESS)
942 /* a valid component response received */
943 component->handle = rmsg->u.component_create_reply.component_handle;
944 component->inputs = rmsg->u.component_create_reply.input_num;
945 component->outputs = rmsg->u.component_create_reply.output_num;
946 component->clocks = rmsg->u.component_create_reply.clock_num;
948 pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
950 component->inputs, component->outputs, component->clocks);
953 vchi_held_msg_release(&rmsg_handle);
958 /* destroys a component on vc */
959 static int destroy_component(struct vchiq_mmal_instance *instance,
960 struct vchiq_mmal_component *component)
964 struct mmal_msg *rmsg;
965 VCHI_HELD_MSG_T rmsg_handle;
967 m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
968 m.u.component_destroy.component_handle = component->handle;
970 ret = send_synchronous_mmal_msg(instance, &m,
971 sizeof(m.u.component_destroy),
972 &rmsg, &rmsg_handle);
976 if (rmsg->h.type != m.h.type) {
977 /* got an unexpected message type in reply */
982 ret = -rmsg->u.component_destroy_reply.status;
986 vchi_held_msg_release(&rmsg_handle);
991 /* enable a component on vc */
992 static int enable_component(struct vchiq_mmal_instance *instance,
993 struct vchiq_mmal_component *component)
997 struct mmal_msg *rmsg;
998 VCHI_HELD_MSG_T rmsg_handle;
1000 m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1001 m.u.component_enable.component_handle = component->handle;
1003 ret = send_synchronous_mmal_msg(instance, &m,
1004 sizeof(m.u.component_enable),
1005 &rmsg, &rmsg_handle);
1009 if (rmsg->h.type != m.h.type) {
1010 /* got an unexpected message type in reply */
1015 ret = -rmsg->u.component_enable_reply.status;
1018 vchi_held_msg_release(&rmsg_handle);
1023 /* disable a component on vc */
1024 static int disable_component(struct vchiq_mmal_instance *instance,
1025 struct vchiq_mmal_component *component)
1029 struct mmal_msg *rmsg;
1030 VCHI_HELD_MSG_T rmsg_handle;
1032 m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1033 m.u.component_disable.component_handle = component->handle;
1035 ret = send_synchronous_mmal_msg(instance, &m,
1036 sizeof(m.u.component_disable),
1037 &rmsg, &rmsg_handle);
1041 if (rmsg->h.type != m.h.type) {
1042 /* got an unexpected message type in reply */
1047 ret = -rmsg->u.component_disable_reply.status;
1051 vchi_held_msg_release(&rmsg_handle);
1056 /* get version of mmal implementation */
1057 static int get_version(struct vchiq_mmal_instance *instance,
1058 u32 *major_out, u32 *minor_out)
1062 struct mmal_msg *rmsg;
1063 VCHI_HELD_MSG_T rmsg_handle;
1065 m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1067 ret = send_synchronous_mmal_msg(instance, &m,
1068 sizeof(m.u.version),
1069 &rmsg, &rmsg_handle);
1073 if (rmsg->h.type != m.h.type) {
1074 /* got an unexpected message type in reply */
1079 *major_out = rmsg->u.version.major;
1080 *minor_out = rmsg->u.version.minor;
1083 vchi_held_msg_release(&rmsg_handle);
1088 /* do a port action with a port as a parameter */
1089 static int port_action_port(struct vchiq_mmal_instance *instance,
1090 struct vchiq_mmal_port *port,
1091 enum mmal_msg_port_action_type action_type)
1095 struct mmal_msg *rmsg;
1096 VCHI_HELD_MSG_T rmsg_handle;
1098 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1099 m.u.port_action_port.component_handle = port->component->handle;
1100 m.u.port_action_port.port_handle = port->handle;
1101 m.u.port_action_port.action = action_type;
1103 port_to_mmal_msg(port, &m.u.port_action_port.port);
1105 ret = send_synchronous_mmal_msg(instance, &m,
1106 sizeof(m.u.port_action_port),
1107 &rmsg, &rmsg_handle);
1111 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1112 /* got an unexpected message type in reply */
1117 ret = -rmsg->u.port_action_reply.status;
1119 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1121 ret, port->component->handle, port->handle,
1122 port_action_type_names[action_type], action_type);
1125 vchi_held_msg_release(&rmsg_handle);
1130 /* do a port action with handles as parameters */
1131 static int port_action_handle(struct vchiq_mmal_instance *instance,
1132 struct vchiq_mmal_port *port,
1133 enum mmal_msg_port_action_type action_type,
1134 u32 connect_component_handle,
1135 u32 connect_port_handle)
1139 struct mmal_msg *rmsg;
1140 VCHI_HELD_MSG_T rmsg_handle;
1142 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1144 m.u.port_action_handle.component_handle = port->component->handle;
1145 m.u.port_action_handle.port_handle = port->handle;
1146 m.u.port_action_handle.action = action_type;
1148 m.u.port_action_handle.connect_component_handle =
1149 connect_component_handle;
1150 m.u.port_action_handle.connect_port_handle = connect_port_handle;
1152 ret = send_synchronous_mmal_msg(instance, &m,
1153 sizeof(m.u.port_action_handle),
1154 &rmsg, &rmsg_handle);
1158 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1159 /* got an unexpected message type in reply */
1164 ret = -rmsg->u.port_action_reply.status;
1166 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1168 ret, port->component->handle, port->handle,
1169 port_action_type_names[action_type],
1170 action_type, connect_component_handle, connect_port_handle);
1173 vchi_held_msg_release(&rmsg_handle);
1178 static int port_parameter_set(struct vchiq_mmal_instance *instance,
1179 struct vchiq_mmal_port *port,
1180 u32 parameter_id, void *value, u32 value_size)
1184 struct mmal_msg *rmsg;
1185 VCHI_HELD_MSG_T rmsg_handle;
1187 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1189 m.u.port_parameter_set.component_handle = port->component->handle;
1190 m.u.port_parameter_set.port_handle = port->handle;
1191 m.u.port_parameter_set.id = parameter_id;
1192 m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1193 memcpy(&m.u.port_parameter_set.value, value, value_size);
1195 ret = send_synchronous_mmal_msg(instance, &m,
1196 (4 * sizeof(u32)) + value_size,
1197 &rmsg, &rmsg_handle);
1201 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1202 /* got an unexpected message type in reply */
1207 ret = -rmsg->u.port_parameter_set_reply.status;
1209 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1211 ret, port->component->handle, port->handle, parameter_id);
1214 vchi_held_msg_release(&rmsg_handle);
1219 static int port_parameter_get(struct vchiq_mmal_instance *instance,
1220 struct vchiq_mmal_port *port,
1221 u32 parameter_id, void *value, u32 *value_size)
1225 struct mmal_msg *rmsg;
1226 VCHI_HELD_MSG_T rmsg_handle;
1228 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
1230 m.u.port_parameter_get.component_handle = port->component->handle;
1231 m.u.port_parameter_get.port_handle = port->handle;
1232 m.u.port_parameter_get.id = parameter_id;
1233 m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
1235 ret = send_synchronous_mmal_msg(instance, &m,
1237 mmal_msg_port_parameter_get),
1238 &rmsg, &rmsg_handle);
1242 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
1243 /* got an unexpected message type in reply */
1244 pr_err("Incorrect reply type %d\n", rmsg->h.type);
1249 ret = -rmsg->u.port_parameter_get_reply.status;
1250 /* port_parameter_get_reply.size includes the header,
1251 * whilst *value_size doesn't.
1253 rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));
1255 if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
1256 /* Copy only as much as we have space for
1257 * but report true size of parameter
1259 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1261 *value_size = rmsg->u.port_parameter_get_reply.size;
1263 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1264 rmsg->u.port_parameter_get_reply.size);
1266 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
1267 ret, port->component->handle, port->handle, parameter_id);
1270 vchi_held_msg_release(&rmsg_handle);
1275 /* disables a port and drains buffers from it */
1276 static int port_disable(struct vchiq_mmal_instance *instance,
1277 struct vchiq_mmal_port *port)
1280 struct list_head *q, *buf_head;
1281 unsigned long flags = 0;
1286 port->enabled = false;
1288 ret = port_action_port(instance, port,
1289 MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
1292 * Drain all queued buffers on port. This should only
1293 * apply to buffers that have been queued before the port
1294 * has been enabled. If the port has been enabled and buffers
1295 * passed, then the buffers should have been removed from this
1296 * list, and we should get the relevant callbacks via VCHIQ
1297 * to release the buffers.
1299 spin_lock_irqsave(&port->slock, flags);
1301 list_for_each_safe(buf_head, q, &port->buffers) {
1302 struct mmal_buffer *mmalbuf;
1304 mmalbuf = list_entry(buf_head, struct mmal_buffer,
1307 if (port->buffer_cb)
1308 port->buffer_cb(instance,
1309 port, 0, mmalbuf, 0, 0,
1314 spin_unlock_irqrestore(&port->slock, flags);
1316 ret = port_info_get(instance, port);
1323 static int port_enable(struct vchiq_mmal_instance *instance,
1324 struct vchiq_mmal_port *port)
1326 unsigned int hdr_count;
1327 struct list_head *q, *buf_head;
1333 ret = port_action_port(instance, port,
1334 MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
1338 port->enabled = true;
1340 if (port->buffer_cb) {
1341 /* send buffer headers to videocore */
1343 list_for_each_safe(buf_head, q, &port->buffers) {
1344 struct mmal_buffer *mmalbuf;
1346 mmalbuf = list_entry(buf_head, struct mmal_buffer,
1348 ret = buffer_from_host(instance, port, mmalbuf);
1354 if (hdr_count > port->current_buffer.num)
1359 ret = port_info_get(instance, port);
/* ------------------------------------------------------------------
 * Exported API
 *------------------------------------------------------------------
 */
1370 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1371 struct vchiq_mmal_port *port)
1375 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1378 ret = port_info_set(instance, port);
1380 goto release_unlock;
1382 /* read what has actually been set */
1383 ret = port_info_get(instance, port);
1386 mutex_unlock(&instance->vchiq_mutex);
1391 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1392 struct vchiq_mmal_port *port,
1393 u32 parameter, void *value, u32 value_size)
1397 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1400 ret = port_parameter_set(instance, port, parameter, value, value_size);
1402 mutex_unlock(&instance->vchiq_mutex);
1407 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1408 struct vchiq_mmal_port *port,
1409 u32 parameter, void *value, u32 *value_size)
1413 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1416 ret = port_parameter_get(instance, port, parameter, value, value_size);
1418 mutex_unlock(&instance->vchiq_mutex);
1425 * enables a port and queues buffers for satisfying callbacks if we
1426 * provide a callback handler
1428 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1429 struct vchiq_mmal_port *port,
1430 vchiq_mmal_buffer_cb buffer_cb)
1434 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1437 /* already enabled - noop */
1438 if (port->enabled) {
1443 port->buffer_cb = buffer_cb;
1445 ret = port_enable(instance, port);
1448 mutex_unlock(&instance->vchiq_mutex);
1453 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1454 struct vchiq_mmal_port *port)
1458 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1461 if (!port->enabled) {
1462 mutex_unlock(&instance->vchiq_mutex);
1466 ret = port_disable(instance, port);
1468 mutex_unlock(&instance->vchiq_mutex);
1473 /* ports will be connected in a tunneled manner so data buffers
1474 * are not handled by client.
1476 int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
1477 struct vchiq_mmal_port *src,
1478 struct vchiq_mmal_port *dst)
1482 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1485 /* disconnect ports if connected */
1486 if (src->connected) {
1487 ret = port_disable(instance, src);
1489 pr_err("failed disabling src port(%d)\n", ret);
1490 goto release_unlock;
1493 /* do not need to disable the destination port as they
1494 * are connected and it is done automatically
1497 ret = port_action_handle(instance, src,
1498 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
1499 src->connected->component->handle,
1500 src->connected->handle);
1502 pr_err("failed disconnecting src port\n");
1503 goto release_unlock;
1505 src->connected->enabled = false;
1506 src->connected = NULL;
1510 /* do not make new connection */
1512 pr_debug("not making new connection\n");
1513 goto release_unlock;
1516 /* copy src port format to dst */
1517 dst->format.encoding = src->format.encoding;
1518 dst->es.video.width = src->es.video.width;
1519 dst->es.video.height = src->es.video.height;
1520 dst->es.video.crop.x = src->es.video.crop.x;
1521 dst->es.video.crop.y = src->es.video.crop.y;
1522 dst->es.video.crop.width = src->es.video.crop.width;
1523 dst->es.video.crop.height = src->es.video.crop.height;
1524 dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
1525 dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
1527 /* set new format */
1528 ret = port_info_set(instance, dst);
1530 pr_debug("setting port info failed\n");
1531 goto release_unlock;
1534 /* read what has actually been set */
1535 ret = port_info_get(instance, dst);
1537 pr_debug("read back port info failed\n");
1538 goto release_unlock;
1541 /* connect two ports together */
1542 ret = port_action_handle(instance, src,
1543 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
1544 dst->component->handle, dst->handle);
1546 pr_debug("connecting port %d:%d to %d:%d failed\n",
1547 src->component->handle, src->handle,
1548 dst->component->handle, dst->handle);
1549 goto release_unlock;
1551 src->connected = dst;
1555 mutex_unlock(&instance->vchiq_mutex);
1560 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1561 struct vchiq_mmal_port *port,
1562 struct mmal_buffer *buffer)
1564 unsigned long flags = 0;
1567 ret = buffer_from_host(instance, port, buffer);
1568 if (ret == -EINVAL) {
1569 /* Port is disabled. Queue for when it is enabled. */
1570 spin_lock_irqsave(&port->slock, flags);
1571 list_add_tail(&buffer->list, &port->buffers);
1572 spin_unlock_irqrestore(&port->slock, flags);
1578 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1579 struct mmal_buffer *buf)
1581 struct mmal_msg_context *msg_context = get_msg_context(instance);
1583 if (IS_ERR(msg_context))
1584 return (PTR_ERR(msg_context));
1586 buf->msg_context = msg_context;
1590 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1592 struct mmal_msg_context *msg_context = buf->msg_context;
1595 release_msg_context(msg_context);
1596 buf->msg_context = NULL;
1601 /* Initialise a mmal component and its ports
1604 int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
1606 struct vchiq_mmal_component **component_out)
1609 int idx; /* port index */
1610 struct vchiq_mmal_component *component;
1612 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1615 if (instance->component_idx == VCHIQ_MMAL_MAX_COMPONENTS) {
1616 ret = -EINVAL; /* todo is this correct error? */
1620 component = &instance->component[instance->component_idx];
1622 ret = create_component(instance, component, name);
1626 /* ports info needs gathering */
1627 component->control.type = MMAL_PORT_TYPE_CONTROL;
1628 component->control.index = 0;
1629 component->control.component = component;
1630 spin_lock_init(&component->control.slock);
1631 INIT_LIST_HEAD(&component->control.buffers);
1632 ret = port_info_get(instance, &component->control);
1634 goto release_component;
1636 for (idx = 0; idx < component->inputs; idx++) {
1637 component->input[idx].type = MMAL_PORT_TYPE_INPUT;
1638 component->input[idx].index = idx;
1639 component->input[idx].component = component;
1640 spin_lock_init(&component->input[idx].slock);
1641 INIT_LIST_HEAD(&component->input[idx].buffers);
1642 ret = port_info_get(instance, &component->input[idx]);
1644 goto release_component;
1647 for (idx = 0; idx < component->outputs; idx++) {
1648 component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
1649 component->output[idx].index = idx;
1650 component->output[idx].component = component;
1651 spin_lock_init(&component->output[idx].slock);
1652 INIT_LIST_HEAD(&component->output[idx].buffers);
1653 ret = port_info_get(instance, &component->output[idx]);
1655 goto release_component;
1658 for (idx = 0; idx < component->clocks; idx++) {
1659 component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
1660 component->clock[idx].index = idx;
1661 component->clock[idx].component = component;
1662 spin_lock_init(&component->clock[idx].slock);
1663 INIT_LIST_HEAD(&component->clock[idx].buffers);
1664 ret = port_info_get(instance, &component->clock[idx]);
1666 goto release_component;
1669 instance->component_idx++;
1671 *component_out = component;
1673 mutex_unlock(&instance->vchiq_mutex);
1678 destroy_component(instance, component);
1680 mutex_unlock(&instance->vchiq_mutex);
1686 * cause a mmal component to be destroyed
1688 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1689 struct vchiq_mmal_component *component)
1693 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1696 if (component->enabled)
1697 ret = disable_component(instance, component);
1699 ret = destroy_component(instance, component);
1701 mutex_unlock(&instance->vchiq_mutex);
1707 * cause a mmal component to be enabled
1709 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1710 struct vchiq_mmal_component *component)
1714 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1717 if (component->enabled) {
1718 mutex_unlock(&instance->vchiq_mutex);
1722 ret = enable_component(instance, component);
1724 component->enabled = true;
1726 mutex_unlock(&instance->vchiq_mutex);
1732 * cause a mmal component to be enabled
1734 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1735 struct vchiq_mmal_component *component)
1739 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1742 if (!component->enabled) {
1743 mutex_unlock(&instance->vchiq_mutex);
1747 ret = disable_component(instance, component);
1749 component->enabled = false;
1751 mutex_unlock(&instance->vchiq_mutex);
1756 int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
1757 u32 *major_out, u32 *minor_out)
1761 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1764 ret = get_version(instance, major_out, minor_out);
1766 mutex_unlock(&instance->vchiq_mutex);
1771 int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
1778 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1781 vchi_service_use(instance->handle);
1783 status = vchi_service_close(instance->handle);
1785 pr_err("mmal-vchiq: VCHIQ close failed\n");
1787 mutex_unlock(&instance->vchiq_mutex);
1789 vfree(instance->bulk_scratch);
1791 idr_destroy(&instance->context_map);
1798 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
1801 struct vchiq_mmal_instance *instance;
1802 static VCHI_CONNECTION_T *vchi_connection;
1803 static VCHI_INSTANCE_T vchi_instance;
1804 SERVICE_CREATION_T params = {
1805 .version = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
1806 .service_id = VC_MMAL_SERVER_NAME,
1807 .connection = vchi_connection,
1810 .callback = service_callback,
1811 .callback_param = NULL,
1812 .want_unaligned_bulk_rx = 1,
1813 .want_unaligned_bulk_tx = 1,
1817 /* compile time checks to ensure structure size as they are
1818 * directly (de)serialised from memory.
1821 /* ensure the header structure has packed to the correct size */
1822 BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1824 /* ensure message structure does not exceed maximum length */
1825 BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1827 /* mmal port struct is correct size */
1828 BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1830 /* create a vchi instance */
1831 status = vchi_initialise(&vchi_instance);
1833 pr_err("Failed to initialise VCHI instance (status=%d)\n",
1838 status = vchi_connect(NULL, 0, vchi_instance);
1840 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1844 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1849 mutex_init(&instance->vchiq_mutex);
1851 instance->bulk_scratch = vmalloc(PAGE_SIZE);
1853 mutex_init(&instance->context_map_lock);
1854 idr_init_base(&instance->context_map, 1);
1856 params.callback_param = instance;
1858 status = vchi_service_open(vchi_instance, ¶ms, &instance->handle);
1860 pr_err("Failed to open VCHI service connection (status=%d)\n",
1862 goto err_close_services;
1865 vchi_service_release(instance->handle);
1867 *out_instance = instance;
1873 vchi_service_close(instance->handle);
1874 vfree(instance->bulk_scratch);