1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 */
8 #include <linux/errno.h>
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/kmod.h>
13 #include <linux/ktime.h>
14 #include <linux/slab.h>
16 #include <linux/string.h>
17 #include <linux/types.h>
18 #include <linux/uaccess.h>
19 #include <linux/version.h>
21 #include <media/cec-pin.h>
23 #include "cec-pin-priv.h"
/* Map an open file handle back to the cec_devnode embedded in its adapter. */
25 static inline struct cec_devnode *cec_devnode_data(struct file *filp)
27 	struct cec_fh *fh = filp->private_data;
29 	return &fh->adap->devnode;
32 /* CEC file operations */
/*
 * poll() handler for the CEC device node.
 *
 * Reports EPOLLERR | EPOLLHUP if the adapter has been unregistered,
 * EPOLLOUT | EPOLLWRNORM when the adapter is configured and the transmit
 * queue still has room, and EPOLLIN | EPOLLRDNORM when messages or events
 * are queued for this filehandle.
 *
 * NOTE(review): the declaration of the result mask and the final return are
 * not visible in this view of the source; the flags below are accumulated
 * into it under adap->lock.
 */
34 static __poll_t cec_poll(struct file *filp,
35 			 struct poll_table_struct *poll)
37 	struct cec_fh *fh = filp->private_data;
38 	struct cec_adapter *adap = fh->adap;
	/* An unregistered adapter can never become readable/writable again. */
41 	if (!cec_is_registered(adap))
42 		return EPOLLERR | EPOLLHUP;
43 	mutex_lock(&adap->lock);
	/* Writable only while configured and the TX queue is not full. */
44 	if (adap->is_configured &&
45 	    adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
46 		res |= EPOLLOUT | EPOLLWRNORM;
48 		res |= EPOLLIN | EPOLLRDNORM;
	/* Queued events also make the fh readable (flag set on elided line). */
49 	if (fh->total_queued_events)
51 	poll_wait(filp, &fh->wait, poll);
52 	mutex_unlock(&adap->lock);
/*
 * Return true if this filehandle is not allowed to use the adapter right
 * now, i.e. another filehandle holds an exclusive role that blocks it.
 */
56 static bool cec_is_busy(const struct cec_adapter *adap,
57 			const struct cec_fh *fh)
59 	bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
60 	bool valid_follower = adap->cec_follower && adap->cec_follower == fh;
63 	 * Exclusive initiators and followers can always access the CEC adapter
	/* The owner of an exclusive role is never considered busy. */
65 	if (valid_initiator || valid_follower)
68 	 * All others can only access the CEC adapter if there is no
69 	 * exclusive initiator and they are in INITIATOR mode.
	/*
	 * Busy if someone else is the exclusive initiator, or if this fh
	 * opted out of being an initiator entirely.
	 */
71 	return adap->cec_initiator ||
72 	       fh->mode_initiator == CEC_MODE_NO_INITIATOR;
/*
 * CEC_ADAP_G_CAPS ioctl: fill in a struct cec_caps (driver name, adapter
 * name, number of available logical addresses, capability flags and the
 * kernel version) and copy it to userspace.
 */
75 static long cec_adap_g_caps(struct cec_adapter *adap,
76 			    struct cec_caps __user *parg)
	/* Zero-init so no kernel stack bytes leak through copy_to_user(). */
78 	struct cec_caps caps = {};
80 	strlcpy(caps.driver, adap->devnode.dev.parent->driver->name,
82 	strlcpy(caps.name, adap->name, sizeof(caps.name));
83 	caps.available_log_addrs = adap->available_log_addrs;
84 	caps.capabilities = adap->capabilities;
85 	caps.version = LINUX_VERSION_CODE;
86 	if (copy_to_user(parg, &caps, sizeof(caps)))
/*
 * CEC_ADAP_G_PHYS_ADDR ioctl: snapshot the current physical address under
 * the adapter lock and copy it to userspace.
 */
91 static long cec_adap_g_phys_addr(struct cec_adapter *adap,
96 	mutex_lock(&adap->lock);
97 	phys_addr = adap->phys_addr;
98 	mutex_unlock(&adap->lock);
99 	if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
/*
 * Validate a CEC physical address (four 4-bit fields, MSB first).
 * CEC_PHYS_ADDR_INVALID is accepted as-is; otherwise, once a zero nibble
 * is found, all lower nibbles must also be zero (no "holes" like 1.0.1.0).
 */
104 static int cec_validate_phys_addr(u16 phys_addr)
108 	if (phys_addr == CEC_PHYS_ADDR_INVALID)
	/* Scan up from the least significant nibble for the first non-zero. */
110 	for (i = 0; i < 16; i += 4)
111 		if (phys_addr & (0xf << i))
	/* Every nibble above the first non-zero one must itself be non-zero. */
115 	for (i += 4; i < 16; i += 4)
116 		if ((phys_addr & (0xf << i)) == 0)
/*
 * CEC_ADAP_S_PHYS_ADDR ioctl: set a new physical address.
 * Requires CEC_CAP_PHYS_ADDR; the address is validated and the adapter
 * must not be busy with another exclusive filehandle.
 */
121 static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
122 				 bool block, __u16 __user *parg)
127 	if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
129 	if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
132 	err = cec_validate_phys_addr(phys_addr);
135 	mutex_lock(&adap->lock);
	/* Reject if another fh holds an exclusive role (error set on elided line). */
136 	if (cec_is_busy(adap, fh))
139 		__cec_s_phys_addr(adap, phys_addr, block);
140 	mutex_unlock(&adap->lock);
/*
 * CEC_ADAP_G_LOG_ADDRS ioctl: copy the current logical address setup to
 * userspace. If the adapter is not configured the logical addresses are
 * reported as CEC_LOG_ADDR_INVALID.
 */
144 static long cec_adap_g_log_addrs(struct cec_adapter *adap,
145 				 struct cec_log_addrs __user *parg)
147 	struct cec_log_addrs log_addrs;
149 	mutex_lock(&adap->lock);
151 	 * We use memcpy here instead of assignment since there is a
152 	 * hole at the end of struct cec_log_addrs that an assignment
153 	 * might ignore. So when we do copy_to_user() we could leak
154 	 * one byte of memory.
156 	memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs));
157 	if (!adap->is_configured)
158 		memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
159 		       sizeof(log_addrs.log_addr));
160 	mutex_unlock(&adap->lock);
162 	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
/*
 * CEC_ADAP_S_LOG_ADDRS ioctl: configure (or unconfigure, when
 * num_log_addrs == 0) the logical addresses. Requires CEC_CAP_LOG_ADDRS.
 * Only the userspace-settable flag bits are kept; the rest are masked off.
 */
167 static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
168 				 bool block, struct cec_log_addrs __user *parg)
170 	struct cec_log_addrs log_addrs;
173 	if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
175 	if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
	/* Silently drop any flag bits userspace is not allowed to set. */
177 	log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
178 			   CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
179 			   CEC_LOG_ADDRS_FL_CDC_ONLY;
180 	mutex_lock(&adap->lock);
	/*
	 * Only (re)configure when not mid-configuration, when either
	 * unconfiguring or not yet configured, and when this fh may use
	 * the adapter.
	 */
181 	if (!adap->is_configuring &&
182 	    (!log_addrs.num_log_addrs || !adap->is_configured) &&
183 	    !cec_is_busy(adap, fh)) {
184 		err = __cec_s_log_addrs(adap, &log_addrs, block);
		/* On success report the resulting setup back to userspace. */
186 			log_addrs = adap->log_addrs;
188 	mutex_unlock(&adap->lock);
191 	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
/*
 * CEC_TRANSMIT ioctl: validate and queue a message for transmission.
 * Requires CEC_CAP_TRANSMIT. CDC-only devices may only send
 * CEC_MSG_CDC_MESSAGE. The message is copied back so userspace sees the
 * filled-in sequence number / status fields.
 */
196 static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
197 			 bool block, struct cec_msg __user *parg)
199 	struct cec_msg msg = {};
202 	if (!(adap->capabilities & CEC_CAP_TRANSMIT))
204 	if (copy_from_user(&msg, parg, sizeof(msg)))
207 	/* A CDC-Only device can only send CDC messages */
208 	if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
209 	    (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
212 	mutex_lock(&adap->lock);
	/*
	 * Reject when no logical addresses are claimed, while configuring,
	 * or when unconfigured — except that an unconfigured adapter that
	 * does not need a HPD may still send Unregistered->TV (0xf0)
	 * messages. Error codes for these branches are on elided lines.
	 */
213 	if (adap->log_addrs.num_log_addrs == 0)
215 	else if (adap->is_configuring)
217 	else if (!adap->is_configured &&
218 		 (adap->needs_hpd || msg.msg[0] != 0xf0))
220 	else if (cec_is_busy(adap, fh))
223 		err = cec_transmit_msg_fh(adap, &msg, fh, block);
224 	mutex_unlock(&adap->lock);
227 	if (copy_to_user(parg, &msg, sizeof(msg)))
232 /* Called by CEC_RECEIVE: wait for a message to arrive */
/*
 * Dequeue the oldest received message into *msg. In non-blocking mode
 * returns immediately when the queue is empty; in blocking mode waits,
 * honoring msg->timeout (0 means wait indefinitely). The caller's
 * timeout value is restored in the returned message.
 */
233 static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
235 	u32 timeout = msg->timeout;
239 		mutex_lock(&fh->lock);
240 		/* Are there received messages queued up? */
241 		if (fh->queued_msgs) {
242 			/* Yes, return the first one */
243 			struct cec_msg_entry *entry =
244 				list_first_entry(&fh->msgs,
245 						 struct cec_msg_entry, list);
247 			list_del(&entry->list);
251 			mutex_unlock(&fh->lock);
252 			/* restore original timeout value */
253 			msg->timeout = timeout;
257 		/* No, return EAGAIN in non-blocking mode or wait */
258 		mutex_unlock(&fh->lock);
260 		/* Return when in non-blocking mode */
265 			/* The user specified a timeout */
266 			res = wait_event_interruptible_timeout(fh->wait,
268 				msecs_to_jiffies(msg->timeout));
274 			/* Wait indefinitely */
275 			res = wait_event_interruptible(fh->wait,
278 		/* Exit on error, otherwise loop to get the new message */
/*
 * CEC_RECEIVE ioctl: copy the request from userspace, wait for a message
 * via cec_receive_msg() and copy the result back.
 */
283 static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
284 			bool block, struct cec_msg __user *parg)
286 	struct cec_msg msg = {};
289 	if (copy_from_user(&msg, parg, sizeof(msg)))
292 	err = cec_receive_msg(fh, &msg, block);
296 	if (copy_to_user(parg, &msg, sizeof(msg)))
/*
 * CEC_DQEVENT ioctl: dequeue the oldest pending event for this filehandle.
 * In blocking mode waits until an event arrives; the oldest event across
 * all per-type queues (by timestamp) is returned first.
 */
301 static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
302 			bool block, struct cec_event __user *parg)
304 	struct cec_event_entry *ev = NULL;
310 	mutex_lock(&fh->lock);
	/* Sleep (lock dropped) until at least one event is queued. */
311 	while (!fh->total_queued_events && block) {
312 		mutex_unlock(&fh->lock);
313 		err = wait_event_interruptible(fh->wait,
314 					       fh->total_queued_events);
317 		mutex_lock(&fh->lock);
320 	/* Find the oldest event */
321 	for (i = 0; i < CEC_NUM_EVENTS; i++) {
322 		struct cec_event_entry *entry =
323 			list_first_entry_or_null(&fh->events[i],
324 						 struct cec_event_entry, list);
		/* Track the entry with the earliest timestamp seen so far. */
326 		if (entry && entry->ev.ts <= ts) {
339 	if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
	/*
	 * Non-core event entries are dynamically allocated and are freed
	 * here (kfree on an elided line); core events use static slots.
	 */
341 	if (ev_idx >= CEC_NUM_CORE_EVENTS)
343 	fh->queued_events[ev_idx]--;
344 	fh->total_queued_events--;
347 	mutex_unlock(&fh->lock);
/*
 * CEC_G_MODE ioctl: report the combined initiator|follower mode bits of
 * this filehandle to userspace.
 */
351 static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
354 	u32 mode = fh->mode_initiator | fh->mode_follower;
356 	if (copy_to_user(parg, &mode, sizeof(mode)))
/*
 * CEC_S_MODE ioctl: change this filehandle's initiator/follower mode.
 *
 * Validates the requested mode bits against the adapter capabilities and
 * permissions (monitor modes need CAP_NET_ADMIN), then — under adap->lock —
 * checks exclusivity conflicts, updates the monitor-all/monitor-pin
 * reference counts, adjusts follower bookkeeping, queues an initial pin
 * event when entering pin-monitor mode, and finally records the new modes
 * in the filehandle.
 */
361 static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
367 	bool send_pin_event = false;
370 	if (copy_from_user(&mode, parg, sizeof(mode)))
	/* Reject any bits outside the defined initiator/follower masks. */
372 	if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
373 		dprintk(1, "%s: invalid mode bits set\n", __func__);
377 	mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
378 	mode_follower = mode & CEC_MODE_FOLLOWER_MSK;
380 	if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
381 	    mode_follower > CEC_MODE_MONITOR_ALL) {
382 		dprintk(1, "%s: unknown mode\n", __func__);
	/* Monitor-all/monitor-pin need explicit adapter capabilities. */
386 	if (mode_follower == CEC_MODE_MONITOR_ALL &&
387 	    !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
388 		dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
392 	if (mode_follower == CEC_MODE_MONITOR_PIN &&
393 	    !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
394 		dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
398 	/* Follower modes should always be able to send CEC messages */
399 	if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
400 	     !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
401 	    mode_follower >= CEC_MODE_FOLLOWER &&
402 	    mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
403 		dprintk(1, "%s: cannot transmit\n", __func__);
407 	/* Monitor modes require CEC_MODE_NO_INITIATOR */
408 	if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
409 		dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
414 	/* Monitor modes require CAP_NET_ADMIN */
415 	if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
418 	mutex_lock(&adap->lock);
420 	 * You can't become exclusive follower if someone else already
423 	if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
424 	     mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
425 	    adap->cec_follower && adap->cec_follower != fh)
428 	 * You can't become exclusive initiator if someone else already
431 	if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
432 	    adap->cec_initiator && adap->cec_initiator != fh)
	/* Keep the adapter-wide monitor-all refcount in sync with this fh. */
436 		bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
437 		bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;
439 		if (old_mon_all != new_mon_all) {
441 				err = cec_monitor_all_cnt_inc(adap);
443 				cec_monitor_all_cnt_dec(adap);
	/* Same refcount dance for pin monitoring. */
448 		bool old_mon_pin = fh->mode_follower == CEC_MODE_MONITOR_PIN;
449 		bool new_mon_pin = mode_follower == CEC_MODE_MONITOR_PIN;
451 		if (old_mon_pin != new_mon_pin) {
			/* Entering pin-monitor mode triggers an initial pin event. */
452 			send_pin_event = new_mon_pin;
454 				err = cec_monitor_pin_cnt_inc(adap);
456 				cec_monitor_pin_cnt_dec(adap);
	/* Error path: unlock and return (error handling on elided lines). */
461 		mutex_unlock(&adap->lock);
	/* Track how many plain followers the adapter has. */
465 	if (fh->mode_follower == CEC_MODE_FOLLOWER)
466 		adap->follower_cnt--;
467 	if (mode_follower == CEC_MODE_FOLLOWER)
468 		adap->follower_cnt++;
469 	if (send_pin_event) {
470 		struct cec_event ev = {
471 			.flags = CEC_EVENT_FL_INITIAL_STATE,
		/* Report the current pin level as the initial state. */
474 		ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
475 						   CEC_EVENT_PIN_CEC_LOW;
476 		cec_queue_event_fh(fh, &ev, 0);
	/* Claim or release the exclusive follower (and passthrough) role. */
478 	if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
479 	    mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
481 			mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
482 		adap->cec_follower = fh;
483 	} else if (adap->cec_follower == fh) {
484 		adap->passthrough = false;
485 		adap->cec_follower = NULL;
	/* Claim or release the exclusive initiator role. */
487 	if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
488 		adap->cec_initiator = fh;
489 	else if (adap->cec_initiator == fh)
490 		adap->cec_initiator = NULL;
491 	fh->mode_initiator = mode_initiator;
492 	fh->mode_follower = mode_follower;
493 	mutex_unlock(&adap->lock);
/*
 * Top-level unlocked_ioctl dispatcher for the CEC device node: decode the
 * command and hand off to the matching handler. 'block' reflects the
 * absence of O_NONBLOCK on the file. Fails early once the adapter has
 * been unregistered.
 */
497 static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
499 	struct cec_fh *fh = filp->private_data;
500 	struct cec_adapter *adap = fh->adap;
501 	bool block = !(filp->f_flags & O_NONBLOCK);
502 	void __user *parg = (void __user *)arg;
504 	if (!cec_is_registered(adap))
508 	case CEC_ADAP_G_CAPS:
509 		return cec_adap_g_caps(adap, parg);
511 	case CEC_ADAP_G_PHYS_ADDR:
512 		return cec_adap_g_phys_addr(adap, parg);
514 	case CEC_ADAP_S_PHYS_ADDR:
515 		return cec_adap_s_phys_addr(adap, fh, block, parg);
517 	case CEC_ADAP_G_LOG_ADDRS:
518 		return cec_adap_g_log_addrs(adap, parg);
520 	case CEC_ADAP_S_LOG_ADDRS:
521 		return cec_adap_s_log_addrs(adap, fh, block, parg);
524 		return cec_transmit(adap, fh, block, parg);
527 		return cec_receive(adap, fh, block, parg);
530 		return cec_dqevent(adap, fh, block, parg);
533 		return cec_g_mode(adap, fh, parg);
536 		return cec_s_mode(adap, fh, parg);
/*
 * open() handler: allocate and initialize a cec_fh, take a reference on
 * the devnode, enable the adapter hardware when this is the first open
 * on an unconfigured adapter that needs no HPD, queue the initial
 * STATE_CHANGE (and, for pin adapters, HPD/5V level) events, and link
 * the fh into the devnode's filehandle list.
 */
543 static int cec_open(struct inode *inode, struct file *filp)
545 	struct cec_devnode *devnode =
546 		container_of(inode->i_cdev, struct cec_devnode, cdev);
547 	struct cec_adapter *adap = to_cec_adapter(devnode);
548 	struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
550 	 * Initial events that are automatically sent when the cec device is
553 	struct cec_event ev = {
554 		.event = CEC_EVENT_STATE_CHANGE,
555 		.flags = CEC_EVENT_FL_INITIAL_STATE,
563 	INIT_LIST_HEAD(&fh->msgs);
564 	INIT_LIST_HEAD(&fh->xfer_list);
565 	for (i = 0; i < CEC_NUM_EVENTS; i++)
566 		INIT_LIST_HEAD(&fh->events[i]);
567 	mutex_init(&fh->lock);
568 	init_waitqueue_head(&fh->wait);
	/* New filehandles start as plain (non-exclusive) initiators. */
570 	fh->mode_initiator = CEC_MODE_INITIATOR;
573 	err = cec_get_device(devnode);
579 	mutex_lock(&devnode->lock);
	/* First open with no physical address: power up the adapter now. */
580 	if (list_empty(&devnode->fhs) &&
582 	    adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
583 		err = adap->ops->adap_enable(adap, true);
585 			mutex_unlock(&devnode->lock);
590 	filp->private_data = fh;
592 	/* Queue up initial state events */
593 	ev.state_change.phys_addr = adap->phys_addr;
594 	ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
595 	cec_queue_event_fh(fh, &ev, 0);
596 #ifdef CONFIG_CEC_PIN
	/* Pin adapters also report the initial HPD and 5V pin levels. */
597 	if (adap->pin && adap->pin->ops->read_hpd) {
598 		err = adap->pin->ops->read_hpd(adap);
600 			ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
601 					 CEC_EVENT_PIN_HPD_LOW;
602 			cec_queue_event_fh(fh, &ev, 0);
605 	if (adap->pin && adap->pin->ops->read_5v) {
606 		err = adap->pin->ops->read_5v(adap);
608 			ev.event = err ? CEC_EVENT_PIN_5V_HIGH :
609 					 CEC_EVENT_PIN_5V_LOW;
610 			cec_queue_event_fh(fh, &ev, 0);
615 	list_add(&fh->list, &devnode->fhs);
616 	mutex_unlock(&devnode->lock);
621 /* Override for the release function */
/*
 * release() handler: undo everything cec_open() and subsequent ioctls set
 * up for this filehandle — drop exclusive initiator/follower roles, fix
 * up follower/monitor refcounts, disable the adapter when this was the
 * last open on an address-less adapter, detach pending transmits, free
 * all queued messages and dynamically allocated events, and drop the
 * devnode reference.
 */
622 static int cec_release(struct inode *inode, struct file *filp)
624 	struct cec_devnode *devnode = cec_devnode_data(filp);
625 	struct cec_adapter *adap = to_cec_adapter(devnode);
626 	struct cec_fh *fh = filp->private_data;
629 	mutex_lock(&adap->lock);
	/* Release any exclusive roles held by this filehandle. */
630 	if (adap->cec_initiator == fh)
631 		adap->cec_initiator = NULL;
632 	if (adap->cec_follower == fh) {
633 		adap->cec_follower = NULL;
634 		adap->passthrough = false;
636 	if (fh->mode_follower == CEC_MODE_FOLLOWER)
637 		adap->follower_cnt--;
638 	if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
639 		cec_monitor_pin_cnt_dec(adap);
640 	if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
641 		cec_monitor_all_cnt_dec(adap);
642 	mutex_unlock(&adap->lock);
644 	mutex_lock(&devnode->lock);
	/* Mirror of the enable in cec_open(): last close powers down. */
646 	if (cec_is_registered(adap) && list_empty(&devnode->fhs) &&
647 	    !adap->needs_hpd && adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
648 		WARN_ON(adap->ops->adap_enable(adap, false));
650 	mutex_unlock(&devnode->lock);
652 	/* Unhook pending transmits from this filehandle. */
653 	mutex_lock(&adap->lock);
654 	while (!list_empty(&fh->xfer_list)) {
655 		struct cec_data *data =
656 			list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);
		/* Orphan the transmit: it completes without a waiting fh. */
658 		data->blocking = false;
660 		list_del(&data->xfer_list);
662 	mutex_unlock(&adap->lock);
	/* Drain and free all queued received messages. */
663 	while (!list_empty(&fh->msgs)) {
664 		struct cec_msg_entry *entry =
665 			list_first_entry(&fh->msgs, struct cec_msg_entry, list);
667 		list_del(&entry->list);
	/* Only non-core event entries are heap-allocated and need freeing. */
670 	for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
671 		while (!list_empty(&fh->events[i])) {
672 			struct cec_event_entry *entry =
673 				list_first_entry(&fh->events[i],
674 						 struct cec_event_entry, list);
676 			list_del(&entry->list);
682 	cec_put_device(devnode);
683 	filp->private_data = NULL;
687 const struct file_operations cec_devnode_fops = {
688 .owner = THIS_MODULE,
690 .unlocked_ioctl = cec_ioctl,
691 .release = cec_release,