/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#include <media/cec-pin.h>

#include "cec-priv.h"
#include "cec-pin-priv.h"
37 static inline struct cec_devnode *cec_devnode_data(struct file *filp)
39 struct cec_fh *fh = filp->private_data;
41 return &fh->adap->devnode;
/* CEC file operations */
46 static __poll_t cec_poll(struct file *filp,
47 struct poll_table_struct *poll)
49 struct cec_devnode *devnode = cec_devnode_data(filp);
50 struct cec_fh *fh = filp->private_data;
51 struct cec_adapter *adap = fh->adap;
54 if (!devnode->registered)
55 return POLLERR | POLLHUP;
56 mutex_lock(&adap->lock);
57 if (adap->is_configured &&
58 adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
59 res |= POLLOUT | POLLWRNORM;
61 res |= POLLIN | POLLRDNORM;
62 if (fh->total_queued_events)
64 poll_wait(filp, &fh->wait, poll);
65 mutex_unlock(&adap->lock);
69 static bool cec_is_busy(const struct cec_adapter *adap,
70 const struct cec_fh *fh)
72 bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
73 bool valid_follower = adap->cec_follower && adap->cec_follower == fh;
76 * Exclusive initiators and followers can always access the CEC adapter
78 if (valid_initiator || valid_follower)
81 * All others can only access the CEC adapter if there is no
82 * exclusive initiator and they are in INITIATOR mode.
84 return adap->cec_initiator ||
85 fh->mode_initiator == CEC_MODE_NO_INITIATOR;
88 static long cec_adap_g_caps(struct cec_adapter *adap,
89 struct cec_caps __user *parg)
91 struct cec_caps caps = {};
93 strlcpy(caps.driver, adap->devnode.dev.parent->driver->name,
95 strlcpy(caps.name, adap->name, sizeof(caps.name));
96 caps.available_log_addrs = adap->available_log_addrs;
97 caps.capabilities = adap->capabilities;
98 caps.version = LINUX_VERSION_CODE;
99 if (copy_to_user(parg, &caps, sizeof(caps)))
104 static long cec_adap_g_phys_addr(struct cec_adapter *adap,
109 mutex_lock(&adap->lock);
110 phys_addr = adap->phys_addr;
111 mutex_unlock(&adap->lock);
112 if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
117 static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
118 bool block, __u16 __user *parg)
123 if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
125 if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
128 err = cec_phys_addr_validate(phys_addr, NULL, NULL);
131 mutex_lock(&adap->lock);
132 if (cec_is_busy(adap, fh))
135 __cec_s_phys_addr(adap, phys_addr, block);
136 mutex_unlock(&adap->lock);
140 static long cec_adap_g_log_addrs(struct cec_adapter *adap,
141 struct cec_log_addrs __user *parg)
143 struct cec_log_addrs log_addrs;
145 mutex_lock(&adap->lock);
146 log_addrs = adap->log_addrs;
147 if (!adap->is_configured)
148 memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
149 sizeof(log_addrs.log_addr));
150 mutex_unlock(&adap->lock);
152 if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
157 static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
158 bool block, struct cec_log_addrs __user *parg)
160 struct cec_log_addrs log_addrs;
163 if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
165 if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
167 log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
168 CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
169 CEC_LOG_ADDRS_FL_CDC_ONLY;
170 mutex_lock(&adap->lock);
171 if (!adap->is_configuring &&
172 (!log_addrs.num_log_addrs || !adap->is_configured) &&
173 !cec_is_busy(adap, fh)) {
174 err = __cec_s_log_addrs(adap, &log_addrs, block);
176 log_addrs = adap->log_addrs;
178 mutex_unlock(&adap->lock);
181 if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
186 static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
187 bool block, struct cec_msg __user *parg)
189 struct cec_msg msg = {};
192 if (!(adap->capabilities & CEC_CAP_TRANSMIT))
194 if (copy_from_user(&msg, parg, sizeof(msg)))
197 /* A CDC-Only device can only send CDC messages */
198 if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
199 (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
202 mutex_lock(&adap->lock);
203 if (adap->log_addrs.num_log_addrs == 0)
205 else if (adap->is_configuring)
207 else if (!adap->is_configured &&
208 (adap->needs_hpd || msg.msg[0] != 0xf0))
210 else if (cec_is_busy(adap, fh))
213 err = cec_transmit_msg_fh(adap, &msg, fh, block);
214 mutex_unlock(&adap->lock);
217 if (copy_to_user(parg, &msg, sizeof(msg)))
222 /* Called by CEC_RECEIVE: wait for a message to arrive */
223 static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
225 u32 timeout = msg->timeout;
229 mutex_lock(&fh->lock);
230 /* Are there received messages queued up? */
231 if (fh->queued_msgs) {
232 /* Yes, return the first one */
233 struct cec_msg_entry *entry =
234 list_first_entry(&fh->msgs,
235 struct cec_msg_entry, list);
237 list_del(&entry->list);
241 mutex_unlock(&fh->lock);
242 /* restore original timeout value */
243 msg->timeout = timeout;
247 /* No, return EAGAIN in non-blocking mode or wait */
248 mutex_unlock(&fh->lock);
250 /* Return when in non-blocking mode */
255 /* The user specified a timeout */
256 res = wait_event_interruptible_timeout(fh->wait,
258 msecs_to_jiffies(msg->timeout));
264 /* Wait indefinitely */
265 res = wait_event_interruptible(fh->wait,
268 /* Exit on error, otherwise loop to get the new message */
273 static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
274 bool block, struct cec_msg __user *parg)
276 struct cec_msg msg = {};
279 if (copy_from_user(&msg, parg, sizeof(msg)))
282 err = cec_receive_msg(fh, &msg, block);
286 if (copy_to_user(parg, &msg, sizeof(msg)))
291 static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
292 bool block, struct cec_event __user *parg)
294 struct cec_event_entry *ev = NULL;
300 mutex_lock(&fh->lock);
301 while (!fh->total_queued_events && block) {
302 mutex_unlock(&fh->lock);
303 err = wait_event_interruptible(fh->wait,
304 fh->total_queued_events);
307 mutex_lock(&fh->lock);
310 /* Find the oldest event */
311 for (i = 0; i < CEC_NUM_EVENTS; i++) {
312 struct cec_event_entry *entry =
313 list_first_entry_or_null(&fh->events[i],
314 struct cec_event_entry, list);
316 if (entry && entry->ev.ts <= ts) {
329 if (copy_to_user(parg, &ev->ev, sizeof(ev->ev)))
331 if (ev_idx >= CEC_NUM_CORE_EVENTS)
333 fh->queued_events[ev_idx]--;
334 fh->total_queued_events--;
337 mutex_unlock(&fh->lock);
341 static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
344 u32 mode = fh->mode_initiator | fh->mode_follower;
346 if (copy_to_user(parg, &mode, sizeof(mode)))
351 static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
359 if (copy_from_user(&mode, parg, sizeof(mode)))
361 if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK)) {
362 dprintk(1, "%s: invalid mode bits set\n", __func__);
366 mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
367 mode_follower = mode & CEC_MODE_FOLLOWER_MSK;
369 if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
370 mode_follower > CEC_MODE_MONITOR_ALL) {
371 dprintk(1, "%s: unknown mode\n", __func__);
375 if (mode_follower == CEC_MODE_MONITOR_ALL &&
376 !(adap->capabilities & CEC_CAP_MONITOR_ALL)) {
377 dprintk(1, "%s: MONITOR_ALL not supported\n", __func__);
381 if (mode_follower == CEC_MODE_MONITOR_PIN &&
382 !(adap->capabilities & CEC_CAP_MONITOR_PIN)) {
383 dprintk(1, "%s: MONITOR_PIN not supported\n", __func__);
387 /* Follower modes should always be able to send CEC messages */
388 if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
389 !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
390 mode_follower >= CEC_MODE_FOLLOWER &&
391 mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
392 dprintk(1, "%s: cannot transmit\n", __func__);
396 /* Monitor modes require CEC_MODE_NO_INITIATOR */
397 if (mode_initiator && mode_follower >= CEC_MODE_MONITOR_PIN) {
398 dprintk(1, "%s: monitor modes require NO_INITIATOR\n",
403 /* Monitor modes require CAP_NET_ADMIN */
404 if (mode_follower >= CEC_MODE_MONITOR_PIN && !capable(CAP_NET_ADMIN))
407 mutex_lock(&adap->lock);
409 * You can't become exclusive follower if someone else already
412 if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
413 mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
414 adap->cec_follower && adap->cec_follower != fh)
417 * You can't become exclusive initiator if someone else already
420 if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
421 adap->cec_initiator && adap->cec_initiator != fh)
425 bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
426 bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;
428 if (old_mon_all != new_mon_all) {
430 err = cec_monitor_all_cnt_inc(adap);
432 cec_monitor_all_cnt_dec(adap);
437 mutex_unlock(&adap->lock);
441 if (fh->mode_follower == CEC_MODE_FOLLOWER)
442 adap->follower_cnt--;
443 if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
444 adap->monitor_pin_cnt--;
445 if (mode_follower == CEC_MODE_FOLLOWER)
446 adap->follower_cnt++;
447 if (mode_follower == CEC_MODE_MONITOR_PIN) {
448 struct cec_event ev = {
449 .flags = CEC_EVENT_FL_INITIAL_STATE,
452 ev.event = adap->cec_pin_is_high ? CEC_EVENT_PIN_CEC_HIGH :
453 CEC_EVENT_PIN_CEC_LOW;
454 cec_queue_event_fh(fh, &ev, 0);
455 adap->monitor_pin_cnt++;
457 if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
458 mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
460 mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
461 adap->cec_follower = fh;
462 } else if (adap->cec_follower == fh) {
463 adap->passthrough = false;
464 adap->cec_follower = NULL;
466 if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
467 adap->cec_initiator = fh;
468 else if (adap->cec_initiator == fh)
469 adap->cec_initiator = NULL;
470 fh->mode_initiator = mode_initiator;
471 fh->mode_follower = mode_follower;
472 mutex_unlock(&adap->lock);
476 static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
478 struct cec_devnode *devnode = cec_devnode_data(filp);
479 struct cec_fh *fh = filp->private_data;
480 struct cec_adapter *adap = fh->adap;
481 bool block = !(filp->f_flags & O_NONBLOCK);
482 void __user *parg = (void __user *)arg;
484 if (!devnode->registered)
488 case CEC_ADAP_G_CAPS:
489 return cec_adap_g_caps(adap, parg);
491 case CEC_ADAP_G_PHYS_ADDR:
492 return cec_adap_g_phys_addr(adap, parg);
494 case CEC_ADAP_S_PHYS_ADDR:
495 return cec_adap_s_phys_addr(adap, fh, block, parg);
497 case CEC_ADAP_G_LOG_ADDRS:
498 return cec_adap_g_log_addrs(adap, parg);
500 case CEC_ADAP_S_LOG_ADDRS:
501 return cec_adap_s_log_addrs(adap, fh, block, parg);
504 return cec_transmit(adap, fh, block, parg);
507 return cec_receive(adap, fh, block, parg);
510 return cec_dqevent(adap, fh, block, parg);
513 return cec_g_mode(adap, fh, parg);
516 return cec_s_mode(adap, fh, parg);
523 static int cec_open(struct inode *inode, struct file *filp)
525 struct cec_devnode *devnode =
526 container_of(inode->i_cdev, struct cec_devnode, cdev);
527 struct cec_adapter *adap = to_cec_adapter(devnode);
528 struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
530 * Initial events that are automatically sent when the cec device is
533 struct cec_event ev = {
534 .event = CEC_EVENT_STATE_CHANGE,
535 .flags = CEC_EVENT_FL_INITIAL_STATE,
543 INIT_LIST_HEAD(&fh->msgs);
544 INIT_LIST_HEAD(&fh->xfer_list);
545 for (i = 0; i < CEC_NUM_EVENTS; i++)
546 INIT_LIST_HEAD(&fh->events[i]);
547 mutex_init(&fh->lock);
548 init_waitqueue_head(&fh->wait);
550 fh->mode_initiator = CEC_MODE_INITIATOR;
553 err = cec_get_device(devnode);
559 mutex_lock(&devnode->lock);
560 if (list_empty(&devnode->fhs) &&
562 adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
563 err = adap->ops->adap_enable(adap, true);
565 mutex_unlock(&devnode->lock);
570 filp->private_data = fh;
572 /* Queue up initial state events */
573 ev.state_change.phys_addr = adap->phys_addr;
574 ev.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
575 cec_queue_event_fh(fh, &ev, 0);
576 #ifdef CONFIG_CEC_PIN
577 if (adap->pin && adap->pin->ops->read_hpd) {
578 err = adap->pin->ops->read_hpd(adap);
580 ev.event = err ? CEC_EVENT_PIN_HPD_HIGH :
581 CEC_EVENT_PIN_HPD_LOW;
582 cec_queue_event_fh(fh, &ev, 0);
587 list_add(&fh->list, &devnode->fhs);
588 mutex_unlock(&devnode->lock);
593 /* Override for the release function */
594 static int cec_release(struct inode *inode, struct file *filp)
596 struct cec_devnode *devnode = cec_devnode_data(filp);
597 struct cec_adapter *adap = to_cec_adapter(devnode);
598 struct cec_fh *fh = filp->private_data;
601 mutex_lock(&adap->lock);
602 if (adap->cec_initiator == fh)
603 adap->cec_initiator = NULL;
604 if (adap->cec_follower == fh) {
605 adap->cec_follower = NULL;
606 adap->passthrough = false;
608 if (fh->mode_follower == CEC_MODE_FOLLOWER)
609 adap->follower_cnt--;
610 if (fh->mode_follower == CEC_MODE_MONITOR_PIN)
611 adap->monitor_pin_cnt--;
612 if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
613 cec_monitor_all_cnt_dec(adap);
614 mutex_unlock(&adap->lock);
616 mutex_lock(&devnode->lock);
618 if (list_empty(&devnode->fhs) &&
620 adap->phys_addr == CEC_PHYS_ADDR_INVALID) {
621 WARN_ON(adap->ops->adap_enable(adap, false));
623 mutex_unlock(&devnode->lock);
625 /* Unhook pending transmits from this filehandle. */
626 mutex_lock(&adap->lock);
627 while (!list_empty(&fh->xfer_list)) {
628 struct cec_data *data =
629 list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);
631 data->blocking = false;
633 list_del(&data->xfer_list);
635 mutex_unlock(&adap->lock);
636 while (!list_empty(&fh->msgs)) {
637 struct cec_msg_entry *entry =
638 list_first_entry(&fh->msgs, struct cec_msg_entry, list);
640 list_del(&entry->list);
643 for (i = CEC_NUM_CORE_EVENTS; i < CEC_NUM_EVENTS; i++) {
644 while (!list_empty(&fh->events[i])) {
645 struct cec_event_entry *entry =
646 list_first_entry(&fh->events[i],
647 struct cec_event_entry, list);
649 list_del(&entry->list);
655 cec_put_device(devnode);
656 filp->private_data = NULL;
660 const struct file_operations cec_devnode_fops = {
661 .owner = THIS_MODULE,
663 .unlocked_ioctl = cec_ioctl,
664 .release = cec_release,