/*
 * cec-api.c - HDMI Consumer Electronics Control framework - API
 *
 * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/version.h>

#include "cec-priv.h"

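/* Retrieve the devnode that backs this open filehandle */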
static inline struct cec_devnode *cec_devnode_data(struct file *filp)
{
	struct cec_fh *fh = filp->private_data;

	return &fh->adap->devnode;
}

/* CEC file operations */

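/*
 * Poll for: room in the transmit queue of a configured adapter (POLLOUT),
 * queued received messages (POLLIN) and pending events (POLLPRI).
 */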
static unsigned int cec_poll(struct file *filp,
			     struct poll_table_struct *poll)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	unsigned int res = 0;

	if (!devnode->registered)
		return POLLERR | POLLHUP;
	mutex_lock(&adap->lock);
	if (adap->is_configured &&
	    adap->transmit_queue_sz < CEC_MAX_MSG_TX_QUEUE_SZ)
		res |= POLLOUT | POLLWRNORM;
	if (fh->queued_msgs)
		res |= POLLIN | POLLRDNORM;
	if (fh->pending_events)
		res |= POLLPRI;
	poll_wait(filp, &fh->wait, poll);
	mutex_unlock(&adap->lock);
	return res;
}

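/*
 * Return true if this filehandle is currently not allowed to use the
 * adapter: another filehandle holds the exclusive initiator role, or this
 * filehandle is in non-initiator mode.
 */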
static bool cec_is_busy(const struct cec_adapter *adap,
			const struct cec_fh *fh)
{
	bool valid_initiator = adap->cec_initiator && adap->cec_initiator == fh;
	bool valid_follower = adap->cec_follower && adap->cec_follower == fh;

	/*
	 * Exclusive initiators and followers can always access the CEC adapter
	 */
	if (valid_initiator || valid_follower)
		return false;
	/*
	 * All others can only access the CEC adapter if there is no
	 * exclusive initiator and they are in INITIATOR mode.
	 */
	return adap->cec_initiator ||
	       fh->mode_initiator == CEC_MODE_NO_INITIATOR;
}

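/* CEC_ADAP_G_CAPS: report the driver name, adapter name and capabilities */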
static long cec_adap_g_caps(struct cec_adapter *adap,
			    struct cec_caps __user *parg)
{
	struct cec_caps caps = {};

	strlcpy(caps.driver, adap->devnode.parent->driver->name,
		sizeof(caps.driver));
	strlcpy(caps.name, adap->name, sizeof(caps.name));
	caps.available_log_addrs = adap->available_log_addrs;
	caps.capabilities = adap->capabilities;
	caps.version = LINUX_VERSION_CODE;
	if (copy_to_user(parg, &caps, sizeof(caps)))
		return -EFAULT;
	return 0;
}

static long cec_adap_g_phys_addr(struct cec_adapter *adap,
				 __u16 __user *parg)
{
	u16 phys_addr;

	mutex_lock(&adap->lock);
	phys_addr = adap->phys_addr;
	mutex_unlock(&adap->lock);
	if (copy_to_user(parg, &phys_addr, sizeof(phys_addr)))
		return -EFAULT;
	return 0;
}

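/*
 * CEC_ADAP_S_PHYS_ADDR: validate the new physical address and pass it on to
 * the adapter core; fails with -EBUSY when cec_is_busy() denies this
 * filehandle access.
 */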
static long cec_adap_s_phys_addr(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, __u16 __user *parg)
{
	u16 phys_addr;
	long err;

	if (!(adap->capabilities & CEC_CAP_PHYS_ADDR))
		return -ENOTTY;
	if (copy_from_user(&phys_addr, parg, sizeof(phys_addr)))
		return -EFAULT;

	err = cec_phys_addr_validate(phys_addr, NULL, NULL);
	if (err)
		return err;
	mutex_lock(&adap->lock);
	if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		__cec_s_phys_addr(adap, phys_addr, block);
	mutex_unlock(&adap->lock);
	return err;
}

static long cec_adap_g_log_addrs(struct cec_adapter *adap,
				 struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;

	mutex_lock(&adap->lock);
	log_addrs = adap->log_addrs;
	if (!adap->is_configured)
		memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
		       sizeof(log_addrs.log_addr));
	mutex_unlock(&adap->lock);

	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}

static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
				 bool block, struct cec_log_addrs __user *parg)
{
	struct cec_log_addrs log_addrs;
	long err = -EBUSY;

	if (!(adap->capabilities & CEC_CAP_LOG_ADDRS))
		return -ENOTTY;
	if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
		return -EFAULT;
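	/* Only keep the flags that userspace is allowed to set */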
	log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK |
			   CEC_LOG_ADDRS_FL_ALLOW_RC_PASSTHRU |
			   CEC_LOG_ADDRS_FL_CDC_ONLY;
	mutex_lock(&adap->lock);
	if (!adap->is_configuring &&
	    (!log_addrs.num_log_addrs || !adap->is_configured) &&
	    !cec_is_busy(adap, fh)) {
		err = __cec_s_log_addrs(adap, &log_addrs, block);
		if (!err)
			log_addrs = adap->log_addrs;
	}
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &log_addrs, sizeof(log_addrs)))
		return -EFAULT;
	return 0;
}

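/* CEC_TRANSMIT: queue a message for transmission on a configured adapter */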
static long cec_transmit(struct cec_adapter *adap, struct cec_fh *fh,
			 bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err = 0;

	if (!(adap->capabilities & CEC_CAP_TRANSMIT))
		return -ENOTTY;
	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;

	/* A CDC-Only device can only send CDC messages */
	if ((adap->log_addrs.flags & CEC_LOG_ADDRS_FL_CDC_ONLY) &&
	    (msg.len == 1 || msg.msg[1] != CEC_MSG_CDC_MESSAGE))
		return -EINVAL;

	mutex_lock(&adap->lock);
	if (!adap->is_configured)
		err = -ENONET;
	else if (cec_is_busy(adap, fh))
		err = -EBUSY;
	else
		err = cec_transmit_msg_fh(adap, &msg, fh, block);
	mutex_unlock(&adap->lock);
	if (err)
		return err;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}

/* Called by CEC_RECEIVE: wait for a message to arrive */
static int cec_receive_msg(struct cec_fh *fh, struct cec_msg *msg, bool block)
{
	u32 timeout = msg->timeout;
	int res;

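	/*
	 * Loop until a message can be dequeued, the (optional) timeout
	 * expires or a signal interrupts the wait.
	 */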
	do {
		mutex_lock(&fh->lock);
		/* Are there received messages queued up? */
		if (fh->queued_msgs) {
			/* Yes, return the first one */
			struct cec_msg_entry *entry =
				list_first_entry(&fh->msgs,
						 struct cec_msg_entry, list);

			list_del(&entry->list);
			*msg = entry->msg;
			kfree(entry);
			fh->queued_msgs--;
			mutex_unlock(&fh->lock);
			/* restore original timeout value */
			msg->timeout = timeout;
			return 0;
		}

		/* No, return EAGAIN in non-blocking mode or wait */
		mutex_unlock(&fh->lock);

		/* Return when in non-blocking mode */
		if (!block)
			return -EAGAIN;

		if (msg->timeout) {
			/* The user specified a timeout */
			res = wait_event_interruptible_timeout(fh->wait,
				fh->queued_msgs,
				msecs_to_jiffies(msg->timeout));
			if (res == 0)
				res = -ETIMEDOUT;
			else if (res > 0)
				res = 0;
		} else {
			/* Wait indefinitely */
			res = wait_event_interruptible(fh->wait,
						       fh->queued_msgs);
		}
		/* Exit on error, otherwise loop to get the new message */
	} while (!res);
	return res;
}

static long cec_receive(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_msg __user *parg)
{
	struct cec_msg msg = {};
	long err = 0;

	if (copy_from_user(&msg, parg, sizeof(msg)))
		return -EFAULT;
	mutex_lock(&adap->lock);
	if (!adap->is_configured && fh->mode_follower < CEC_MODE_MONITOR)
		err = -ENONET;
	mutex_unlock(&adap->lock);
	if (err)
		return err;

	err = cec_receive_msg(fh, &msg, block);
	if (err)
		return err;
	msg.flags = 0;
	if (copy_to_user(parg, &msg, sizeof(msg)))
		return -EFAULT;
	return 0;
}

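/* CEC_DQEVENT: dequeue the oldest pending event, optionally waiting for one */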
static long cec_dqevent(struct cec_adapter *adap, struct cec_fh *fh,
			bool block, struct cec_event __user *parg)
{
	struct cec_event *ev = NULL;
	u64 ts = ~0ULL;
	unsigned int i;
	long err = 0;

	mutex_lock(&fh->lock);
	while (!fh->pending_events && block) {
		mutex_unlock(&fh->lock);
		err = wait_event_interruptible(fh->wait, fh->pending_events);
		if (err)
			return err;
		mutex_lock(&fh->lock);
	}

	/* Find the oldest event */
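	/*
	 * Event numbers are 1-based: bit (i + 1) in pending_events
	 * corresponds to the event stored in fh->events[i].
	 */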
	for (i = 0; i < CEC_NUM_EVENTS; i++) {
		if (fh->pending_events & (1 << (i + 1)) &&
		    fh->events[i].ts <= ts) {
			ev = &fh->events[i];
			ts = ev->ts;
		}
	}
	if (!ev) {
		err = -EAGAIN;
		goto unlock;
	}

	if (copy_to_user(parg, ev, sizeof(*ev))) {
		err = -EFAULT;
		goto unlock;
	}

	fh->pending_events &= ~(1 << ev->event);

unlock:
	mutex_unlock(&fh->lock);
	return err;
}

static long cec_g_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode = fh->mode_initiator | fh->mode_follower;

	if (copy_to_user(parg, &mode, sizeof(mode)))
		return -EFAULT;
	return 0;
}

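/*
 * CEC_S_MODE: change the initiator and/or follower mode of this filehandle,
 * checking capabilities, permissions and exclusive ownership.
 */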
static long cec_s_mode(struct cec_adapter *adap, struct cec_fh *fh,
		       u32 __user *parg)
{
	u32 mode;
	u8 mode_initiator;
	u8 mode_follower;
	long err = 0;

	if (copy_from_user(&mode, parg, sizeof(mode)))
		return -EFAULT;
	if (mode & ~(CEC_MODE_INITIATOR_MSK | CEC_MODE_FOLLOWER_MSK))
		return -EINVAL;

	mode_initiator = mode & CEC_MODE_INITIATOR_MSK;
	mode_follower = mode & CEC_MODE_FOLLOWER_MSK;

	if (mode_initiator > CEC_MODE_EXCL_INITIATOR ||
	    mode_follower > CEC_MODE_MONITOR_ALL)
		return -EINVAL;

	if (mode_follower == CEC_MODE_MONITOR_ALL &&
	    !(adap->capabilities & CEC_CAP_MONITOR_ALL))
		return -EINVAL;

	/* Follower modes should always be able to send CEC messages */
	if ((mode_initiator == CEC_MODE_NO_INITIATOR ||
	     !(adap->capabilities & CEC_CAP_TRANSMIT)) &&
	    mode_follower >= CEC_MODE_FOLLOWER &&
	    mode_follower <= CEC_MODE_EXCL_FOLLOWER_PASSTHRU)
		return -EINVAL;

	/* Monitor modes require CEC_MODE_NO_INITIATOR */
	if (mode_initiator && mode_follower >= CEC_MODE_MONITOR)
		return -EINVAL;

	/* Monitor modes require CAP_NET_ADMIN */
	if (mode_follower >= CEC_MODE_MONITOR && !capable(CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&adap->lock);
	/*
	 * You can't become exclusive follower if someone else already
	 * has that job.
	 */
	if ((mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	     mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) &&
	    adap->cec_follower && adap->cec_follower != fh)
		err = -EBUSY;
	/*
	 * You can't become exclusive initiator if someone else already
	 * has that job.
	 */
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR &&
	    adap->cec_initiator && adap->cec_initiator != fh)
		err = -EBUSY;

	if (!err) {
		bool old_mon_all = fh->mode_follower == CEC_MODE_MONITOR_ALL;
		bool new_mon_all = mode_follower == CEC_MODE_MONITOR_ALL;

		if (old_mon_all != new_mon_all) {
			if (new_mon_all)
				err = cec_monitor_all_cnt_inc(adap);
			else
				cec_monitor_all_cnt_dec(adap);
		}
	}

	if (err) {
		mutex_unlock(&adap->lock);
		return err;
	}

	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt++;
	if (mode_follower == CEC_MODE_EXCL_FOLLOWER ||
	    mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU) {
		adap->passthrough =
			mode_follower == CEC_MODE_EXCL_FOLLOWER_PASSTHRU;
		adap->cec_follower = fh;
	} else if (adap->cec_follower == fh) {
		adap->passthrough = false;
		adap->cec_follower = NULL;
	}
	if (mode_initiator == CEC_MODE_EXCL_INITIATOR)
		adap->cec_initiator = fh;
	else if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	fh->mode_initiator = mode_initiator;
	fh->mode_follower = mode_follower;
	mutex_unlock(&adap->lock);
	return 0;
}

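/* Dispatch the CEC ioctls to their handlers */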
static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_fh *fh = filp->private_data;
	struct cec_adapter *adap = fh->adap;
	bool block = !(filp->f_flags & O_NONBLOCK);
	void __user *parg = (void __user *)arg;

	if (!devnode->registered)
		return -ENODEV;

	switch (cmd) {
	case CEC_ADAP_G_CAPS:
		return cec_adap_g_caps(adap, parg);

	case CEC_ADAP_G_PHYS_ADDR:
		return cec_adap_g_phys_addr(adap, parg);

	case CEC_ADAP_S_PHYS_ADDR:
		return cec_adap_s_phys_addr(adap, fh, block, parg);

	case CEC_ADAP_G_LOG_ADDRS:
		return cec_adap_g_log_addrs(adap, parg);

	case CEC_ADAP_S_LOG_ADDRS:
		return cec_adap_s_log_addrs(adap, fh, block, parg);

	case CEC_TRANSMIT:
		return cec_transmit(adap, fh, block, parg);

	case CEC_RECEIVE:
		return cec_receive(adap, fh, block, parg);

	case CEC_DQEVENT:
		return cec_dqevent(adap, fh, block, parg);

	case CEC_G_MODE:
		return cec_g_mode(adap, fh, parg);

	case CEC_S_MODE:
		return cec_s_mode(adap, fh, parg);

	default:
		return -ENOTTY;
	}
}

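/*
 * Open a filehandle: set up the per-filehandle state and queue the initial
 * CEC_EVENT_STATE_CHANGE event.
 */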
static int cec_open(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode =
		container_of(inode->i_cdev, struct cec_devnode, cdev);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	/*
	 * Initial events that are automatically sent when the cec device is
	 * opened.
	 */
	struct cec_event ev_state = {
		.event = CEC_EVENT_STATE_CHANGE,
		.flags = CEC_EVENT_FL_INITIAL_STATE,
	};
	int err;

	if (!fh)
		return -ENOMEM;

	INIT_LIST_HEAD(&fh->msgs);
	INIT_LIST_HEAD(&fh->xfer_list);
	mutex_init(&fh->lock);
	init_waitqueue_head(&fh->wait);

	fh->mode_initiator = CEC_MODE_INITIATOR;
	fh->adap = adap;

	err = cec_get_device(devnode);
	if (err) {
		kfree(fh);
		return err;
	}

	filp->private_data = fh;

	mutex_lock(&devnode->lock);
	/* Queue up initial state events */
	ev_state.state_change.phys_addr = adap->phys_addr;
	ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
	cec_queue_event_fh(fh, &ev_state, 0);

	list_add(&fh->list, &devnode->fhs);
	mutex_unlock(&devnode->lock);

	return 0;
}

/* Override for the release function */
static int cec_release(struct inode *inode, struct file *filp)
{
	struct cec_devnode *devnode = cec_devnode_data(filp);
	struct cec_adapter *adap = to_cec_adapter(devnode);
	struct cec_fh *fh = filp->private_data;

	mutex_lock(&adap->lock);
	if (adap->cec_initiator == fh)
		adap->cec_initiator = NULL;
	if (adap->cec_follower == fh) {
		adap->cec_follower = NULL;
		adap->passthrough = false;
	}
	if (fh->mode_follower == CEC_MODE_FOLLOWER)
		adap->follower_cnt--;
	if (fh->mode_follower == CEC_MODE_MONITOR_ALL)
		cec_monitor_all_cnt_dec(adap);
	mutex_unlock(&adap->lock);

	mutex_lock(&devnode->lock);
	list_del(&fh->list);
	mutex_unlock(&devnode->lock);

	/* Unhook pending transmits from this filehandle. */
	mutex_lock(&adap->lock);
	while (!list_empty(&fh->xfer_list)) {
		struct cec_data *data =
			list_first_entry(&fh->xfer_list, struct cec_data, xfer_list);

		data->blocking = false;
		data->fh = NULL;
		list_del(&data->xfer_list);
	}
	mutex_unlock(&adap->lock);
	while (!list_empty(&fh->msgs)) {
		struct cec_msg_entry *entry =
			list_first_entry(&fh->msgs, struct cec_msg_entry, list);

		list_del(&entry->list);
		kfree(entry);
	}
	kfree(fh);

	cec_put_device(devnode);
	filp->private_data = NULL;
	return 0;
}

const struct file_operations cec_devnode_fops = {
	.owner = THIS_MODULE,
	.open = cec_open,
	.unlocked_ioctl = cec_ioctl,
	.release = cec_release,
	.poll = cec_poll,
	.llseek = no_llseek,
};