blob: 763c9d131d0fc6c7573c5c1ee9e8ce2bbe266ef6 [file] [log] [blame]
David Härdeman829ba9f2010-11-19 20:43:27 -03001/* ir-raw.c - handle IR pulse/space events
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -03002 *
Mauro Carvalho Chehab37e59f82014-02-07 08:03:07 -02003 * Copyright (C) 2010 by Mauro Carvalho Chehab
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -03004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
Paul Gortmaker35a24632011-08-01 15:26:38 -040015#include <linux/export.h>
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030016#include <linux/kthread.h>
Maxim Levitsky45a568f2010-07-31 11:59:16 -030017#include <linux/mutex.h>
Stephen Rothwelldff65de2011-07-29 15:34:32 +100018#include <linux/kmod.h>
David Härdeman724e2492010-04-08 13:10:00 -030019#include <linux/sched.h>
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030020#include <linux/freezer.h>
Mauro Carvalho Chehabf62de672010-11-09 23:09:57 -030021#include "rc-core-priv.h"
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030022
/* Define the max number of pulse/space transitions to buffer */
#define MAX_IR_EVENT_SIZE	512

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
/* Bitmask of protocols provided by the registered handlers; also protected
 * by ir_raw_handler_lock (see ir_raw_get_allowed_protocols()). */
static u64 available_protocols;
/*
 * ir_raw_event_thread() - kthread that drains the raw event kfifo
 * @data: the struct ir_raw_event_ctrl of the rc device
 *
 * Sleeps until woken by ir_raw_event_handle(), then pops complete
 * struct ir_raw_event samples off the kfifo and feeds each one to every
 * registered protocol decoder under ir_raw_handler_lock.
 */
static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
	int retval;

	while (!kthread_should_stop()) {

		spin_lock_irq(&raw->lock);
		retval = kfifo_len(&raw->kfifo);

		if (retval < sizeof(ev)) {
			/* Not a full event buffered yet: go to sleep.
			 * TASK_INTERRUPTIBLE is set before dropping the
			 * lock so a wake_up_process() from
			 * ir_raw_event_handle() (which takes raw->lock)
			 * cannot be lost between the check and schedule().
			 */
			set_current_state(TASK_INTERRUPTIBLE);

			/* Re-check stop request so kthread_stop() racing
			 * with us doesn't leave the thread asleep. */
			if (kthread_should_stop())
				set_current_state(TASK_RUNNING);

			spin_unlock_irq(&raw->lock);
			schedule();
			continue;
		}

		retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
		spin_unlock_irq(&raw->lock);

		/* Hand the sample to every decoder; prev_ev is kept so
		 * decoders/consumers can see the previous sample. */
		mutex_lock(&ir_raw_handler_lock);
		list_for_each_entry(handler, &ir_raw_handler_list, list)
			handler->decode(raw->dev, ev);
		raw->prev_ev = ev;
		mutex_unlock(&ir_raw_handler_lock);
	}

	return 0;
}
69
/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 *
 * Returns 0 on success, -EINVAL if the device has no raw event context,
 * or -ENOMEM if the kfifo is full.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	IR_dprintk(2, "sample: (%05dus %s)\n",
		   TO_US(ev->duration), TO_STR(ev->pulse));

	/* kfifo_in() returns the number of bytes copied; anything short
	 * of a whole event means the fifo was (nearly) full. */
	if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
94
/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @type:	the type of the event that has occurred
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
	ktime_t			now;
	s64			delta; /* ns */
	DEFINE_IR_RAW_EVENT(ev);
	int			rc = 0;
	int			delay;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	/* The input-layer repeat delay is reused as the "gap too long,
	 * treat as a new transmission" threshold. */
	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

	/* Check for a long duration since last event or if we're
	 * being called for the first time, note that delta can't
	 * possibly be negative.
	 */
	if (delta > delay || !dev->raw->last_type)
		type |= IR_START_EVENT;
	else
		ev.duration = delta;

	/* The duration stored is that of the *previous* state, which
	 * ended at this edge; its polarity comes from last_type. */
	if (type & IR_START_EVENT)
		ir_raw_event_reset(dev);
	else if (dev->raw->last_type & IR_SPACE) {
		ev.pulse = false;
		rc = ir_raw_event_store(dev, &ev);
	} else if (dev->raw->last_type & IR_PULSE) {
		ev.pulse = true;
		rc = ir_raw_event_store(dev, &ev);
	} else
		return 0;

	dev->raw->last_event = now;
	dev->raw->last_type = type;
	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
146
/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) works
 * in similar manner to ir_raw_event_store_edge.
 * This routine is intended for devices with limited internal buffer
 * It automerges samples of same type, and handles timeouts. Returns non-zero
 * if the event was added, and zero if the event was ignored due to idle
 * processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	/* Accumulate into this_ev: start a new sample, extend a sample of
	 * the same polarity, or flush the finished sample and start over. */
	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
187
/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		/* Flush the accumulated sample, marked as a timeout, so
		 * decoders see the end of transmission. */
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		init_ir_raw_event(&dev->raw->this_ev);
	}

	/* Let the driver act on the hint (e.g. hardware idle), if supported */
	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
212
/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:	the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	unsigned long flags;

	if (!dev->raw)
		return;

	/* Wake under raw->lock so the wakeup pairs with the thread's
	 * locked fifo-length check (see ir_raw_event_thread()). */
	spin_lock_irqsave(&dev->raw->lock, flags);
	wake_up_process(dev->raw->thread);
	spin_unlock_irqrestore(&dev->raw->lock, flags);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -0300231
David Härdeman667c9eb2010-06-13 17:29:31 -0300232/* used internally by the sysfs interface */
233u64
Randy Dunlap2dbd61b2011-01-09 00:53:53 -0300234ir_raw_get_allowed_protocols(void)
David Härdeman667c9eb2010-06-13 17:29:31 -0300235{
236 u64 protocols;
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300237 mutex_lock(&ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -0300238 protocols = available_protocols;
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300239 mutex_unlock(&ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -0300240 return protocols;
241}
242
243/*
244 * Used to (un)register raw event clients
245 */
David Härdemand8b4b582010-10-29 16:08:23 -0300246int ir_raw_event_register(struct rc_dev *dev)
David Härdeman667c9eb2010-06-13 17:29:31 -0300247{
David Härdeman667c9eb2010-06-13 17:29:31 -0300248 int rc;
David Härdemanc2163692010-06-13 17:29:36 -0300249 struct ir_raw_handler *handler;
David Härdeman667c9eb2010-06-13 17:29:31 -0300250
David Härdemand8b4b582010-10-29 16:08:23 -0300251 if (!dev)
252 return -EINVAL;
253
254 dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
255 if (!dev->raw)
David Härdeman667c9eb2010-06-13 17:29:31 -0300256 return -ENOMEM;
257
David Härdemand8b4b582010-10-29 16:08:23 -0300258 dev->raw->dev = dev;
James Hogan1a1934f2014-02-28 20:17:03 -0300259 rc_set_enabled_protocols(dev, ~0);
David Härdemand8b4b582010-10-29 16:08:23 -0300260 rc = kfifo_alloc(&dev->raw->kfifo,
261 sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
David Härdeman667c9eb2010-06-13 17:29:31 -0300262 GFP_KERNEL);
David Härdemand8b4b582010-10-29 16:08:23 -0300263 if (rc < 0)
264 goto out;
David Härdeman667c9eb2010-06-13 17:29:31 -0300265
David Härdemand8b4b582010-10-29 16:08:23 -0300266 spin_lock_init(&dev->raw->lock);
267 dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
268 "rc%ld", dev->devno);
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -0300269
David Härdemand8b4b582010-10-29 16:08:23 -0300270 if (IS_ERR(dev->raw->thread)) {
271 rc = PTR_ERR(dev->raw->thread);
272 goto out;
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -0300273 }
274
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300275 mutex_lock(&ir_raw_handler_lock);
David Härdemand8b4b582010-10-29 16:08:23 -0300276 list_add_tail(&dev->raw->list, &ir_raw_client_list);
David Härdemanc2163692010-06-13 17:29:36 -0300277 list_for_each_entry(handler, &ir_raw_handler_list, list)
278 if (handler->raw_register)
David Härdemand8b4b582010-10-29 16:08:23 -0300279 handler->raw_register(dev);
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300280 mutex_unlock(&ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -0300281
David Härdemanc2163692010-06-13 17:29:36 -0300282 return 0;
David Härdemand8b4b582010-10-29 16:08:23 -0300283
284out:
285 kfree(dev->raw);
286 dev->raw = NULL;
287 return rc;
David Härdeman667c9eb2010-06-13 17:29:31 -0300288}
289
void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	/* Stop the decoder thread first so nothing is consuming the
	 * kfifo while we tear the client down. */
	kthread_stop(dev->raw->thread);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister)
			handler->raw_unregister(dev);
	mutex_unlock(&ir_raw_handler_lock);

	kfifo_free(&dev->raw->kfifo);
	kfree(dev->raw);
	dev->raw = NULL;
}
310
/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	/* Introduce the new handler to every already-registered client */
	if (ir_raw_handler->raw_register)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_register(raw->dev);
	/* Advertise the protocols this handler can decode */
	available_protocols |= ir_raw_handler->protocols;
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
330
void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	/* Let the handler release per-device state for every client */
	if (ir_raw_handler->raw_unregister)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_unregister(raw->dev);
	available_protocols &= ~ir_raw_handler->protocols;
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);
344
void ir_raw_init(void)
{
	/* Load the decoder modules */

	load_nec_decode();
	load_rc5_decode();
	load_rc6_decode();
	load_jvc_decode();
	load_sony_decode();
	load_sanyo_decode();
	load_sharp_decode();
	load_mce_kbd_decode();
	load_lirc_codec();

	/* If needed, we may later add some init code. In this case,
	   it is needed to change the CONFIG_MODULE test at rc-core.h
	 */
}