blob: 11c19d8d0ee0a7edad7aeb3ea241130585a6d381 [file] [log] [blame]
David Härdeman829ba9f2010-11-19 20:43:27 -03001/* ir-raw.c - handle IR pulse/space events
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -03002 *
3 * Copyright (C) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030015#include <linux/kthread.h>
Maxim Levitsky45a568f2010-07-31 11:59:16 -030016#include <linux/mutex.h>
David Härdeman724e2492010-04-08 13:10:00 -030017#include <linux/sched.h>
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030018#include <linux/freezer.h>
Mauro Carvalho Chehabf62de672010-11-09 23:09:57 -030019#include "rc-core-priv.h"
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030020
David Härdeman724e2492010-04-08 13:10:00 -030021/* Define the max number of pulse/space transitions to buffer */
22#define MAX_IR_EVENT_SIZE 512
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030023
David Härdemanc2163692010-06-13 17:29:36 -030024/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
25static LIST_HEAD(ir_raw_client_list);
26
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -030027/* Used to handle IR raw handler extensions */
Maxim Levitsky45a568f2010-07-31 11:59:16 -030028static DEFINE_MUTEX(ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -030029static LIST_HEAD(ir_raw_handler_list);
30static u64 available_protocols;
Mauro Carvalho Chehab93c312f2010-03-25 21:13:43 -030031
Mauro Carvalho Chehab5fa29892010-04-08 19:04:06 -030032#ifdef MODULE
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -030033/* Used to load the decoders */
34static struct work_struct wq_load;
Mauro Carvalho Chehab5fa29892010-04-08 19:04:06 -030035#endif
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -030036
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030037static int ir_raw_event_thread(void *data)
David Härdeman724e2492010-04-08 13:10:00 -030038{
David Härdemane40b1122010-04-15 18:46:00 -030039 struct ir_raw_event ev;
David Härdemanc2163692010-06-13 17:29:36 -030040 struct ir_raw_handler *handler;
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030041 struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
Maxim Levitskyc6ef1e72010-09-06 18:26:06 -030042 int retval;
David Härdeman724e2492010-04-08 13:10:00 -030043
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030044 while (!kthread_should_stop()) {
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030045
Maxim Levitskyc6ef1e72010-09-06 18:26:06 -030046 spin_lock_irq(&raw->lock);
47 retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030048
Maxim Levitskyc6ef1e72010-09-06 18:26:06 -030049 if (!retval) {
50 set_current_state(TASK_INTERRUPTIBLE);
51
52 if (kthread_should_stop())
53 set_current_state(TASK_RUNNING);
54
55 spin_unlock_irq(&raw->lock);
56 schedule();
57 continue;
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030058 }
59
Maxim Levitskyc6ef1e72010-09-06 18:26:06 -030060 spin_unlock_irq(&raw->lock);
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030061
Maxim Levitskyc6ef1e72010-09-06 18:26:06 -030062
63 BUG_ON(retval != sizeof(ev));
64
65 mutex_lock(&ir_raw_handler_lock);
66 list_for_each_entry(handler, &ir_raw_handler_list, list)
David Härdemand8b4b582010-10-29 16:08:23 -030067 handler->decode(raw->dev, ev);
Maxim Levitskyc6ef1e72010-09-06 18:26:06 -030068 raw->prev_ev = ev;
69 mutex_unlock(&ir_raw_handler_lock);
David Härdemanc2163692010-06-13 17:29:36 -030070 }
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030071
72 return 0;
David Härdeman724e2492010-04-08 13:10:00 -030073}
74
David Härdeman724e2492010-04-08 13:10:00 -030075/**
76 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
David Härdemand8b4b582010-10-29 16:08:23 -030077 * @dev: the struct rc_dev device descriptor
David Härdemane40b1122010-04-15 18:46:00 -030078 * @ev: the struct ir_raw_event descriptor of the pulse/space
David Härdeman724e2492010-04-08 13:10:00 -030079 *
80 * This routine (which may be called from an interrupt context) stores a
81 * pulse/space duration for the raw ir decoding state machines. Pulses are
82 * signalled as positive values and spaces as negative values. A zero value
83 * will reset the decoding state machines.
84 */
David Härdemand8b4b582010-10-29 16:08:23 -030085int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030086{
David Härdemand8b4b582010-10-29 16:08:23 -030087 if (!dev->raw)
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030088 return -EINVAL;
89
Mauro Carvalho Chehab74c47922010-10-20 11:56:50 -030090 IR_dprintk(2, "sample: (%05dus %s)\n",
David Härdemand8b4b582010-10-29 16:08:23 -030091 TO_US(ev->duration), TO_STR(ev->pulse));
Maxim Levitsky510fcb72010-07-31 11:59:15 -030092
David Härdemand8b4b582010-10-29 16:08:23 -030093 if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
David Härdeman724e2492010-04-08 13:10:00 -030094 return -ENOMEM;
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030095
David Härdeman724e2492010-04-08 13:10:00 -030096 return 0;
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030097}
98EXPORT_SYMBOL_GPL(ir_raw_event_store);
99
David Härdeman724e2492010-04-08 13:10:00 -0300100/**
101 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
David Härdemand8b4b582010-10-29 16:08:23 -0300102 * @dev: the struct rc_dev device descriptor
David Härdeman724e2492010-04-08 13:10:00 -0300103 * @type: the type of the event that has occurred
104 *
105 * This routine (which may be called from an interrupt context) is used to
106 * store the beginning of an ir pulse or space (or the start/end of ir
107 * reception) for the raw ir decoding state machines. This is used by
108 * hardware which does not provide durations directly but only interrupts
109 * (or similar events) on state change.
110 */
David Härdemand8b4b582010-10-29 16:08:23 -0300111int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -0300112{
David Härdeman724e2492010-04-08 13:10:00 -0300113 ktime_t now;
114 s64 delta; /* ns */
Mauro Carvalho Chehab83587832011-01-20 18:16:50 -0300115 DEFINE_IR_RAW_EVENT(ev);
David Härdeman724e2492010-04-08 13:10:00 -0300116 int rc = 0;
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -0300117
David Härdemand8b4b582010-10-29 16:08:23 -0300118 if (!dev->raw)
David Härdeman724e2492010-04-08 13:10:00 -0300119 return -EINVAL;
120
121 now = ktime_get();
David Härdemand8b4b582010-10-29 16:08:23 -0300122 delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
David Härdeman724e2492010-04-08 13:10:00 -0300123
124 /* Check for a long duration since last event or if we're
125 * being called for the first time, note that delta can't
126 * possibly be negative.
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -0300127 */
David Härdemand8b4b582010-10-29 16:08:23 -0300128 if (delta > IR_MAX_DURATION || !dev->raw->last_type)
David Härdeman724e2492010-04-08 13:10:00 -0300129 type |= IR_START_EVENT;
David Härdemane40b1122010-04-15 18:46:00 -0300130 else
131 ev.duration = delta;
David Härdeman724e2492010-04-08 13:10:00 -0300132
133 if (type & IR_START_EVENT)
David Härdemand8b4b582010-10-29 16:08:23 -0300134 ir_raw_event_reset(dev);
135 else if (dev->raw->last_type & IR_SPACE) {
David Härdemane40b1122010-04-15 18:46:00 -0300136 ev.pulse = false;
David Härdemand8b4b582010-10-29 16:08:23 -0300137 rc = ir_raw_event_store(dev, &ev);
138 } else if (dev->raw->last_type & IR_PULSE) {
David Härdemane40b1122010-04-15 18:46:00 -0300139 ev.pulse = true;
David Härdemand8b4b582010-10-29 16:08:23 -0300140 rc = ir_raw_event_store(dev, &ev);
David Härdemane40b1122010-04-15 18:46:00 -0300141 } else
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -0300142 return 0;
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -0300143
David Härdemand8b4b582010-10-29 16:08:23 -0300144 dev->raw->last_event = now;
145 dev->raw->last_type = type;
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -0300146 return rc;
147}
David Härdeman724e2492010-04-08 13:10:00 -0300148EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
149
150/**
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300151 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
David Härdemand8b4b582010-10-29 16:08:23 -0300152 * @dev: the struct rc_dev device descriptor
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300153 * @type: the type of the event that has occurred
154 *
155 * This routine (which may be called from an interrupt context) works
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300156 * in similar manner to ir_raw_event_store_edge.
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300157 * This routine is intended for devices with limited internal buffer
158 * It automerges samples of same type, and handles timeouts
159 */
David Härdemand8b4b582010-10-29 16:08:23 -0300160int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300161{
David Härdemand8b4b582010-10-29 16:08:23 -0300162 if (!dev->raw)
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300163 return -EINVAL;
164
165 /* Ignore spaces in idle mode */
David Härdemand8b4b582010-10-29 16:08:23 -0300166 if (dev->idle && !ev->pulse)
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300167 return 0;
David Härdemand8b4b582010-10-29 16:08:23 -0300168 else if (dev->idle)
169 ir_raw_event_set_idle(dev, false);
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300170
David Härdemand8b4b582010-10-29 16:08:23 -0300171 if (!dev->raw->this_ev.duration)
172 dev->raw->this_ev = *ev;
173 else if (ev->pulse == dev->raw->this_ev.pulse)
174 dev->raw->this_ev.duration += ev->duration;
175 else {
176 ir_raw_event_store(dev, &dev->raw->this_ev);
177 dev->raw->this_ev = *ev;
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300178 }
179
180 /* Enter idle mode if nessesary */
David Härdemand8b4b582010-10-29 16:08:23 -0300181 if (!ev->pulse && dev->timeout &&
182 dev->raw->this_ev.duration >= dev->timeout)
183 ir_raw_event_set_idle(dev, true);
184
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300185 return 0;
186}
187EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
188
Maxim Levitsky46519182010-10-16 19:56:28 -0300189/**
David Härdemand8b4b582010-10-29 16:08:23 -0300190 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
191 * @dev: the struct rc_dev device descriptor
192 * @idle: whether the device is idle or not
Maxim Levitsky46519182010-10-16 19:56:28 -0300193 */
David Härdemand8b4b582010-10-29 16:08:23 -0300194void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300195{
David Härdemand8b4b582010-10-29 16:08:23 -0300196 if (!dev->raw)
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300197 return;
198
Maxim Levitsky46519182010-10-16 19:56:28 -0300199 IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300200
201 if (idle) {
David Härdemand8b4b582010-10-29 16:08:23 -0300202 dev->raw->this_ev.timeout = true;
203 ir_raw_event_store(dev, &dev->raw->this_ev);
204 init_ir_raw_event(&dev->raw->this_ev);
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300205 }
Maxim Levitsky46519182010-10-16 19:56:28 -0300206
David Härdemand8b4b582010-10-29 16:08:23 -0300207 if (dev->s_idle)
208 dev->s_idle(dev, idle);
209
210 dev->idle = idle;
Maxim Levitsky4a702eb2010-07-31 11:59:22 -0300211}
212EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
213
214/**
David Härdeman724e2492010-04-08 13:10:00 -0300215 * ir_raw_event_handle() - schedules the decoding of stored ir data
David Härdemand8b4b582010-10-29 16:08:23 -0300216 * @dev: the struct rc_dev device descriptor
David Härdeman724e2492010-04-08 13:10:00 -0300217 *
David Härdemand8b4b582010-10-29 16:08:23 -0300218 * This routine will tell rc-core to start decoding stored ir data.
David Härdeman724e2492010-04-08 13:10:00 -0300219 */
David Härdemand8b4b582010-10-29 16:08:23 -0300220void ir_raw_event_handle(struct rc_dev *dev)
David Härdeman724e2492010-04-08 13:10:00 -0300221{
Maxim Levitskyc6ef1e72010-09-06 18:26:06 -0300222 unsigned long flags;
David Härdeman724e2492010-04-08 13:10:00 -0300223
David Härdemand8b4b582010-10-29 16:08:23 -0300224 if (!dev->raw)
David Härdeman724e2492010-04-08 13:10:00 -0300225 return;
226
David Härdemand8b4b582010-10-29 16:08:23 -0300227 spin_lock_irqsave(&dev->raw->lock, flags);
228 wake_up_process(dev->raw->thread);
229 spin_unlock_irqrestore(&dev->raw->lock, flags);
David Härdeman724e2492010-04-08 13:10:00 -0300230}
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -0300231EXPORT_SYMBOL_GPL(ir_raw_event_handle);
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -0300232
David Härdeman667c9eb2010-06-13 17:29:31 -0300233/* used internally by the sysfs interface */
234u64
Randy Dunlap2dbd61b2011-01-09 00:53:53 -0300235ir_raw_get_allowed_protocols(void)
David Härdeman667c9eb2010-06-13 17:29:31 -0300236{
237 u64 protocols;
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300238 mutex_lock(&ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -0300239 protocols = available_protocols;
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300240 mutex_unlock(&ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -0300241 return protocols;
242}
243
244/*
245 * Used to (un)register raw event clients
246 */
David Härdemand8b4b582010-10-29 16:08:23 -0300247int ir_raw_event_register(struct rc_dev *dev)
David Härdeman667c9eb2010-06-13 17:29:31 -0300248{
David Härdeman667c9eb2010-06-13 17:29:31 -0300249 int rc;
David Härdemanc2163692010-06-13 17:29:36 -0300250 struct ir_raw_handler *handler;
David Härdeman667c9eb2010-06-13 17:29:31 -0300251
David Härdemand8b4b582010-10-29 16:08:23 -0300252 if (!dev)
253 return -EINVAL;
254
255 dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
256 if (!dev->raw)
David Härdeman667c9eb2010-06-13 17:29:31 -0300257 return -ENOMEM;
258
David Härdemand8b4b582010-10-29 16:08:23 -0300259 dev->raw->dev = dev;
260 dev->raw->enabled_protocols = ~0;
261 rc = kfifo_alloc(&dev->raw->kfifo,
262 sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
David Härdeman667c9eb2010-06-13 17:29:31 -0300263 GFP_KERNEL);
David Härdemand8b4b582010-10-29 16:08:23 -0300264 if (rc < 0)
265 goto out;
David Härdeman667c9eb2010-06-13 17:29:31 -0300266
David Härdemand8b4b582010-10-29 16:08:23 -0300267 spin_lock_init(&dev->raw->lock);
268 dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
269 "rc%ld", dev->devno);
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -0300270
David Härdemand8b4b582010-10-29 16:08:23 -0300271 if (IS_ERR(dev->raw->thread)) {
272 rc = PTR_ERR(dev->raw->thread);
273 goto out;
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -0300274 }
275
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300276 mutex_lock(&ir_raw_handler_lock);
David Härdemand8b4b582010-10-29 16:08:23 -0300277 list_add_tail(&dev->raw->list, &ir_raw_client_list);
David Härdemanc2163692010-06-13 17:29:36 -0300278 list_for_each_entry(handler, &ir_raw_handler_list, list)
279 if (handler->raw_register)
David Härdemand8b4b582010-10-29 16:08:23 -0300280 handler->raw_register(dev);
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300281 mutex_unlock(&ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -0300282
David Härdemanc2163692010-06-13 17:29:36 -0300283 return 0;
David Härdemand8b4b582010-10-29 16:08:23 -0300284
285out:
286 kfree(dev->raw);
287 dev->raw = NULL;
288 return rc;
David Härdeman667c9eb2010-06-13 17:29:31 -0300289}
290
David Härdemand8b4b582010-10-29 16:08:23 -0300291void ir_raw_event_unregister(struct rc_dev *dev)
David Härdeman667c9eb2010-06-13 17:29:31 -0300292{
David Härdemanc2163692010-06-13 17:29:36 -0300293 struct ir_raw_handler *handler;
David Härdeman667c9eb2010-06-13 17:29:31 -0300294
David Härdemand8b4b582010-10-29 16:08:23 -0300295 if (!dev || !dev->raw)
David Härdeman667c9eb2010-06-13 17:29:31 -0300296 return;
297
David Härdemand8b4b582010-10-29 16:08:23 -0300298 kthread_stop(dev->raw->thread);
David Härdemanc2163692010-06-13 17:29:36 -0300299
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300300 mutex_lock(&ir_raw_handler_lock);
David Härdemand8b4b582010-10-29 16:08:23 -0300301 list_del(&dev->raw->list);
David Härdemanc2163692010-06-13 17:29:36 -0300302 list_for_each_entry(handler, &ir_raw_handler_list, list)
303 if (handler->raw_unregister)
David Härdemand8b4b582010-10-29 16:08:23 -0300304 handler->raw_unregister(dev);
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300305 mutex_unlock(&ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -0300306
David Härdemand8b4b582010-10-29 16:08:23 -0300307 kfifo_free(&dev->raw->kfifo);
308 kfree(dev->raw);
309 dev->raw = NULL;
David Härdeman667c9eb2010-06-13 17:29:31 -0300310}
311
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -0300312/*
313 * Extension interface - used to register the IR decoders
314 */
315
316int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
317{
David Härdemanc2163692010-06-13 17:29:36 -0300318 struct ir_raw_event_ctrl *raw;
319
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300320 mutex_lock(&ir_raw_handler_lock);
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -0300321 list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
David Härdemanc2163692010-06-13 17:29:36 -0300322 if (ir_raw_handler->raw_register)
323 list_for_each_entry(raw, &ir_raw_client_list, list)
David Härdemand8b4b582010-10-29 16:08:23 -0300324 ir_raw_handler->raw_register(raw->dev);
David Härdeman667c9eb2010-06-13 17:29:31 -0300325 available_protocols |= ir_raw_handler->protocols;
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300326 mutex_unlock(&ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -0300327
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -0300328 return 0;
329}
330EXPORT_SYMBOL(ir_raw_handler_register);
331
332void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
333{
David Härdemanc2163692010-06-13 17:29:36 -0300334 struct ir_raw_event_ctrl *raw;
335
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300336 mutex_lock(&ir_raw_handler_lock);
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -0300337 list_del(&ir_raw_handler->list);
David Härdemanc2163692010-06-13 17:29:36 -0300338 if (ir_raw_handler->raw_unregister)
339 list_for_each_entry(raw, &ir_raw_client_list, list)
David Härdemand8b4b582010-10-29 16:08:23 -0300340 ir_raw_handler->raw_unregister(raw->dev);
David Härdeman667c9eb2010-06-13 17:29:31 -0300341 available_protocols &= ~ir_raw_handler->protocols;
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300342 mutex_unlock(&ir_raw_handler_lock);
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -0300343}
344EXPORT_SYMBOL(ir_raw_handler_unregister);
345
#ifdef MODULE
/* Deferred work: pull in every bundled protocol decoder module */
static void init_decoders(struct work_struct *work)
{
	load_nec_decode();
	load_rc5_decode();
	load_rc6_decode();
	load_jvc_decode();
	load_sony_decode();
	load_lirc_codec();

	/* If needed, we may later add some init code. In this case,
	   it is needed to change the CONFIG_MODULE test at rc-core.h
	 */
}
#endif
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -0300363
void ir_raw_init(void)
{
#ifdef MODULE
	/* Load the decoder modules asynchronously from a workqueue */
	INIT_WORK(&wq_load, init_decoders);
	schedule_work(&wq_load);
#endif
}