blob: b6d256f0384727c95eca781ee302504522bd8245 [file] [log] [blame]
David Härdeman4924a312014-04-03 20:34:28 -03001/* rc-ir-raw.c - handle IR pulse/space events
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -03002 *
Mauro Carvalho Chehab37e59f82014-02-07 08:03:07 -02003 * Copyright (C) 2010 by Mauro Carvalho Chehab
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -03004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
Paul Gortmaker35a24632011-08-01 15:26:38 -040015#include <linux/export.h>
Maxim Levitsky0d2cb1d2010-07-31 11:59:17 -030016#include <linux/kthread.h>
Maxim Levitsky45a568f2010-07-31 11:59:16 -030017#include <linux/mutex.h>
Stephen Rothwelldff65de2011-07-29 15:34:32 +100018#include <linux/kmod.h>
David Härdeman724e2492010-04-08 13:10:00 -030019#include <linux/sched.h>
Mauro Carvalho Chehabf62de672010-11-09 23:09:57 -030020#include "rc-core-priv.h"
Mauro Carvalho Chehaba3572c32010-03-20 20:59:44 -030021
/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
/* Bitmask of the protocols supported by all currently registered handlers */
static atomic64_t available_protocols = ATOMIC64_INIT(0);
Mauro Carvalho Chehab93c312f2010-03-25 21:13:43 -030029
/*
 * ir_raw_event_thread() - kthread body that feeds stored events to decoders
 * @data:	the struct ir_raw_event_ctrl of this rc device
 *
 * Drains the device's kfifo and hands every pulse/space event to each
 * registered raw handler whose protocols are enabled on the device
 * (handlers with an empty protocol mask always run).  Sleeps when the
 * fifo is empty; woken by ir_raw_event_handle(), exits on kthread_stop().
 */
static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;

	while (1) {
		/* the handler list and decode state are protected by the lock */
		mutex_lock(&ir_raw_handler_lock);
		while (kfifo_out(&raw->kfifo, &ev, 1)) {
			list_for_each_entry(handler, &ir_raw_handler_list, list)
				if (raw->dev->enabled_protocols &
				    handler->protocols || !handler->protocols)
					handler->decode(raw->dev, ev);
			raw->prev_ev = ev;
		}
		mutex_unlock(&ir_raw_handler_lock);

		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else if (!kfifo_is_empty(&raw->kfifo))
			/* more data arrived while draining; don't sleep */
			set_current_state(TASK_RUNNING);

		schedule();
	}

	return 0;
}
60
/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 *
 * Returns 0 on success, -EINVAL if the device is not in raw mode, or
 * -ENOSPC if the event fifo is full.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	IR_dprintk(2, "sample: (%05dus %s)\n",
		   TO_US(ev->duration), TO_STR(ev->pulse));

	/* event is copied by value into the fifo; dropped if no room */
	if (!kfifo_put(&dev->raw->kfifo, *ev)) {
		dev_err(&dev->dev, "IR event FIFO is full!\n");
		return -ENOSPC;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
87
/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @type:	the type of the event that has occurred
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 *
 * Returns 0 on success or if the edge was swallowed, -EINVAL if the
 * device is not in raw mode, or the error from ir_raw_event_store().
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
	ktime_t now;
	s64 delta; /* ns */
	DEFINE_IR_RAW_EVENT(ev);
	int rc = 0;
	int delay;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

	/* Check for a long duration since last event or if we're
	 * being called for the first time, note that delta can't
	 * possibly be negative.
	 */
	if (delta > delay || !dev->raw->last_type)
		type |= IR_START_EVENT;
	else
		ev.duration = delta;

	/* the duration measured now belongs to the PREVIOUS edge's state */
	if (type & IR_START_EVENT)
		ir_raw_event_reset(dev);
	else if (dev->raw->last_type & IR_SPACE) {
		ev.pulse = false;
		rc = ir_raw_event_store(dev, &ev);
	} else if (dev->raw->last_type & IR_PULSE) {
		ev.pulse = true;
		rc = ir_raw_event_store(dev, &ev);
	} else
		return 0;

	dev->raw->last_event = now;
	dev->raw->last_type = type;
	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
139
/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) works
 * in similar manner to ir_raw_event_store_edge.
 * This routine is intended for devices with limited internal buffer
 * It automerges samples of same type, and handles timeouts. Returns non-zero
 * if the event was added, and zero if the event was ignored due to idle
 * processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	/* merge consecutive samples of the same polarity into this_ev */
	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		/* polarity flipped: flush the merged sample, start a new one */
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
180
/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		/* flush the pending merged sample, marked as a timeout */
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		init_ir_raw_event(&dev->raw->this_ev);
	}

	/* let the driver act on the idle transition, if it cares */
	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
205
/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:	the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	/* raw TX devices have no decoder thread (see ir_raw_event_register) */
	if (!dev->raw || !dev->raw->thread)
		return;

	wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
Mauro Carvalho Chehab995187b2010-03-24 20:47:53 -0300220
/*
 * used internally by the sysfs interface; returns the bitmask of
 * protocols currently decodable by the registered raw handlers
 */
u64
ir_raw_get_allowed_protocols(void)
{
	return atomic64_read(&available_protocols);
}
227
/*
 * Default change_protocol hook for raw IR devices: raw decoders are
 * selected purely via dev->enabled_protocols, so nothing to do here.
 */
static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
	/* the caller will update dev->enabled_protocols */
	return 0;
}
233
/* Clear @protocols from the set of protocols enabled on @dev */
static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}
240
/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Manchester modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, u64 data)
{
	bool need_pulse;
	u64 i;		/* single-bit mask walking from MSB to LSB of data */
	int ret = -ENOBUFS;

	i = BIT_ULL(n - 1);

	if (timings->leader) {
		if (!max--)
			return ret;
		if (timings->pulse_space_start) {
			init_ir_raw_event_duration((*ev)++, 1, timings->leader);

			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev), 0, timings->leader);
		} else {
			init_ir_raw_event_duration((*ev), 1, timings->leader);
		}
		/* the leader consumed the most significant bit */
		i >>= 1;
	} else {
		/* continue existing signal */
		--(*ev);
	}
	/* from here on *ev will point to the last event rather than the next */

	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		if (need_pulse == !!(*ev)->pulse) {
			/* same polarity as previous half-bit: merge durations */
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		/* second half-bit always has opposite polarity */
		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		if (!(*ev)->pulse)
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* point to the next event rather than last event before returning */
	++(*ev);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
325
326/**
James Hogancaec0982014-03-14 20:04:12 -0300327 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
328 * @ev: Pointer to pointer to next free event. *@ev is incremented for
329 * each raw event filled.
330 * @max: Maximum number of raw events to fill.
331 * @timings: Pulse distance modulation timings.
332 * @n: Number of bits of data.
333 * @data: Data bits to encode.
334 *
335 * Encodes the @n least significant bits of @data using pulse-distance
336 * modulation with the timing characteristics described by @timings, writing up
337 * to @max raw IR events using the *@ev pointer.
338 *
339 * Returns: 0 on success.
340 * -ENOBUFS if there isn't enough space in the array to fit the
341 * full encoded data. In this case all @max events will have been
342 * written.
343 */
344int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
345 const struct ir_raw_timings_pd *timings,
346 unsigned int n, u64 data)
347{
348 int i;
349 int ret;
350 unsigned int space;
351
352 if (timings->header_pulse) {
353 ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
354 timings->header_space);
355 if (ret)
356 return ret;
357 }
358
359 if (timings->msb_first) {
360 for (i = n - 1; i >= 0; --i) {
361 space = timings->bit_space[(data >> i) & 1];
362 ret = ir_raw_gen_pulse_space(ev, &max,
363 timings->bit_pulse,
364 space);
365 if (ret)
366 return ret;
367 }
368 } else {
369 for (i = 0; i < n; ++i, data >>= 1) {
370 space = timings->bit_space[data & 1];
371 ret = ir_raw_gen_pulse_space(ev, &max,
372 timings->bit_pulse,
373 space);
374 if (ret)
375 return ret;
376 }
377 }
378
379 ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
380 timings->trailer_space);
381 return ret;
382}
383EXPORT_SYMBOL(ir_raw_gen_pd);
384
385/**
Sean Young103293b2016-12-06 18:33:57 -0200386 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
387 * @ev: Pointer to pointer to next free event. *@ev is incremented for
388 * each raw event filled.
389 * @max: Maximum number of raw events to fill.
390 * @timings: Pulse distance modulation timings.
391 * @n: Number of bits of data.
392 * @data: Data bits to encode.
393 *
394 * Encodes the @n least significant bits of @data using space-distance
395 * modulation with the timing characteristics described by @timings, writing up
396 * to @max raw IR events using the *@ev pointer.
397 *
398 * Returns: 0 on success.
399 * -ENOBUFS if there isn't enough space in the array to fit the
400 * full encoded data. In this case all @max events will have been
401 * written.
402 */
403int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
404 const struct ir_raw_timings_pl *timings,
405 unsigned int n, u64 data)
406{
407 int i;
408 int ret = -ENOBUFS;
409 unsigned int pulse;
410
411 if (!max--)
412 return ret;
413
414 init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);
415
416 if (timings->msb_first) {
417 for (i = n - 1; i >= 0; --i) {
418 if (!max--)
419 return ret;
420 init_ir_raw_event_duration((*ev)++, 0,
421 timings->bit_space);
422 if (!max--)
423 return ret;
424 pulse = timings->bit_pulse[(data >> i) & 1];
425 init_ir_raw_event_duration((*ev)++, 1, pulse);
426 }
427 } else {
428 for (i = 0; i < n; ++i, data >>= 1) {
429 if (!max--)
430 return ret;
431 init_ir_raw_event_duration((*ev)++, 0,
432 timings->bit_space);
433 if (!max--)
434 return ret;
435 pulse = timings->bit_pulse[data & 1];
436 init_ir_raw_event_duration((*ev)++, 1, pulse);
437 }
438 }
439
440 if (!max--)
441 return ret;
442
443 init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);
444
445 return 0;
446}
447EXPORT_SYMBOL(ir_raw_gen_pl);
448
/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocol:		protocol
 * @scancode:		scancode filter describing a single scancode
 * @events:		array of raw events to write into
 * @max:		max number of raw events
 *
 * Attempts to encode the scancode as raw events.
 *
 * Returns:	The number of events written.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		encoding. In this case all @max events will have been written.
 *		-EINVAL if the scancode is ambiguous or invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_scancode(enum rc_type protocol, u32 scancode,
			   struct ir_raw_event *events, unsigned int max)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = 1ULL << protocol;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->encode(protocol, scancode, events, max);
			/* success and buffer-too-small are both final */
			if (ret >= 0 || ret == -ENOBUFS)
				break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
485
/*
 * Used to (un)register raw event clients
 */

/*
 * ir_raw_event_prepare() - allocate the raw state of an rc device
 * @dev:	the struct rc_dev device descriptor
 *
 * Allocates dev->raw, installs the default change_protocol hook and
 * initializes the event fifo. On first use also requests the lirc
 * codec module. Returns 0 on success, -EINVAL/-ENOMEM on failure.
 */
int ir_raw_event_prepare(struct rc_dev *dev)
{
	static bool raw_init; /* 'false' default value, raw decoders loaded? */

	if (!dev)
		return -EINVAL;

	if (!raw_init) {
		request_module("ir-lirc-codec");
		raw_init = true;
	}

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	INIT_KFIFO(dev->raw->kfifo);

	return 0;
}

/*
 * ir_raw_event_register() - start raw event processing for an rc device
 * @dev:	the struct rc_dev device descriptor
 *
 * Spawns the decoder kthread (except for raw TX devices), adds the device
 * to the raw client list and notifies every registered handler.
 * Returns 0 on success or the kthread_run() error.
 */
int ir_raw_event_register(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;
	struct task_struct *thread;

	/*
	 * raw transmitters do not need any event registration
	 * because the event is coming from userspace
	 */
	if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
		thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u",
				     dev->minor);

		if (IS_ERR(thread))
			return PTR_ERR(thread);

		dev->raw->thread = thread;
	}

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_register)
			handler->raw_register(dev);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
David Härdemand8b4b582010-10-29 16:08:23 -0300540
David Härdemanf56928a2017-05-03 07:04:00 -0300541void ir_raw_event_free(struct rc_dev *dev)
542{
543 if (!dev)
544 return;
545
David Härdemand8b4b582010-10-29 16:08:23 -0300546 kfree(dev->raw);
547 dev->raw = NULL;
David Härdeman667c9eb2010-06-13 17:29:31 -0300548}
549
David Härdemand8b4b582010-10-29 16:08:23 -0300550void ir_raw_event_unregister(struct rc_dev *dev)
David Härdeman667c9eb2010-06-13 17:29:31 -0300551{
David Härdemanc2163692010-06-13 17:29:36 -0300552 struct ir_raw_handler *handler;
David Härdeman667c9eb2010-06-13 17:29:31 -0300553
David Härdemand8b4b582010-10-29 16:08:23 -0300554 if (!dev || !dev->raw)
David Härdeman667c9eb2010-06-13 17:29:31 -0300555 return;
556
David Härdemand8b4b582010-10-29 16:08:23 -0300557 kthread_stop(dev->raw->thread);
David Härdemanc2163692010-06-13 17:29:36 -0300558
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300559 mutex_lock(&ir_raw_handler_lock);
David Härdemand8b4b582010-10-29 16:08:23 -0300560 list_del(&dev->raw->list);
David Härdemanc2163692010-06-13 17:29:36 -0300561 list_for_each_entry(handler, &ir_raw_handler_list, list)
562 if (handler->raw_unregister)
David Härdemand8b4b582010-10-29 16:08:23 -0300563 handler->raw_unregister(dev);
Maxim Levitsky45a568f2010-07-31 11:59:16 -0300564 mutex_unlock(&ir_raw_handler_lock);
David Härdeman667c9eb2010-06-13 17:29:31 -0300565
David Härdemanf56928a2017-05-03 07:04:00 -0300566 ir_raw_event_free(dev);
David Härdeman667c9eb2010-06-13 17:29:31 -0300567}
568
/*
 * Extension interface - used to register the IR decoders
 */

int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	/* let the new handler attach to every existing raw client */
	if (ir_raw_handler->raw_register)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_register(raw->dev);
	/* advertise the handler's protocols to the sysfs interface */
	atomic64_or(ir_raw_handler->protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
588
void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;
	u64 protocols = ir_raw_handler->protocols;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	/* detach the handler from every client and disable its protocols */
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		ir_raw_disable_protocols(raw->dev, protocols);
		if (ir_raw_handler->raw_unregister)
			ir_raw_handler->raw_unregister(raw->dev);
	}
	atomic64_andnot(protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);