/*
 * Input layer to RF Kill interface connector
 *
 * Copyright (c) 2007 Dmitry Torokhov
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * If you ever run into a situation in which you have a SW_ type rfkill
 * input device, then you can revive code that was removed in the patch
 * "rfkill-input: remove unused code".
 */

#include <linux/input.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/init.h>
#include <linux/rfkill.h>
#include <linux/sched.h>

#include "rfkill.h"

enum rfkill_input_master_mode {
	RFKILL_INPUT_MASTER_UNLOCK = 0,
	RFKILL_INPUT_MASTER_RESTORE = 1,
	RFKILL_INPUT_MASTER_UNBLOCKALL = 2,
	NUM_RFKILL_INPUT_MASTER_MODES
};

/* Delay (in ms) between consecutive switch ops */
#define RFKILL_OPS_DELAY 200

static enum rfkill_input_master_mode rfkill_master_switch_mode =
					RFKILL_INPUT_MASTER_UNBLOCKALL;
module_param_named(master_switch_mode, rfkill_master_switch_mode, uint, 0);
MODULE_PARM_DESC(master_switch_mode,
	"SW_RFKILL_ALL ON should: 0=do nothing (only unlock); 1=restore; 2=unblock all");

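/*
 * State shared between the input event callbacks and the deferred worker.
 * rfkill_op_lock protects everything below; rfkill_sw_pending marks which
 * rfkill types have a toggle queued, and rfkill_sw_state records whether
 * that toggle should complement the current global state (two queued
 * toggles cancel each other out).
 */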
static spinlock_t rfkill_op_lock;
static bool rfkill_op_pending;
static unsigned long rfkill_sw_pending[BITS_TO_LONGS(NUM_RFKILL_TYPES)];
static unsigned long rfkill_sw_state[BITS_TO_LONGS(NUM_RFKILL_TYPES)];

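/* Global operations deferred to the workqueue */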
enum rfkill_sched_op {
	RFKILL_GLOBAL_OP_EPO = 0,
	RFKILL_GLOBAL_OP_RESTORE,
	RFKILL_GLOBAL_OP_UNLOCK,
	RFKILL_GLOBAL_OP_UNBLOCK,
};

static enum rfkill_sched_op rfkill_master_switch_op;
static enum rfkill_sched_op rfkill_op;

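/*
 * Carry out one global operation. Called from the worker with
 * rfkill_op_lock released; unknown operations fall back to EPO.
 */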
static void __rfkill_handle_global_op(enum rfkill_sched_op op)
{
	unsigned int i;

	switch (op) {
	case RFKILL_GLOBAL_OP_EPO:
		rfkill_epo();
		break;
	case RFKILL_GLOBAL_OP_RESTORE:
		rfkill_restore_states();
		break;
	case RFKILL_GLOBAL_OP_UNLOCK:
		rfkill_remove_epo_lock();
		break;
	case RFKILL_GLOBAL_OP_UNBLOCK:
		rfkill_remove_epo_lock();
		for (i = 0; i < NUM_RFKILL_TYPES; i++)
			rfkill_switch_all(i, false);
		break;
	default:
		/* memory corruption or bug, fail safely */
		rfkill_epo();
		WARN(1, "Unknown requested operation %d! "
			"rfkill Emergency Power Off activated\n",
			op);
	}
}

static void __rfkill_handle_normal_op(const enum rfkill_type type,
				      const bool complement)
{
	bool blocked;

	blocked = rfkill_get_global_sw_state(type);
	if (complement)
		blocked = !blocked;

	rfkill_switch_all(type, blocked);
}

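/*
 * Deferred worker: runs a pending global operation first, then (unless the
 * EPO lock is active) applies the queued per-type toggles. The spinlock is
 * dropped around each rfkill call and pending global ops are re-checked
 * afterwards.
 */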
static void rfkill_op_handler(struct work_struct *work)
{
	unsigned int i;
	bool c;

	spin_lock_irq(&rfkill_op_lock);
	do {
		if (rfkill_op_pending) {
			enum rfkill_sched_op op = rfkill_op;
			rfkill_op_pending = false;
			memset(rfkill_sw_pending, 0,
				sizeof(rfkill_sw_pending));
			spin_unlock_irq(&rfkill_op_lock);

			__rfkill_handle_global_op(op);

			spin_lock_irq(&rfkill_op_lock);

			/*
			 * handle global ops first -- during unlocked period
			 * we might have gotten a new global op.
			 */
			if (rfkill_op_pending)
				continue;
		}

		if (rfkill_is_epo_lock_active())
			continue;

		for (i = 0; i < NUM_RFKILL_TYPES; i++) {
			if (__test_and_clear_bit(i, rfkill_sw_pending)) {
				c = __test_and_clear_bit(i, rfkill_sw_state);
				spin_unlock_irq(&rfkill_op_lock);

				__rfkill_handle_normal_op(i, c);

				spin_lock_irq(&rfkill_op_lock);
			}
		}
	} while (rfkill_op_pending);
	spin_unlock_irq(&rfkill_op_lock);
}

static DECLARE_DELAYED_WORK(rfkill_op_work, rfkill_op_handler);
static unsigned long rfkill_last_scheduled;

static unsigned long rfkill_ratelimit(const unsigned long last)
{
	const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
	return time_after(jiffies, last + delay) ? 0 : delay;
}

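/* Schedule the worker, keeping at least RFKILL_OPS_DELAY ms between runs */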
static void rfkill_schedule_ratelimited(void)
{
	if (delayed_work_pending(&rfkill_op_work))
		return;
	schedule_delayed_work(&rfkill_op_work,
			      rfkill_ratelimit(rfkill_last_scheduled));
	rfkill_last_scheduled = jiffies;
}

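/*
 * Queue a global operation. EPO bypasses the rate limiter unless the
 * EPO lock is already active.
 */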
static void rfkill_schedule_global_op(enum rfkill_sched_op op)
{
	unsigned long flags;

	spin_lock_irqsave(&rfkill_op_lock, flags);
	rfkill_op = op;
	rfkill_op_pending = true;
	if (op == RFKILL_GLOBAL_OP_EPO && !rfkill_is_epo_lock_active()) {
		/* bypass the limiter for EPO */
		cancel_delayed_work(&rfkill_op_work);
		schedule_delayed_work(&rfkill_op_work, 0);
		rfkill_last_scheduled = jiffies;
	} else
		rfkill_schedule_ratelimited();
	spin_unlock_irqrestore(&rfkill_op_lock, flags);
}

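/*
 * Queue a state toggle for one rfkill type; ignored while a global
 * operation is pending or the EPO lock is active.
 */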
static void rfkill_schedule_toggle(enum rfkill_type type)
{
	unsigned long flags;

	if (rfkill_is_epo_lock_active())
		return;

	spin_lock_irqsave(&rfkill_op_lock, flags);
	if (!rfkill_op_pending) {
		__set_bit(type, rfkill_sw_pending);
		__change_bit(type, rfkill_sw_state);
		rfkill_schedule_ratelimited();
	}
	spin_unlock_irqrestore(&rfkill_op_lock, flags);
}

static void rfkill_schedule_evsw_rfkillall(int state)
{
	if (state)
		rfkill_schedule_global_op(rfkill_master_switch_op);
	else
		rfkill_schedule_global_op(RFKILL_GLOBAL_OP_EPO);
}

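/*
 * Input event callback: map rfkill hotkey presses and SW_RFKILL_ALL
 * switch changes to the scheduled operations above.
 */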
static void rfkill_event(struct input_handle *handle, unsigned int type,
			unsigned int code, int data)
{
	if (type == EV_KEY && data == 1) {
		switch (code) {
		case KEY_WLAN:
			rfkill_schedule_toggle(RFKILL_TYPE_WLAN);
			break;
		case KEY_BLUETOOTH:
			rfkill_schedule_toggle(RFKILL_TYPE_BLUETOOTH);
			break;
		case KEY_UWB:
			rfkill_schedule_toggle(RFKILL_TYPE_UWB);
			break;
		case KEY_WIMAX:
			rfkill_schedule_toggle(RFKILL_TYPE_WIMAX);
			break;
		case KEY_RFKILL:
			rfkill_schedule_toggle(RFKILL_TYPE_ALL);
			break;
		}
	} else if (type == EV_SW && code == SW_RFKILL_ALL)
		rfkill_schedule_evsw_rfkillall(data);
}

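/* Attach to a matching input device and open it so we receive its events */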
static int rfkill_connect(struct input_handler *handler, struct input_dev *dev,
			  const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "rfkill";

	/* causes rfkill_start() to be called */
	error = input_register_handle(handle);
	if (error)
		goto err_free_handle;

	error = input_open_device(handle);
	if (error)
		goto err_unregister_handle;

	return 0;

 err_unregister_handle:
	input_unregister_handle(handle);
 err_free_handle:
	kfree(handle);
	return error;
}

static void rfkill_start(struct input_handle *handle)
{
	/*
	 * Take event_lock to guard against configuration changes, we
	 * should be able to deal with concurrency with rfkill_event()
	 * just fine (which event_lock will also avoid).
	 */
	spin_lock_irq(&handle->dev->event_lock);

	if (test_bit(EV_SW, handle->dev->evbit) &&
	    test_bit(SW_RFKILL_ALL, handle->dev->swbit))
		rfkill_schedule_evsw_rfkillall(test_bit(SW_RFKILL_ALL,
							handle->dev->sw));

	spin_unlock_irq(&handle->dev->event_lock);
}

static void rfkill_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

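/* Match devices that advertise any rfkill-related key or the SW_RFKILL_ALL switch */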
static const struct input_device_id rfkill_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_WLAN)] = BIT_MASK(KEY_WLAN) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_BLUETOOTH)] = BIT_MASK(KEY_BLUETOOTH) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_UWB)] = BIT_MASK(KEY_UWB) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_WIMAX)] = BIT_MASK(KEY_WIMAX) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT,
		.evbit = { BIT_MASK(EV_KEY) },
		.keybit = { [BIT_WORD(KEY_RFKILL)] = BIT_MASK(KEY_RFKILL) },
	},
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_SWBIT,
		.evbit = { BIT(EV_SW) },
		.swbit = { [BIT_WORD(SW_RFKILL_ALL)] = BIT_MASK(SW_RFKILL_ALL) },
	},
	{ }
};

static struct input_handler rfkill_handler = {
	.name = "rfkill",
	.event = rfkill_event,
	.connect = rfkill_connect,
	.start = rfkill_start,
	.disconnect = rfkill_disconnect,
	.id_table = rfkill_ids,
};

int __init rfkill_handler_init(void)
{
	switch (rfkill_master_switch_mode) {
	case RFKILL_INPUT_MASTER_UNBLOCKALL:
		rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNBLOCK;
		break;
	case RFKILL_INPUT_MASTER_RESTORE:
		rfkill_master_switch_op = RFKILL_GLOBAL_OP_RESTORE;
		break;
	case RFKILL_INPUT_MASTER_UNLOCK:
		rfkill_master_switch_op = RFKILL_GLOBAL_OP_UNLOCK;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_init(&rfkill_op_lock);

	/* Avoid delay at first schedule */
	rfkill_last_scheduled =
			jiffies - msecs_to_jiffies(RFKILL_OPS_DELAY) - 1;
	return input_register_handler(&rfkill_handler);
}

void __exit rfkill_handler_exit(void)
{
	input_unregister_handler(&rfkill_handler);
	cancel_delayed_work_sync(&rfkill_op_work);
}