/*
 * 	connector.c
 *
 * 2004-2005 Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/moduleparam.h>
#include <linux/connector.h>

#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector.");

static u32 cn_idx = CN_IDX_CONNECTOR;
static u32 cn_val = CN_VAL_CONNECTOR;

module_param(cn_idx, uint, 0);
module_param(cn_val, uint, 0);
MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
MODULE_PARM_DESC(cn_val, "Connector's main device val.");

static DECLARE_MUTEX(notify_lock);
static LIST_HEAD(notify_list);

static struct cn_dev cdev;

int cn_already_initialized = 0;

/*
 * msg->seq and msg->ack are used to determine message genealogy.
 * When someone sends a message, it puts a locally unique sequence
 * number and a random acknowledge number into it.  The sequence
 * number may be copied into nlmsghdr->nlmsg_seq too.
 *
 * The sequence number is incremented with each message sent.
 *
 * If we expect a reply to our message, then the sequence number in
 * the received message MUST be the same as in the original message,
 * and the acknowledge number MUST be the same + 1.
 *
 * If we receive a message and its sequence number is not equal to the
 * one we are expecting, then it is a new message.
 *
 * If we receive a message and its sequence number is the same as the
 * one we are expecting, but its acknowledgement number is not equal
 * to the acknowledgement number in the original message + 1, then it
 * is a new message.
 */
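
/*
 * A minimal sketch of the reply check implied by the comment above
 * (an assumption for illustration, not a helper used in this file;
 * req and reply are hypothetical struct cn_msg pointers kept by the
 * sender):
 *
 *	static int cn_msg_is_reply(struct cn_msg *req, struct cn_msg *reply)
 *	{
 *		// same sequence number, and the acknowledgement number
 *		// of the original message + 1 -- anything else is
 *		// treated as a new message
 *		return reply->seq == req->seq &&
 *		       reply->ack == req->ack + 1;
 *	}
 */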
int cn_netlink_send(struct cn_msg *msg, u32 __group, int gfp_mask)
{
        struct cn_callback_entry *__cbq;
        unsigned int size;
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
        struct cn_msg *data;
        struct cn_dev *dev = &cdev;
        u32 group = 0;
        int found = 0;

        if (!__group) {
                spin_lock_bh(&dev->cbdev->queue_lock);
                list_for_each_entry(__cbq, &dev->cbdev->queue_list,
                                    callback_entry) {
                        if (cn_cb_equal(&__cbq->cb->id, &msg->id)) {
                                found = 1;
                                group = __cbq->group;
                        }
                }
                spin_unlock_bh(&dev->cbdev->queue_lock);

                if (!found)
                        return -ENODEV;
        } else {
                group = __group;
        }

        size = NLMSG_SPACE(sizeof(*msg) + msg->len);

        skb = alloc_skb(size, gfp_mask);
        if (!skb)
                return -ENOMEM;

        nlh = NLMSG_PUT(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh));

        data = NLMSG_DATA(nlh);

        memcpy(data, msg, sizeof(*data) + msg->len);

        NETLINK_CB(skb).dst_group = group;

        netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);

        return 0;

nlmsg_failure:
        kfree_skb(skb);
        return -EINVAL;
}
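
/*
 * Usage sketch for cn_netlink_send() (an illustrative assumption, not
 * code used by this file): a kernel-side user builds a struct cn_msg
 * with its payload in one buffer and passes group 0 so that the
 * netlink group is looked up by the registered id.  my_idx, my_val
 * and my_seq are hypothetical.
 *
 *	char buf[sizeof(struct cn_msg) + 4];
 *	struct cn_msg *m = (struct cn_msg *)buf;
 *
 *	memset(buf, 0, sizeof(buf));
 *	m->id.idx = my_idx;	// must match a registered callback id
 *	m->id.val = my_val;
 *	m->seq = my_seq++;	// locally unique sequence number
 *	get_random_bytes(&m->ack, sizeof(m->ack));
 *	m->len = 4;
 *	memcpy(m->data, "ping", 4);
 *
 *	cn_netlink_send(m, 0, GFP_KERNEL);
 */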

/*
 * Callback helper - queues work and sets up the destructor for the
 * given data.
 */
static int cn_call_callback(struct cn_msg *msg, void (*destruct_data)(void *), void *data)
{
        struct cn_callback_entry *__cbq;
        struct cn_dev *dev = &cdev;
        int found = 0;

        spin_lock_bh(&dev->cbdev->queue_lock);
        list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
                if (cn_cb_equal(&__cbq->cb->id, &msg->id)) {
                        /*
                         * Let's scream if there is some magic and the
                         * data arrives asynchronously here
                         * [i.e. netlink messages get queued].
                         * After the first warning I will fix it
                         * quickly, but for now I think it is
                         * impossible. --zbr (2004_04_27).
                         */
                        if (likely(!test_bit(0, &__cbq->work.pending) &&
                                   __cbq->ddata == NULL)) {
                                __cbq->cb->priv = msg;

                                __cbq->ddata = data;
                                __cbq->destruct_data = destruct_data;

                                if (queue_work(dev->cbdev->cn_queue,
                                               &__cbq->work))
                                        found = 1;
                        } else {
                                printk(KERN_ERR "%s: cbq->data=%p, "
                                       "work->pending=%08lx.\n",
                                       __func__, __cbq->ddata,
                                       __cbq->work.pending);
                                WARN_ON(1);
                        }
                        break;
                }
        }
        spin_unlock_bh(&dev->cbdev->queue_lock);

        return found ? 0 : -ENODEV;
}

/*
 * Skb receive helper - checks skb and msg size and calls callback
 * helper.
 */
static int __cn_rx_skb(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        u32 pid, uid, seq, group;
        struct cn_msg *msg;

        pid = NETLINK_CREDS(skb)->pid;
        uid = NETLINK_CREDS(skb)->uid;
        seq = nlh->nlmsg_seq;
        group = NETLINK_CB(skb).dst_group;
        msg = NLMSG_DATA(nlh);

        return cn_call_callback(msg, (void (*)(void *))kfree_skb, skb);
}

/*
 * Main netlink receiving function.
 *
 * It checks skb and netlink header sizes and calls the skb receive
 * helper with a shared skb.
 */
static void cn_rx_skb(struct sk_buff *__skb)
{
        struct nlmsghdr *nlh;
        u32 len;
        int err;
        struct sk_buff *skb;

        skb = skb_get(__skb);

        if (skb->len >= NLMSG_SPACE(0)) {
                nlh = (struct nlmsghdr *)skb->data;

                if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
                    skb->len < nlh->nlmsg_len ||
                    nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
                        kfree_skb(skb);
                        goto out;
                }

                len = NLMSG_ALIGN(nlh->nlmsg_len);
                if (len > skb->len)
                        len = skb->len;

                err = __cn_rx_skb(skb, nlh);
                if (err < 0)
                        kfree_skb(skb);
        }

out:
        kfree_skb(__skb);
}

/*
 * Netlink socket input callback - dequeues the skbs and calls the
 * main netlink receiving function.
 */
static void cn_input(struct sock *sk, int len)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL)
                cn_rx_skb(skb);
}

/*
 * Notification routing.
 *
 * Takes an id and checks whether notification requests have been
 * registered for its idx and val.  If there are such requests, the
 * listeners are notified with the given notify event.
 */
static void cn_notify(struct cb_id *id, u32 notify_event)
{
        struct cn_ctl_entry *ent;

        down(&notify_lock);
        list_for_each_entry(ent, &notify_list, notify_entry) {
                int i;
                struct cn_notify_req *req;
                struct cn_ctl_msg *ctl = ent->msg;
                int idx_found, val_found;

                idx_found = val_found = 0;

                req = (struct cn_notify_req *)ctl->data;
                for (i = 0; i < ctl->idx_notify_num; ++i, ++req) {
                        if (id->idx >= req->first &&
                            id->idx < req->first + req->range) {
                                idx_found = 1;
                                break;
                        }
                }

                for (i = 0; i < ctl->val_notify_num; ++i, ++req) {
                        if (id->val >= req->first &&
                            id->val < req->first + req->range) {
                                val_found = 1;
                                break;
                        }
                }

                if (idx_found && val_found) {
                        struct cn_msg m = { .ack = notify_event, };

                        memcpy(&m.id, id, sizeof(m.id));
                        cn_netlink_send(&m, ctl->group, GFP_KERNEL);
                }
        }
        up(&notify_lock);
}
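
/*
 * Match semantics sketch for the loops above (the numbers are only an
 * example): a request { .first = 0x10, .range = 4 } covers the
 * half-open interval [0x10, 0x14).  ctl->data holds idx_notify_num
 * idx requests immediately followed by val_notify_num val requests,
 * and a listener is notified only when both an idx request and a val
 * request match, e.g.:
 *
 *	idx_found = id->idx >= req->first &&
 *		    id->idx <  req->first + req->range;
 */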

/*
 * Callback add routine - adds a callback with the given ID and name.
 * If a callback with the same ID is already registered, it will not
 * be added.
 *
 * May sleep.
 */
int cn_add_callback(struct cb_id *id, char *name, void (*callback)(void *))
{
        int err;
        struct cn_dev *dev = &cdev;
        struct cn_callback *cb;

        cb = kzalloc(sizeof(*cb), GFP_KERNEL);
        if (!cb)
                return -ENOMEM;

        scnprintf(cb->name, sizeof(cb->name), "%s", name);

        memcpy(&cb->id, id, sizeof(cb->id));
        cb->callback = callback;

        err = cn_queue_add_callback(dev->cbdev, cb);
        if (err) {
                kfree(cb);
                return err;
        }

        cn_notify(id, 0);

        return 0;
}

/*
 * Callback remove routine - removes the callback with the given ID.
 * If there is no registered callback with the given ID, nothing
 * happens.
 *
 * May sleep while waiting for the reference counter to become zero.
 */
void cn_del_callback(struct cb_id *id)
{
        struct cn_dev *dev = &cdev;

        cn_queue_del_callback(dev->cbdev, id);
        cn_notify(id, 1);
}
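
/*
 * Usage sketch for cn_add_callback()/cn_del_callback() (an
 * illustrative assumption, not code used by this file): a
 * hypothetical module picks an unused idx/val pair and registers a
 * callback that receives the struct cn_msg as its data pointer, the
 * same way cn_callback() below does.
 *
 *	static struct cb_id my_id = { .idx = 0x123, .val = 0x456 };
 *
 *	static void my_callback(void *data)
 *	{
 *		struct cn_msg *msg = data;
 *
 *		printk(KERN_INFO "my_cn: seq=%u, ack=%u, len=%u.\n",
 *		       msg->seq, msg->ack, msg->len);
 *	}
 *
 *	// module init:	err = cn_add_callback(&my_id, "my_cn", my_callback);
 *	// module exit:	cn_del_callback(&my_id);
 */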

/*
 * Checks whether two connector control messages are the same.
 * Returns 1 if they are the same or if the first one is corrupted.
 */
static int cn_ctl_msg_equals(struct cn_ctl_msg *m1, struct cn_ctl_msg *m2)
{
        int i;
        struct cn_notify_req *req1, *req2;

        if (m1->idx_notify_num != m2->idx_notify_num)
                return 0;

        if (m1->val_notify_num != m2->val_notify_num)
                return 0;

        if (m1->len != m2->len)
                return 0;

        if ((m1->idx_notify_num + m1->val_notify_num) * sizeof(*req1) !=
            m1->len)
                return 1;

        req1 = (struct cn_notify_req *)m1->data;
        req2 = (struct cn_notify_req *)m2->data;

        for (i = 0; i < m1->idx_notify_num; ++i) {
                if (req1->first != req2->first || req1->range != req2->range)
                        return 0;
                req1++;
                req2++;
        }

        for (i = 0; i < m1->val_notify_num; ++i) {
                if (req1->first != req2->first || req1->range != req2->range)
                        return 0;
                req1++;
                req2++;
        }

        return 1;
}

/*
 * Main connector device's callback.
 *
 * Used for notification of a request's processing.
 */
static void cn_callback(void *data)
{
        struct cn_msg *msg = data;
        struct cn_ctl_msg *ctl;
        struct cn_ctl_entry *ent;
        u32 size;

        if (msg->len < sizeof(*ctl))
                return;

        ctl = (struct cn_ctl_msg *)msg->data;

        size = (sizeof(*ctl) + ((ctl->idx_notify_num +
                                 ctl->val_notify_num) *
                                sizeof(struct cn_notify_req)));

        if (msg->len != size)
                return;

        if (ctl->len + sizeof(*ctl) != msg->len)
                return;

        /*
         * Remove notification.
         */
        if (ctl->group == 0) {
                struct cn_ctl_entry *n;

                down(&notify_lock);
                list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
                        if (cn_ctl_msg_equals(ent->msg, ctl)) {
                                list_del(&ent->notify_entry);
                                kfree(ent);
                        }
                }
                up(&notify_lock);

                return;
        }

        size += sizeof(*ent);

        ent = kzalloc(size, GFP_KERNEL);
        if (!ent)
                return;

        ent->msg = (struct cn_ctl_msg *)(ent + 1);

        memcpy(ent->msg, ctl, size - sizeof(*ent));

        down(&notify_lock);
        list_add(&ent->notify_entry, &notify_list);
        up(&notify_lock);
}
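
/*
 * Control message sketch for cn_callback() (an assumption for
 * illustration; the netlink transport is omitted and the concrete
 * numbers are made up): the payload of a message addressed to the
 * connector's own id is a struct cn_ctl_msg directly followed by the
 * idx and val notify requests, so that msg->len == sizeof(ctl) +
 * ctl.len holds as checked above.  ctl.group selects the netlink
 * group to notify; sending the same message with group == 0 removes
 * the registration again.
 *
 *	// values to be packed contiguously right after struct cn_msg:
 *	struct cn_ctl_msg ctl = {
 *		.idx_notify_num	= 1,
 *		.val_notify_num	= 1,
 *		.group		= 2,
 *		.len		= 2 * sizeof(struct cn_notify_req),
 *	};
 *	struct cn_notify_req ranges[2] = {
 *		{ .first = 0x10, .range = 4 },	// idx range
 *		{ .first = 0x1,  .range = 1 },	// val range
 *	};
 */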

static int __init cn_init(void)
{
        struct cn_dev *dev = &cdev;
        int err;

        dev->input = cn_input;
        dev->id.idx = cn_idx;
        dev->id.val = cn_val;

        dev->nls = netlink_kernel_create(NETLINK_CONNECTOR,
                                         CN_NETLINK_USERS + 0xf,
                                         dev->input, THIS_MODULE);
        if (!dev->nls)
                return -EIO;

        dev->cbdev = cn_queue_alloc_dev("cqueue", dev->nls);
        if (!dev->cbdev) {
                if (dev->nls->sk_socket)
                        sock_release(dev->nls->sk_socket);
                return -EINVAL;
        }

        err = cn_add_callback(&dev->id, "connector", &cn_callback);
        if (err) {
                cn_queue_free_dev(dev->cbdev);
                if (dev->nls->sk_socket)
                        sock_release(dev->nls->sk_socket);
                return -EINVAL;
        }

        cn_already_initialized = 1;

        return 0;
}

static void __exit cn_fini(void)
{
        struct cn_dev *dev = &cdev;

        cn_already_initialized = 0;

        cn_del_callback(&dev->id);
        cn_queue_free_dev(dev->cbdev);
        if (dev->nls->sk_socket)
                sock_release(dev->nls->sk_socket);
}

module_init(cn_init);
module_exit(cn_fini);

EXPORT_SYMBOL_GPL(cn_add_callback);
EXPORT_SYMBOL_GPL(cn_del_callback);
EXPORT_SYMBOL_GPL(cn_netlink_send);