/******************************************************************************
 * xenbus_xs.c
 *
 * This is the kernel equivalent of the "xs" library. We don't need everything
 * and we use xenbus_comms for communication.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/unistd.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/kthread.h>
#include <linux/rwsem.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <xen/xenbus.h>
#include "xenbus_comms.h"

struct xs_stored_msg {
	struct list_head list;

	struct xsd_sockmsg hdr;

	union {
		/* Queued replies. */
		struct {
			char *body;
		} reply;

		/* Queued watch events. */
		struct {
			struct xenbus_watch *handle;
			char **vec;
			unsigned int vec_size;
		} watch;
	} u;
};

struct xs_handle {
	/* A list of replies. Currently only one will ever be outstanding. */
	struct list_head reply_list;
	spinlock_t reply_lock;
	wait_queue_head_t reply_waitq;

	/*
	 * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex.
	 * response_mutex is never taken simultaneously with the other three.
	 */

	/* One request at a time. */
	struct mutex request_mutex;

	/* Protect xenbus reader thread against save/restore. */
	struct mutex response_mutex;

	/* Protect transactions against save/restore. */
	struct rw_semaphore transaction_mutex;

	/* Protect watch (de)register against save/restore. */
	struct rw_semaphore watch_mutex;
};

static struct xs_handle xs_state;

/* List of registered watches, and a lock to protect it. */
static LIST_HEAD(watches);
static DEFINE_SPINLOCK(watches_lock);

/* List of pending watch callback events, and a lock to protect it. */
static LIST_HEAD(watch_events);
static DEFINE_SPINLOCK(watch_events_lock);

/*
 * Details of the xenwatch callback kernel thread. The thread waits on the
 * watch_events_waitq for work to do (queued on watch_events list). When it
 * wakes up it acquires the xenwatch_mutex before reading the list and
 * carrying out work.
 */
static pid_t xenwatch_pid;
static DEFINE_MUTEX(xenwatch_mutex);
static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);

static int get_error(const char *errorstring)
{
	unsigned int i;

	for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
		if (i == ARRAY_SIZE(xsd_errors) - 1) {
			printk(KERN_WARNING
			       "XENBUS xen store gave: unknown error %s",
			       errorstring);
			return EINVAL;
		}
	}
	return xsd_errors[i].errnum;
}

static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len)
{
	struct xs_stored_msg *msg;
	char *body;

	spin_lock(&xs_state.reply_lock);

	while (list_empty(&xs_state.reply_list)) {
		spin_unlock(&xs_state.reply_lock);
		/* XXX FIXME: Avoid synchronous wait for response here. */
		wait_event(xs_state.reply_waitq,
			   !list_empty(&xs_state.reply_list));
		spin_lock(&xs_state.reply_lock);
	}

	msg = list_entry(xs_state.reply_list.next,
			 struct xs_stored_msg, list);
	list_del(&msg->list);

	spin_unlock(&xs_state.reply_lock);

	*type = msg->hdr.type;
	if (len)
		*len = msg->hdr.len;
	body = msg->u.reply.body;

	kfree(msg);

	return body;
}

void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
{
	void *ret;
	struct xsd_sockmsg req_msg = *msg;
	int err;

	if (req_msg.type == XS_TRANSACTION_START)
		down_read(&xs_state.transaction_mutex);

	mutex_lock(&xs_state.request_mutex);

	err = xb_write(msg, sizeof(*msg) + msg->len);
	if (err) {
		msg->type = XS_ERROR;
		ret = ERR_PTR(err);
	} else
		ret = read_reply(&msg->type, &msg->len);

	mutex_unlock(&xs_state.request_mutex);

	if ((msg->type == XS_TRANSACTION_END) ||
	    ((req_msg.type == XS_TRANSACTION_START) &&
	     (msg->type == XS_ERROR)))
		up_read(&xs_state.transaction_mutex);

	return ret;
}

/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */
static void *xs_talkv(struct xenbus_transaction t,
		      enum xsd_sockmsg_type type,
		      const struct kvec *iovec,
		      unsigned int num_vecs,
		      unsigned int *len)
{
	struct xsd_sockmsg msg;
	void *ret = NULL;
	unsigned int i;
	int err;

	msg.tx_id = t.id;
	msg.req_id = 0;
	msg.type = type;
	msg.len = 0;
	for (i = 0; i < num_vecs; i++)
		msg.len += iovec[i].iov_len;

	mutex_lock(&xs_state.request_mutex);

	err = xb_write(&msg, sizeof(msg));
	if (err) {
		mutex_unlock(&xs_state.request_mutex);
		return ERR_PTR(err);
	}

	for (i = 0; i < num_vecs; i++) {
		err = xb_write(iovec[i].iov_base, iovec[i].iov_len);
		if (err) {
			mutex_unlock(&xs_state.request_mutex);
			return ERR_PTR(err);
		}
	}

	ret = read_reply(&msg.type, len);

	mutex_unlock(&xs_state.request_mutex);

	if (IS_ERR(ret))
		return ret;

	if (msg.type == XS_ERROR) {
		err = get_error(ret);
		kfree(ret);
		return ERR_PTR(-err);
	}

	if (msg.type != type) {
		if (printk_ratelimit())
			printk(KERN_WARNING
			       "XENBUS unexpected type [%d], expected [%d]\n",
			       msg.type, type);
		kfree(ret);
		return ERR_PTR(-EINVAL);
	}
	return ret;
}

/* Simplified version of xs_talkv: single message. */
static void *xs_single(struct xenbus_transaction t,
		       enum xsd_sockmsg_type type,
		       const char *string,
		       unsigned int *len)
{
	struct kvec iovec;

	iovec.iov_base = (void *)string;
	iovec.iov_len = strlen(string) + 1;
	return xs_talkv(t, type, &iovec, 1, len);
}

/* Many commands only need an ack, don't care what it says. */
static int xs_error(char *reply)
{
	if (IS_ERR(reply))
		return PTR_ERR(reply);
	kfree(reply);
	return 0;
}

static unsigned int count_strings(const char *strings, unsigned int len)
{
	unsigned int num;
	const char *p;

	for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
		num++;

	return num;
}

/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
static char *join(const char *dir, const char *name)
{
	char *buffer;

	if (strlen(name) == 0)
		buffer = kasprintf(GFP_KERNEL, "%s", dir);
	else
		buffer = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
	return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
}

static char **split(char *strings, unsigned int len, unsigned int *num)
{
	char *p, **ret;

	/* Count the strings. */
	*num = count_strings(strings, len);

	/* Transfer to one big alloc for easy freeing. */
	ret = kmalloc(*num * sizeof(char *) + len, GFP_KERNEL);
	if (!ret) {
		kfree(strings);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(&ret[*num], strings, len);
	kfree(strings);

	strings = (char *)&ret[*num];
	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
		ret[(*num)++] = p;

	return ret;
}

char **xenbus_directory(struct xenbus_transaction t,
			const char *dir, const char *node, unsigned int *num)
{
	char *strings, *path;
	unsigned int len;

	path = join(dir, node);
	if (IS_ERR(path))
		return (char **)path;

	strings = xs_single(t, XS_DIRECTORY, path, &len);
	kfree(path);
	if (IS_ERR(strings))
		return (char **)strings;

	return split(strings, len, num);
}
EXPORT_SYMBOL_GPL(xenbus_directory);
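
/*
 * Illustrative sketch, not used by this file: a caller might list a
 * directory and then release the result. The "device"/"vif" path below is a
 * made-up example node.
 *
 *	unsigned int i, num;
 *	char **entries = xenbus_directory(XBT_NIL, "device", "vif", &num);
 *	if (IS_ERR(entries))
 *		return PTR_ERR(entries);
 *	for (i = 0; i < num; i++)
 *		printk(KERN_DEBUG "entry %u: %s\n", i, entries[i]);
 *	kfree(entries);
 *
 * A single kfree() releases both the pointer array and the strings, because
 * split() packs them into one allocation.
 */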

/* Check if a path exists. Return 1 if it does. */
int xenbus_exists(struct xenbus_transaction t,
		  const char *dir, const char *node)
{
	char **d;
	int dir_n;

	d = xenbus_directory(t, dir, node, &dir_n);
	if (IS_ERR(d))
		return 0;
	kfree(d);
	return 1;
}
EXPORT_SYMBOL_GPL(xenbus_exists);

/* Get the value of a single file.
 * Returns a kmalloced value: call kfree() on it after use.
 * len indicates length in bytes.
 */
void *xenbus_read(struct xenbus_transaction t,
		  const char *dir, const char *node, unsigned int *len)
{
	char *path;
	void *ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return (void *)path;

	ret = xs_single(t, XS_READ, path, len);
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_read);
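
/*
 * Hedged usage sketch (not part of this file's logic): reading a node
 * relative to a device's backend directory; 'dev' here stands for a
 * hypothetical struct xenbus_device pointer.
 *
 *	unsigned int len;
 *	char *val = xenbus_read(XBT_NIL, dev->otherend, "state", &len);
 *	if (IS_ERR(val))
 *		return PTR_ERR(val);
 *	... use val ...
 *	kfree(val);
 */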

/* Write the value of a single file.
 * Returns -err on failure.
 */
int xenbus_write(struct xenbus_transaction t,
		 const char *dir, const char *node, const char *string)
{
	const char *path;
	struct kvec iovec[2];
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	iovec[0].iov_base = (void *)path;
	iovec[0].iov_len = strlen(path) + 1;
	iovec[1].iov_base = (void *)string;
	iovec[1].iov_len = strlen(string);

	ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_write);

/* Create a new directory. */
int xenbus_mkdir(struct xenbus_transaction t,
		 const char *dir, const char *node)
{
	char *path;
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_mkdir);

/* Destroy a file or directory (directories must be empty). */
int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
{
	char *path;
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = xs_error(xs_single(t, XS_RM, path, NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_rm);

/* Start a transaction: changes by others will not be seen during this
 * transaction, and changes will not be visible to others until end.
 */
int xenbus_transaction_start(struct xenbus_transaction *t)
{
	char *id_str;

	down_read(&xs_state.transaction_mutex);

	id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
	if (IS_ERR(id_str)) {
		up_read(&xs_state.transaction_mutex);
		return PTR_ERR(id_str);
	}

	t->id = simple_strtoul(id_str, NULL, 0);
	kfree(id_str);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_transaction_start);

/* End a transaction.
 * If abort is true, the transaction is discarded instead of committed.
 */
int xenbus_transaction_end(struct xenbus_transaction t, int abort)
{
	char abortstr[2];
	int err;

	if (abort)
		strcpy(abortstr, "F");
	else
		strcpy(abortstr, "T");

	err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));

	up_read(&xs_state.transaction_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_transaction_end);
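
/*
 * Sketch of the usual transaction calling pattern (illustrative only;
 * 'path', 'ring_ref' and 'evtchn' are hypothetical): retry the whole
 * transaction when ending it reports -EAGAIN, i.e. xenstored saw a
 * conflicting update.
 *
 *	struct xenbus_transaction xbt;
 *	int err;
 * again:
 *	err = xenbus_transaction_start(&xbt);
 *	if (err)
 *		return err;
 *	err = xenbus_printf(xbt, path, "ring-ref", "%u", ring_ref);
 *	if (!err)
 *		err = xenbus_printf(xbt, path, "event-channel", "%u", evtchn);
 *	if (err) {
 *		xenbus_transaction_end(xbt, 1);
 *		return err;
 *	}
 *	err = xenbus_transaction_end(xbt, 0);
 *	if (err == -EAGAIN)
 *		goto again;
 *	return err;
 */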

/* Single read and scanf: returns -errno or num scanned. */
int xenbus_scanf(struct xenbus_transaction t,
		 const char *dir, const char *node, const char *fmt, ...)
{
	va_list ap;
	int ret;
	char *val;

	val = xenbus_read(t, dir, node, NULL);
	if (IS_ERR(val))
		return PTR_ERR(val);

	va_start(ap, fmt);
	ret = vsscanf(val, fmt, ap);
	va_end(ap);
	kfree(val);
	/* Distinctive errno. */
	if (ret == 0)
		return -ERANGE;
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_scanf);

/* Single printf and write: returns -errno or 0. */
int xenbus_printf(struct xenbus_transaction t,
		  const char *dir, const char *node, const char *fmt, ...)
{
	va_list ap;
	int ret;
#define PRINTF_BUFFER_SIZE 4096
	char *printf_buffer;

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		return -ENOMEM;

	va_start(ap, fmt);
	ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
	va_end(ap);

	BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
	ret = xenbus_write(t, dir, node, printf_buffer);

	kfree(printf_buffer);

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_printf);
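
/*
 * Illustrative sketch of the scanf/printf pair (the node names below are
 * examples, not required by this file): read an optional integer flag with a
 * default, then publish one of our own.
 *
 *	int feature;
 *	if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d",
 *			 &feature) != 1)
 *		feature = 0;
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "feature-rx-copy",
 *			    "%d", 1);
 */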

/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
{
	va_list ap;
	const char *name;
	int ret = 0;

	va_start(ap, dir);
	while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
		const char *fmt = va_arg(ap, char *);
		void *result = va_arg(ap, void *);
		char *p;

		p = xenbus_read(t, dir, name, NULL);
		if (IS_ERR(p)) {
			ret = PTR_ERR(p);
			break;
		}
		if (fmt) {
			if (sscanf(p, fmt, result) == 0)
				ret = -EINVAL;
			kfree(p);
		} else
			*(char **)result = p;
	}
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_gather);
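
/*
 * Hedged example of xenbus_gather() (the directory and node names are
 * hypothetical): fetch several values in one call; the first failing read
 * or scan stops the walk and its error is returned.
 *
 *	unsigned long ring_ref;
 *	unsigned int evtchn;
 *	char *protocol;
 *	int err = xenbus_gather(XBT_NIL, dev->otherend,
 *				"ring-ref", "%lu", &ring_ref,
 *				"event-channel", "%u", &evtchn,
 *				"protocol", NULL, &protocol,
 *				NULL);
 *
 * Passing a NULL format returns the raw string; the caller must kfree() it.
 */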

static int xs_watch(const char *path, const char *token)
{
	struct kvec iov[2];

	iov[0].iov_base = (void *)path;
	iov[0].iov_len = strlen(path) + 1;
	iov[1].iov_base = (void *)token;
	iov[1].iov_len = strlen(token) + 1;

	return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
				 ARRAY_SIZE(iov), NULL));
}

static int xs_unwatch(const char *path, const char *token)
{
	struct kvec iov[2];

	iov[0].iov_base = (char *)path;
	iov[0].iov_len = strlen(path) + 1;
	iov[1].iov_base = (char *)token;
	iov[1].iov_len = strlen(token) + 1;

	return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
				 ARRAY_SIZE(iov), NULL));
}

static struct xenbus_watch *find_watch(const char *token)
{
	struct xenbus_watch *i, *cmp;

	cmp = (void *)simple_strtoul(token, NULL, 16);

	list_for_each_entry(i, &watches, list)
		if (i == cmp)
			return i;

	return NULL;
}

/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
	/* Pointer in ascii is the token. */
	char token[sizeof(watch) * 2 + 1];
	int err;

	sprintf(token, "%lX", (long)watch);

	down_read(&xs_state.watch_mutex);

	spin_lock(&watches_lock);
	BUG_ON(find_watch(token));
	list_add(&watch->list, &watches);
	spin_unlock(&watches_lock);

	err = xs_watch(watch->node, token);

	/* Ignore errors due to multiple registration. */
	if ((err != 0) && (err != -EEXIST)) {
		spin_lock(&watches_lock);
		list_del(&watch->list);
		spin_unlock(&watches_lock);
	}

	up_read(&xs_state.watch_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(register_xenbus_watch);
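
/*
 * Sketch of how a driver might register a watch (illustrative; the node path
 * and function names are made up). The callback runs from the xenwatch
 * thread, and vec[XS_WATCH_PATH] holds the path that fired.
 *
 *	static void backend_changed(struct xenbus_watch *watch,
 *				    const char **vec, unsigned int len)
 *	{
 *		... react to the change ...
 *	}
 *
 *	static struct xenbus_watch be_watch = {
 *		.node     = "backend/vbd/1/51712/state",
 *		.callback = backend_changed,
 *	};
 *
 *	err = register_xenbus_watch(&be_watch);
 */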

void unregister_xenbus_watch(struct xenbus_watch *watch)
{
	struct xs_stored_msg *msg, *tmp;
	char token[sizeof(watch) * 2 + 1];
	int err;

	sprintf(token, "%lX", (long)watch);

	down_read(&xs_state.watch_mutex);

	spin_lock(&watches_lock);
	BUG_ON(!find_watch(token));
	list_del(&watch->list);
	spin_unlock(&watches_lock);

	err = xs_unwatch(watch->node, token);
	if (err)
		printk(KERN_WARNING
		       "XENBUS Failed to release watch %s: %i\n",
		       watch->node, err);

	up_read(&xs_state.watch_mutex);

	/* Make sure there are no callbacks running currently (unless
	   it's us). */
	if (current->pid != xenwatch_pid)
		mutex_lock(&xenwatch_mutex);

	/* Cancel pending watch events. */
	spin_lock(&watch_events_lock);
	list_for_each_entry_safe(msg, tmp, &watch_events, list) {
		if (msg->u.watch.handle != watch)
			continue;
		list_del(&msg->list);
		kfree(msg->u.watch.vec);
		kfree(msg);
	}
	spin_unlock(&watch_events_lock);

	if (current->pid != xenwatch_pid)
		mutex_unlock(&xenwatch_mutex);
}
EXPORT_SYMBOL_GPL(unregister_xenbus_watch);

void xs_suspend(void)
{
	down_write(&xs_state.transaction_mutex);
	down_write(&xs_state.watch_mutex);
	mutex_lock(&xs_state.request_mutex);
	mutex_lock(&xs_state.response_mutex);
}

void xs_resume(void)
{
	struct xenbus_watch *watch;
	char token[sizeof(watch) * 2 + 1];

	mutex_unlock(&xs_state.response_mutex);
	mutex_unlock(&xs_state.request_mutex);
	up_write(&xs_state.transaction_mutex);

	/* No need for watches_lock: the watch_mutex is sufficient. */
	list_for_each_entry(watch, &watches, list) {
		sprintf(token, "%lX", (long)watch);
		xs_watch(watch->node, token);
	}

	up_write(&xs_state.watch_mutex);
}

void xs_suspend_cancel(void)
{
	mutex_unlock(&xs_state.response_mutex);
	mutex_unlock(&xs_state.request_mutex);
	up_write(&xs_state.watch_mutex);
	up_write(&xs_state.transaction_mutex);
}

static int xenwatch_thread(void *unused)
{
	struct list_head *ent;
	struct xs_stored_msg *msg;

	for (;;) {
		wait_event_interruptible(watch_events_waitq,
					 !list_empty(&watch_events));

		if (kthread_should_stop())
			break;

		mutex_lock(&xenwatch_mutex);

		spin_lock(&watch_events_lock);
		ent = watch_events.next;
		if (ent != &watch_events)
			list_del(ent);
		spin_unlock(&watch_events_lock);

		if (ent != &watch_events) {
			msg = list_entry(ent, struct xs_stored_msg, list);
			msg->u.watch.handle->callback(
				msg->u.watch.handle,
				(const char **)msg->u.watch.vec,
				msg->u.watch.vec_size);
			kfree(msg->u.watch.vec);
			kfree(msg);
		}

		mutex_unlock(&xenwatch_mutex);
	}

	return 0;
}

static int process_msg(void)
{
	struct xs_stored_msg *msg;
	char *body;
	int err;

	/*
	 * We must disallow save/restore while reading a xenstore message.
	 * A partial read across s/r leaves us out of sync with xenstored.
	 */
	for (;;) {
		err = xb_wait_for_data_to_read();
		if (err)
			return err;
		mutex_lock(&xs_state.response_mutex);
		if (xb_data_to_read())
			break;
		/* We raced with save/restore: pending data 'disappeared'. */
		mutex_unlock(&xs_state.response_mutex);
	}

	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
	if (msg == NULL) {
		err = -ENOMEM;
		goto out;
	}

	err = xb_read(&msg->hdr, sizeof(msg->hdr));
	if (err) {
		kfree(msg);
		goto out;
	}

	body = kmalloc(msg->hdr.len + 1, GFP_KERNEL);
	if (body == NULL) {
		kfree(msg);
		err = -ENOMEM;
		goto out;
	}

	err = xb_read(body, msg->hdr.len);
	if (err) {
		kfree(body);
		kfree(msg);
		goto out;
	}
	body[msg->hdr.len] = '\0';

	if (msg->hdr.type == XS_WATCH_EVENT) {
		msg->u.watch.vec = split(body, msg->hdr.len,
					 &msg->u.watch.vec_size);
		if (IS_ERR(msg->u.watch.vec)) {
			err = PTR_ERR(msg->u.watch.vec);
			kfree(msg);
			goto out;
		}

		spin_lock(&watches_lock);
		msg->u.watch.handle = find_watch(
			msg->u.watch.vec[XS_WATCH_TOKEN]);
		if (msg->u.watch.handle != NULL) {
			spin_lock(&watch_events_lock);
			list_add_tail(&msg->list, &watch_events);
			wake_up(&watch_events_waitq);
			spin_unlock(&watch_events_lock);
		} else {
			kfree(msg->u.watch.vec);
			kfree(msg);
		}
		spin_unlock(&watches_lock);
	} else {
		msg->u.reply.body = body;
		spin_lock(&xs_state.reply_lock);
		list_add_tail(&msg->list, &xs_state.reply_list);
		spin_unlock(&xs_state.reply_lock);
		wake_up(&xs_state.reply_waitq);
	}

 out:
	mutex_unlock(&xs_state.response_mutex);
	return err;
}

static int xenbus_thread(void *unused)
{
	int err;

	for (;;) {
		err = process_msg();
		if (err)
			printk(KERN_WARNING "XENBUS error %d while reading "
			       "message\n", err);
		if (kthread_should_stop())
			break;
	}

	return 0;
}

int xs_init(void)
{
	int err;
	struct task_struct *task;

	INIT_LIST_HEAD(&xs_state.reply_list);
	spin_lock_init(&xs_state.reply_lock);
	init_waitqueue_head(&xs_state.reply_waitq);

	mutex_init(&xs_state.request_mutex);
	mutex_init(&xs_state.response_mutex);
	init_rwsem(&xs_state.transaction_mutex);
	init_rwsem(&xs_state.watch_mutex);

	/* Initialize the shared memory rings to talk to xenstored */
	err = xb_init_comms();
	if (err)
		return err;

	task = kthread_run(xenwatch_thread, NULL, "xenwatch");
	if (IS_ERR(task))
		return PTR_ERR(task);
	xenwatch_pid = task->pid;

	task = kthread_run(xenbus_thread, NULL, "xenbus");
	if (IS_ERR(task))
		return PTR_ERR(task);

	return 0;
}