/*
2 * drivers/vservices/core_server.c
3 *
4 * Copyright (c) 2012-2018 General Dynamics
5 * Copyright (c) 2014 Open Kernel Labs, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Server side core service application driver
12 */
13
14#include <linux/kernel.h>
15#include <linux/device.h>
16#include <linux/mutex.h>
17#include <linux/slab.h>
18#include <linux/list.h>
19#include <linux/err.h>
20#include <linux/module.h>
21#include <linux/ctype.h>
22
23#include <vservices/types.h>
24#include <vservices/transport.h>
25#include <vservices/session.h>
26#include <vservices/buffer.h>
27#include <vservices/service.h>
28
29#include <vservices/protocol/core/types.h>
30#include <vservices/protocol/core/common.h>
31#include <vservices/protocol/core/server.h>
32
33#include "transport.h"
34#include "session.h"
35#include "compat.h"
36
37#define VSERVICE_CORE_SERVICE_NAME "core"
38
/*
 * Per-session state for the server side of the core protocol.
 * Allocated in vs_core_server_alloc() and stored as the core service's
 * driver data; freed in vs_core_server_release().
 */
struct core_server {
	/* Generated core-protocol server state; must be the first member so
	 * to_core_server() can recover this struct via container_of(). */
	struct vs_server_core_state state;
	/* The core service device (always service id 0) this state backs. */
	struct vs_service_device *service;

	/*
	 * A list of messages to send, a mutex protecting it, and a
	 * work item to process the list.
	 */
	struct list_head message_queue;
	struct mutex message_queue_lock;
	struct work_struct message_queue_work;

	struct mutex alloc_lock;

	/* The following are all protected by alloc_lock. */
	unsigned long *in_notify_map;
	int in_notify_map_bits;

	unsigned long *out_notify_map;
	int out_notify_map_bits;

	unsigned in_quota_remaining;
	unsigned out_quota_remaining;
};
63
/*
 * Used for message deferral when the core service is over quota.
 */
struct pending_message {
	/* Which core-protocol message to send when quota allows. */
	vservice_core_message_id_t type;
	/* Target service; holds a reference taken at queue time and dropped
	 * by message_queue_work (or by whoever flushes the queue). */
	struct vs_service_device *service;
	/* Link in core_server.message_queue, under message_queue_lock. */
	struct list_head list;
};
72
/* Recover the core_server from its embedded generated-protocol state. */
#define to_core_server(x) container_of(x, struct core_server, state)
/* Recover the core_server from the core service's struct device. */
#define dev_to_core_server(x) to_core_server(dev_get_drvdata(x))
75
76static struct vs_session_device *
77vs_core_server_session(struct core_server *server)
78{
79 return vs_service_get_session(server->service);
80}
81
82static struct core_server *
83vs_server_session_core_server(struct vs_session_device *session)
84{
85 struct vs_service_device *core_service = session->core_service;
86
87 if (!core_service)
88 return NULL;
89
90 return dev_to_core_server(&core_service->dev);
91}
92
93static int vs_server_core_send_service_removed(struct core_server *server,
94 struct vs_service_device *service)
95{
96 return vs_server_core_core_send_service_removed(&server->state,
97 service->id, GFP_KERNEL);
98}
99
100static bool
101cancel_pending_created(struct core_server *server,
102 struct vs_service_device *service)
103{
104 struct pending_message *msg;
105
106 list_for_each_entry(msg, &server->message_queue, list) {
107 if (msg->type == VSERVICE_CORE_CORE_MSG_SERVICE_CREATED &&
108 msg->service == service) {
109 vs_put_service(msg->service);
110 list_del(&msg->list);
111 kfree(msg);
112
113 /* there can only be one */
114 return true;
115 }
116 }
117
118 return false;
119}
120
/*
 * Queue a service_removed message for @service, to be sent later by
 * message_queue_work. Called with the service's ready lock held.
 *
 * Returns 0 on success — including the cases where nothing needs to be
 * sent — or -ENOMEM if the pending-message record cannot be allocated.
 */
static int vs_server_core_queue_service_removed(struct core_server *server,
		struct vs_service_device *service)
{
	struct pending_message *msg;

	lockdep_assert_held(&service->ready_lock);

	mutex_lock(&server->message_queue_lock);

	/*
	 * If we haven't sent the notification that the service was created,
	 * nuke it and do nothing else.
	 *
	 * This is not just an optimisation; see below.
	 */
	if (cancel_pending_created(server, service)) {
		mutex_unlock(&server->message_queue_lock);
		return 0;
	}

	/*
	 * Do nothing if the core state is not connected. We must avoid
	 * queueing service_removed messages on a reset service.
	 *
	 * Note that we cannot take the core server state lock here, because
	 * we may (or may not) have been called from a core service message
	 * handler. Thus, we must beware of races with changes to this
	 * condition:
	 *
	 * - It becomes true when the req_connect handler sends an
	 *   ack_connect, *after* it queues service_created for each existing
	 *   service (while holding the service ready lock). The handler sends
	 *   ack_connect with the message queue lock held.
	 *
	 *   - If we see the service as connected, then the req_connect
	 *     handler has already queued and sent a service_created for this
	 *     service, so it's ok for us to send a service_removed.
	 *
	 *   - If we see it as disconnected, the req_connect handler hasn't
	 *     taken the message queue lock to send ack_connect yet, and thus
	 *     has not released the service state lock; so if it queued a
	 *     service_created we caught it in the flush above before it was
	 *     sent.
	 *
	 * - It becomes false before the reset / disconnect handlers are
	 *   called and those will both flush the message queue afterwards.
	 *
	 *   - If we see the service as connected, then the reset / disconnect
	 *     handler is going to flush the message.
	 *
	 *   - If we see it disconnected, the state change has occurred and
	 *     implicitly had the same effect as this message, so doing
	 *     nothing is correct.
	 *
	 * Note that ordering in all of the above cases is guaranteed by the
	 * message queue lock.
	 */
	if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
		mutex_unlock(&server->message_queue_lock);
		return 0;
	}

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg) {
		mutex_unlock(&server->message_queue_lock);
		return -ENOMEM;
	}

	msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED;
	/* put by message_queue_work */
	msg->service = vs_get_service(service);

	list_add_tail(&msg->list, &server->message_queue);

	mutex_unlock(&server->message_queue_lock);
	queue_work(server->service->work_queue, &server->message_queue_work);

	return 0;
}
200
201static int vs_server_core_send_service_created(struct core_server *server,
202 struct vs_service_device *service)
203{
204 struct vs_session_device *session =
205 vs_service_get_session(server->service);
206
207 struct vs_mbuf *mbuf;
208 struct vs_string service_name, protocol_name;
209 size_t service_name_len, protocol_name_len;
210
211 int err;
212
213 mbuf = vs_server_core_core_alloc_service_created(&server->state,
214 &service_name, &protocol_name, GFP_KERNEL);
215
216 if (IS_ERR(mbuf))
217 return PTR_ERR(mbuf);
218
219 vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
220 "Sending service created message for %d (%s:%s)\n",
221 service->id, service->name, service->protocol);
222
223 service_name_len = strlen(service->name);
224 protocol_name_len = strlen(service->protocol);
225
226 if (service_name_len > vs_string_max_size(&service_name) ||
227 protocol_name_len > vs_string_max_size(&protocol_name)) {
228 dev_err(&session->dev,
229 "Invalid name/protocol for service %d (%s:%s)\n",
230 service->id, service->name,
231 service->protocol);
232 err = -EINVAL;
233 goto fail;
234 }
235
236 vs_string_copyin(&service_name, service->name);
237 vs_string_copyin(&protocol_name, service->protocol);
238
239 err = vs_server_core_core_send_service_created(&server->state,
240 service->id, service_name, protocol_name, mbuf);
241 if (err) {
242 dev_err(&session->dev,
243 "Fatal error sending service creation message for %d (%s:%s): %d\n",
244 service->id, service->name,
245 service->protocol, err);
246 goto fail;
247 }
248
249 return 0;
250
251fail:
252 vs_server_core_core_free_service_created(&server->state,
253 &service_name, &protocol_name, mbuf);
254
255 return err;
256}
257
258static int vs_server_core_queue_service_created(struct core_server *server,
259 struct vs_service_device *service)
260{
261 struct pending_message *msg;
262
263 lockdep_assert_held(&service->ready_lock);
264 lockdep_assert_held(&server->service->state_mutex);
265
266 mutex_lock(&server->message_queue_lock);
267
268 /* Do nothing if the core state is disconnected. */
269 if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
270 mutex_unlock(&server->message_queue_lock);
271 return 0;
272 }
273
274 msg = kzalloc(sizeof(*msg), GFP_KERNEL);
275 if (!msg) {
276 mutex_unlock(&server->message_queue_lock);
277 return -ENOMEM;
278 }
279
280 msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
281 /* put by message_queue_work */
282 msg->service = vs_get_service(service);
283
284 list_add_tail(&msg->list, &server->message_queue);
285
286 mutex_unlock(&server->message_queue_lock);
287 queue_work(server->service->work_queue, &server->message_queue_work);
288
289 return 0;
290}
291
/*
 * Validate and register a new service on a server-side session.
 * Returns the new service device, or an ERR_PTR on failure.
 *
 * NOTE(review): the name check rejects a leading '\n' but accepts an
 * empty string; it looks like '\0' may have been intended — confirm
 * against the protocol spec before changing.
 */
static struct vs_service_device *
__vs_server_core_register_service(struct vs_session_device *session,
		vs_service_id_t service_id, struct vs_service_device *owner,
		const char *name, const char *protocol, const void *plat_data)
{
	/* Only server sessions may register services from this side. */
	if (!session->is_server)
		return ERR_PTR(-ENODEV);

	/* Name must be present and no longer than the protocol's limit. */
	if (!name || strnlen(name, VSERVICE_CORE_SERVICE_NAME_SIZE + 1) >
			VSERVICE_CORE_SERVICE_NAME_SIZE || name[0] == '\n')
		return ERR_PTR(-EINVAL);

	/* The server core must only be registered as service_id zero */
	if (service_id == 0 && (owner != NULL ||
			strcmp(name, VSERVICE_CORE_SERVICE_NAME) != 0 ||
			strcmp(protocol, VSERVICE_CORE_PROTOCOL_NAME) != 0))
		return ERR_PTR(-EINVAL);

	return vs_service_register(session, owner, service_id, protocol, name,
			plat_data);
}
313
314static struct vs_service_device *
315vs_server_core_create_service(struct core_server *server,
316 struct vs_session_device *session,
317 struct vs_service_device *owner, vs_service_id_t service_id,
318 const char *name, const char *protocol, const void *plat_data)
319{
320 struct vs_service_device *service;
321
322 service = __vs_server_core_register_service(session, service_id,
323 owner, name, protocol, plat_data);
324 if (IS_ERR(service))
325 return service;
326
327 if (protocol) {
328 vs_service_state_lock(server->service);
329 vs_service_start(service);
330 if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
331 vs_service_enable(service);
332 vs_service_state_unlock(server->service);
333 }
334
335 return service;
336}
337
338static int
339vs_server_core_send_service_reset_ready(struct core_server *server,
340 vservice_core_message_id_t type,
341 struct vs_service_device *service)
342{
343 bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
344 struct vs_session_device *session __maybe_unused =
345 vs_service_get_session(server->service);
346 int err;
347
348 vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
349 "Sending %s for service %d\n",
350 is_reset ? "reset" : "ready", service->id);
351
352 if (is_reset)
353 err = vs_server_core_core_send_service_reset(&server->state,
354 service->id, GFP_KERNEL);
355 else
356 err = vs_server_core_core_send_server_ready(&server->state,
357 service->id, service->recv_quota,
358 service->send_quota,
359 service->notify_recv_offset,
360 service->notify_recv_bits,
361 service->notify_send_offset,
362 service->notify_send_bits,
363 GFP_KERNEL);
364
365 return err;
366}
367
368static bool
369cancel_pending_ready(struct core_server *server,
370 struct vs_service_device *service)
371{
372 struct pending_message *msg;
373
374 list_for_each_entry(msg, &server->message_queue, list) {
375 if (msg->type == VSERVICE_CORE_CORE_MSG_SERVER_READY &&
376 msg->service == service) {
377 vs_put_service(msg->service);
378 list_del(&msg->list);
379 kfree(msg);
380
381 /* there can only be one */
382 return true;
383 }
384 }
385
386 return false;
387}
388
389static int
390vs_server_core_queue_service_reset_ready(struct core_server *server,
391 vservice_core_message_id_t type,
392 struct vs_service_device *service)
393{
394 bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
395 struct pending_message *msg;
396
397 mutex_lock(&server->message_queue_lock);
398
399 /*
400 * If this is a reset, and there is an outgoing ready in the
401 * queue, we must cancel it so it can't be sent with invalid
402 * transport resources, and then return immediately so we
403 * don't send a redundant reset.
404 */
405 if (is_reset && cancel_pending_ready(server, service)) {
406 mutex_unlock(&server->message_queue_lock);
407 return VS_SERVICE_ALREADY_RESET;
408 }
409
410 msg = kzalloc(sizeof(*msg), GFP_KERNEL);
411 if (!msg) {
412 mutex_unlock(&server->message_queue_lock);
413 return -ENOMEM;
414 }
415
416 msg->type = type;
417 /* put by message_queue_work */
418 msg->service = vs_get_service(service);
419 list_add_tail(&msg->list, &server->message_queue);
420
421 mutex_unlock(&server->message_queue_lock);
422 queue_work(server->service->work_queue, &server->message_queue_work);
423
424 return 0;
425}
426
427static int vs_core_server_tx_ready(struct vs_server_core_state *state)
428{
429 struct core_server *server = to_core_server(state);
430 struct vs_session_device *session __maybe_unused =
431 vs_service_get_session(server->service);
432
433 vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev, "tx_ready\n");
434
435 queue_work(server->service->work_queue, &server->message_queue_work);
436
437 return 0;
438}
439
440static void message_queue_work(struct work_struct *work)
441{
442 struct core_server *server = container_of(work, struct core_server,
443 message_queue_work);
444 struct pending_message *msg;
445 int err;
446
447 vs_service_state_lock(server->service);
448
449 if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
450 vs_service_state_unlock(server->service);
451 return;
452 }
453
454 /*
455 * If any pending message fails we exit the loop immediately so that
456 * we preserve the message order.
457 */
458 mutex_lock(&server->message_queue_lock);
459 while (!list_empty(&server->message_queue)) {
460 msg = list_first_entry(&server->message_queue,
461 struct pending_message, list);
462
463 switch (msg->type) {
464 case VSERVICE_CORE_CORE_MSG_SERVICE_CREATED:
465 err = vs_server_core_send_service_created(server,
466 msg->service);
467 break;
468
469 case VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED:
470 err = vs_server_core_send_service_removed(server,
471 msg->service);
472 break;
473
474 case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
475 case VSERVICE_CORE_CORE_MSG_SERVER_READY:
476 err = vs_server_core_send_service_reset_ready(
477 server, msg->type, msg->service);
478 break;
479
480 default:
481 dev_warn(&server->service->dev,
482 "Don't know how to handle pending message type %d\n",
483 msg->type);
484 err = 0;
485 break;
486 }
487
488 /*
489 * If we're out of quota we exit and wait for tx_ready to
490 * queue us again.
491 */
492 if (err == -ENOBUFS)
493 break;
494
495 /* Any other error is fatal */
496 if (err < 0) {
497 dev_err(&server->service->dev,
498 "Failed to send pending message type %d: %d - resetting session",
499 msg->type, err);
500 vs_service_reset_nosync(server->service);
501 break;
502 }
503
504 /*
505 * The message sent successfully - remove it from the
506 * queue. The corresponding vs_get_service() was done
507 * when the pending message was created.
508 */
509 vs_put_service(msg->service);
510 list_del(&msg->list);
511 kfree(msg);
512 }
513 mutex_unlock(&server->message_queue_lock);
514
515 vs_service_state_unlock(server->service);
516
517 return;
518}
519
520/*
521 * Core server sysfs interface
522 */
523static ssize_t server_core_create_service_store(struct device *dev,
524 struct device_attribute *attr, const char *buf, size_t count)
525{
526 struct vs_service_device *service = to_vs_service_device(dev);
527 struct vs_session_device *session = to_vs_session_device(dev->parent);
528 struct core_server *server = dev_to_core_server(&service->dev);
529 struct vs_service_device *new_service;
530 char *p;
531 ssize_t ret = count;
532
533 /* FIXME - Buffer sizes are not defined in generated headers */
534 /* discard leading whitespace */
535 while (count && isspace(*buf)) {
536 buf++;
537 count--;
538 }
539 if (!count) {
540 dev_info(dev, "empty service name");
541 return -EINVAL;
542 }
543 /* discard trailing whitespace */
544 while (count && isspace(buf[count - 1]))
545 count--;
546
547 if (count > VSERVICE_CORE_SERVICE_NAME_SIZE) {
548 dev_info(dev, "service name too long (max %d)\n", VSERVICE_CORE_SERVICE_NAME_SIZE);
549 return -EINVAL;
550 }
551
552 p = kstrndup(buf, count, GFP_KERNEL);
553
554 /*
555 * Writing a service name to this file creates a new service. The
556 * service is created without a protocol. It will appear in sysfs
557 * but will not be bound to a driver until a valid protocol name
558 * has been written to the created devices protocol sysfs attribute.
559 */
560 new_service = vs_server_core_create_service(server, session, service,
561 VS_SERVICE_AUTO_ALLOCATE_ID, p, NULL, NULL);
562 if (IS_ERR(new_service))
563 ret = PTR_ERR(new_service);
564
565 kfree(p);
566
567 return ret;
568}
569
570static ssize_t server_core_reset_service_store(struct device *dev,
571 struct device_attribute *attr, const char *buf, size_t count)
572{
573 struct vs_service_device *core_service = to_vs_service_device(dev);
574 struct vs_session_device *session =
575 vs_service_get_session(core_service);
576 struct vs_service_device *target;
577 vs_service_id_t service_id;
578 unsigned long val;
579 int err;
580
581 /*
582 * Writing a valid service_id to this file does a reset of that service
583 */
584 err = kstrtoul(buf, 0, &val);
585 if (err)
586 return err;
587
588 service_id = val;
589 target = vs_session_get_service(session, service_id);
590 if (!target)
591 return -EINVAL;
592
593 err = vs_service_reset(target, core_service);
594
595 vs_put_service(target);
596 return err < 0 ? err : count;
597}
598
599static ssize_t server_core_remove_service_store(struct device *dev,
600 struct device_attribute *attr, const char *buf, size_t count)
601{
602 struct vs_service_device *service = to_vs_service_device(dev);
603 struct vs_session_device *session = vs_service_get_session(service);
604 struct vs_service_device *target;
605 vs_service_id_t service_id;
606 unsigned long val;
607 int err;
608
609 err = kstrtoul(buf, 0, &val);
610 if (err)
611 return err;
612
613 service_id = val;
614 if (service_id == 0) {
615 /*
616 * We don't allow removing the core service this way. The
617 * core service will be removed when the session is removed.
618 */
619 return -EINVAL;
620 }
621
622 target = vs_session_get_service(session, service_id);
623 if (!target)
624 return -EINVAL;
625
626 err = vs_service_delete(target, service);
627
628 vs_put_service(target);
629 return err < 0 ? err : count;
630}
631
/*
 * Write-only sysfs controls exposed on the core service device:
 * create_service, reset_service and remove_service. The group is
 * created in vs_core_server_alloc() and removed in
 * vs_core_server_release().
 */
static DEVICE_ATTR(create_service, S_IWUSR,
		NULL, server_core_create_service_store);
static DEVICE_ATTR(reset_service, S_IWUSR,
		NULL, server_core_reset_service_store);
static DEVICE_ATTR(remove_service, S_IWUSR,
		NULL, server_core_remove_service_store);

static struct attribute *server_core_dev_attrs[] = {
	&dev_attr_create_service.attr,
	&dev_attr_reset_service.attr,
	&dev_attr_remove_service.attr,
	NULL,
};

static const struct attribute_group server_core_attr_group = {
	.attrs = server_core_dev_attrs,
};
649
650static int init_transport_resource_allocation(struct core_server *server)
651{
652 struct vs_session_device *session = vs_core_server_session(server);
653 struct vs_transport *transport = session->transport;
654 size_t size;
655 int err;
656
657 mutex_init(&server->alloc_lock);
658 mutex_lock(&server->alloc_lock);
659
660 transport->vt->get_quota_limits(transport, &server->out_quota_remaining,
661 &server->in_quota_remaining);
662
663 transport->vt->get_notify_bits(transport, &server->out_notify_map_bits,
664 &server->in_notify_map_bits);
665
666 size = BITS_TO_LONGS(server->in_notify_map_bits) *
667 sizeof(unsigned long);
668 server->in_notify_map = kzalloc(size, GFP_KERNEL);
669 if (server->in_notify_map_bits && !server->in_notify_map) {
670 err = -ENOMEM;
671 goto fail;
672 }
673
674 size = BITS_TO_LONGS(server->out_notify_map_bits) *
675 sizeof(unsigned long);
676 server->out_notify_map = kzalloc(size, GFP_KERNEL);
677 if (server->out_notify_map_bits && !server->out_notify_map) {
678 err = -ENOMEM;
679 goto fail_free_in_bits;
680 }
681
682 mutex_unlock(&server->alloc_lock);
683
684 return 0;
685
686fail_free_in_bits:
687 kfree(server->in_notify_map);
688fail:
689 mutex_unlock(&server->alloc_lock);
690 return err;
691}
692
/*
 * alloc_quota - carve a message quota out of the remaining pool.
 * @minimum: smallest acceptable quota.
 * @best: preferred quota (used when @set is 0; clamped to the pool).
 * @set: explicitly requested quota (overrides @best when non-zero).
 * @remaining: pool of unallocated quota; reduced on success.
 *
 * Returns the allocated quota (clamped to INT_MAX), or -ENOSPC if the
 * request cannot be satisfied.
 *
 * Fix vs. previous revision: a minimum-only request (set == 0 and
 * best == 0) was never checked against *remaining, so it could
 * underflow the unsigned pool. The pool check now applies to every
 * path.
 */
static int alloc_quota(unsigned minimum, unsigned best, unsigned set,
		unsigned *remaining)
{
	unsigned quota;

	if (set)
		quota = set;
	else if (best)
		quota = (best < *remaining) ? best : *remaining;
	else
		quota = minimum;

	if (quota < minimum || quota > *remaining)
		return -ENOSPC;

	*remaining -= quota;

	return (quota > (unsigned)INT_MAX) ? INT_MAX : (int)quota;
}
716
717static int alloc_notify_bits(unsigned notify_count, unsigned long *map,
718 unsigned nr_bits)
719{
720 unsigned offset;
721
722 if (notify_count) {
723 offset = bitmap_find_next_zero_area(map, nr_bits, 0,
724 notify_count, 0);
725
726 if (offset >= nr_bits || offset > (unsigned)INT_MAX)
727 return -ENOSPC;
728
729 bitmap_set(map, offset, notify_count);
730 } else {
731 offset = 0;
732 }
733
734 return offset;
735}
736
/*
 * alloc_transport_resources - Allocates the quotas and notification bits for
 * a service.
 * @server: the core service state.
 * @service: the service device to allocate resources for.
 *
 * This function allocates message quotas and notification bits. It is called
 * for the core service in alloc(), and for every other service by the server
 * bus probe() function.
 *
 * Returns 0 on success, or a negative errno. On failure every resource
 * claimed so far is rolled back and the service's resource fields are
 * zeroed.
 */
static int alloc_transport_resources(struct core_server *server,
		struct vs_service_device *service)
{
	struct vs_session_device *session __maybe_unused =
			vs_service_get_session(service);
	unsigned in_bit_offset, out_bit_offset;
	unsigned in_quota, out_quota;
	int ret;
	struct vs_service_driver *driver;

	if (WARN_ON(!service->dev.driver))
		return -ENODEV;

	mutex_lock(&server->alloc_lock);

	driver = to_vs_service_driver(service->dev.driver);

	/* Quota allocations */
	ret = alloc_quota(driver->in_quota_min, driver->in_quota_best,
			service->in_quota_set, &server->in_quota_remaining);
	if (ret < 0) {
		dev_err(&service->dev, "cannot allocate in quota\n");
		goto fail_in_quota;
	}
	in_quota = ret;

	ret = alloc_quota(driver->out_quota_min, driver->out_quota_best,
			service->out_quota_set, &server->out_quota_remaining);
	if (ret < 0) {
		dev_err(&service->dev, "cannot allocate out quota\n");
		goto fail_out_quota;
	}
	out_quota = ret;

	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
			"%d: quota in: %u out: %u; remaining in: %u out: %u\n",
			service->id, in_quota, out_quota,
			server->in_quota_remaining,
			server->out_quota_remaining);

	/* Notification bit allocations */
	ret = alloc_notify_bits(service->notify_recv_bits,
			server->in_notify_map, server->in_notify_map_bits);
	if (ret < 0) {
		dev_err(&service->dev, "cannot allocate in notify bits\n");
		goto fail_in_notify;
	}
	in_bit_offset = ret;

	ret = alloc_notify_bits(service->notify_send_bits,
			server->out_notify_map, server->out_notify_map_bits);
	if (ret < 0) {
		dev_err(&service->dev, "cannot allocate out notify bits\n");
		goto fail_out_notify;
	}
	out_bit_offset = ret;

	vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
			"notify bits in: %u/%u out: %u/%u\n",
			in_bit_offset, service->notify_recv_bits,
			out_bit_offset, service->notify_send_bits);

	/* Fill in the device's allocations */
	service->recv_quota = in_quota;
	service->send_quota = out_quota;
	service->notify_recv_offset = in_bit_offset;
	service->notify_send_offset = out_bit_offset;

	mutex_unlock(&server->alloc_lock);

	return 0;

	/* Unwind in reverse order of allocation. */
fail_out_notify:
	/* Release the in-direction notify bits claimed above. */
	if (service->notify_recv_bits)
		bitmap_clear(server->in_notify_map,
				in_bit_offset, service->notify_recv_bits);
fail_in_notify:
	/* Return the out-direction quota to the pool. */
	server->out_quota_remaining += out_quota;
fail_out_quota:
	/* Return the in-direction quota to the pool. */
	server->in_quota_remaining += in_quota;
fail_in_quota:

	mutex_unlock(&server->alloc_lock);

	service->recv_quota = 0;
	service->send_quota = 0;
	service->notify_recv_bits = 0;
	service->notify_recv_offset = 0;
	service->notify_send_bits = 0;
	service->notify_send_offset = 0;

	return ret;
}
840
/*
 * free_transport_resources - Frees the quotas and notification bits for
 * a non-core service.
 * @server: the core service state.
 * @service: the service device to free resources for.
 *
 * This function is called by the server to free message quotas and
 * notification bits that were allocated by alloc_transport_resources. It must
 * only be called when the target service is in reset, and must be called with
 * the core service's state lock held.
 *
 * Always returns 0; the service's resource fields are zeroed on return.
 */
static int free_transport_resources(struct core_server *server,
		struct vs_service_device *service)
{
	mutex_lock(&server->alloc_lock);

	/* Release the service's notification bit ranges, if any. */
	if (service->notify_recv_bits)
		bitmap_clear(server->in_notify_map,
				service->notify_recv_offset,
				service->notify_recv_bits);

	if (service->notify_send_bits)
		bitmap_clear(server->out_notify_map,
				service->notify_send_offset,
				service->notify_send_bits);

	/* Return the service's quotas to the shared pools. */
	server->in_quota_remaining += service->recv_quota;
	server->out_quota_remaining += service->send_quota;

	mutex_unlock(&server->alloc_lock);

	service->recv_quota = 0;
	service->send_quota = 0;
	service->notify_recv_bits = 0;
	service->notify_recv_offset = 0;
	service->notify_send_bits = 0;
	service->notify_send_offset = 0;

	return 0;
}
881
/*
 * alloc callback for the core protocol driver: build the core_server
 * state for the (mandatory id 0) core service, set up the pending
 * message queue, transport resource pools, the core service's own
 * resources, and the sysfs control group. Returns the embedded protocol
 * state, or NULL on any failure (all partial setup is unwound).
 */
static struct vs_server_core_state *
vs_core_server_alloc(struct vs_service_device *service)
{
	struct core_server *server;
	int err;

	/* The core service must always have id 0. */
	if (WARN_ON(service->id != 0))
		goto fail;

	server = kzalloc(sizeof(*server), GFP_KERNEL);
	if (!server)
		goto fail;

	server->service = service;
	INIT_LIST_HEAD(&server->message_queue);
	INIT_WORK(&server->message_queue_work, message_queue_work);
	mutex_init(&server->message_queue_lock);

	err = init_transport_resource_allocation(server);
	if (err)
		goto fail_init_alloc;

	err = alloc_transport_resources(server, service);
	if (err)
		goto fail_alloc_transport;

	err = sysfs_create_group(&service->dev.kobj, &server_core_attr_group);
	if (err)
		goto fail_sysfs;

	return &server->state;

fail_sysfs:
	free_transport_resources(server, service);
fail_alloc_transport:
	kfree(server->out_notify_map);
	kfree(server->in_notify_map);
fail_init_alloc:
	kfree(server);
fail:
	return NULL;
}
924
925static void vs_core_server_release(struct vs_server_core_state *state)
926{
927 struct core_server *server = to_core_server(state);
928 struct vs_session_device *session = vs_core_server_session(server);
929
930 /* Delete all the other services */
931 vs_session_delete_noncore(session);
932
933 sysfs_remove_group(&server->service->dev.kobj, &server_core_attr_group);
934 kfree(server->out_notify_map);
935 kfree(server->in_notify_map);
936 kfree(server);
937}
938
939/**
940 * vs_server_create_service - create and register a new vService server
941 * @session: the session to create the vService server on
942 * @parent: an existing server that is managing the new server
943 * @name: the name of the new service
944 * @protocol: the protocol for the new service
945 * @plat_data: value to be assigned to (struct device *)->platform_data
946 */
947struct vs_service_device *
948vs_server_create_service(struct vs_session_device *session,
949 struct vs_service_device *parent, const char *name,
950 const char *protocol, const void *plat_data)
951{
952 struct vs_service_device *core_service, *new_service;
953 struct core_server *server;
954
955 if (!session->is_server || !name || !protocol)
956 return NULL;
957
958 core_service = session->core_service;
959 if (!core_service)
960 return NULL;
961
962 device_lock(&core_service->dev);
963 if (!core_service->dev.driver) {
964 device_unlock(&core_service->dev);
965 return NULL;
966 }
967
968 server = dev_to_core_server(&core_service->dev);
969
970 if (!parent)
971 parent = core_service;
972
973 new_service = vs_server_core_create_service(server, session, parent,
974 VS_SERVICE_AUTO_ALLOCATE_ID, name, protocol, plat_data);
975
976 device_unlock(&core_service->dev);
977
978 if (IS_ERR(new_service))
979 return NULL;
980
981 return new_service;
982}
983EXPORT_SYMBOL(vs_server_create_service);
984
985/**
986 * vs_server_destroy_service - destroy and unregister a vService server. This
987 * function must _not_ be used from the target service's own workqueue.
988 * @service: The service to destroy
989 */
990int vs_server_destroy_service(struct vs_service_device *service,
991 struct vs_service_device *parent)
992{
993 struct vs_session_device *session = vs_service_get_session(service);
994
995 if (!session->is_server || service->id == 0)
996 return -EINVAL;
997
998 if (!parent)
999 parent = session->core_service;
1000
1001 return vs_service_delete(service, parent);
1002}
1003EXPORT_SYMBOL(vs_server_destroy_service);
1004
/* vs_session_for_each_service() callback: announce one service. */
static void __queue_service_created(struct vs_service_device *service,
		void *data)
{
	struct core_server *server = data;

	vs_server_core_queue_service_created(server, service);
}
1012
1013static int vs_server_core_handle_connect(struct vs_server_core_state *state)
1014{
1015 struct core_server *server = to_core_server(state);
1016 struct vs_session_device *session = vs_core_server_session(server);
1017 int err;
1018
1019 /* Tell the other end that we've finished connecting. */
1020 err = vs_server_core_core_send_ack_connect(state, GFP_KERNEL);
1021 if (err)
1022 return err;
1023
1024 /* Queue a service-created message for each existing service. */
1025 vs_session_for_each_service(session, __queue_service_created, server);
1026
1027 /* Re-enable all the services. */
1028 vs_session_enable_noncore(session);
1029
1030 return 0;
1031}
1032
1033static void vs_core_server_disable_services(struct core_server *server)
1034{
1035 struct vs_session_device *session = vs_core_server_session(server);
1036 struct pending_message *msg;
1037
1038 /* Disable all the other services */
1039 vs_session_disable_noncore(session);
1040
1041 /* Flush all the pending service-readiness messages */
1042 mutex_lock(&server->message_queue_lock);
1043 while (!list_empty(&server->message_queue)) {
1044 msg = list_first_entry(&server->message_queue,
1045 struct pending_message, list);
1046 vs_put_service(msg->service);
1047 list_del(&msg->list);
1048 kfree(msg);
1049 }
1050 mutex_unlock(&server->message_queue_lock);
1051}
1052
1053static int vs_server_core_handle_disconnect(struct vs_server_core_state *state)
1054{
1055 struct core_server *server = to_core_server(state);
1056
1057 vs_core_server_disable_services(server);
1058
1059 return vs_server_core_core_send_ack_disconnect(state, GFP_KERNEL);
1060}
1061
1062static int
1063vs_server_core_handle_service_reset(struct vs_server_core_state *state,
1064 unsigned service_id)
1065{
1066 struct core_server *server = to_core_server(state);
1067 struct vs_session_device *session = vs_core_server_session(server);
1068
1069 if (service_id == 0)
1070 return -EPROTO;
1071
1072 return vs_service_handle_reset(session, service_id, false);
1073}
1074
1075static void vs_core_server_start(struct vs_server_core_state *state)
1076{
1077 struct core_server *server = to_core_server(state);
1078 struct vs_session_device *session = vs_core_server_session(server);
1079 int err;
1080
1081 vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
1082 "Core server start\n");
1083
1084 err = vs_server_core_core_send_startup(&server->state,
1085 server->service->recv_quota,
1086 server->service->send_quota, GFP_KERNEL);
1087
1088 if (err)
1089 dev_err(&session->dev, "Failed to start core protocol: %d\n",
1090 err);
1091}
1092
1093static void vs_core_server_reset(struct vs_server_core_state *state)
1094{
1095 struct core_server *server = to_core_server(state);
1096 struct vs_session_device *session = vs_core_server_session(server);
1097
1098 vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
1099 "Core server reset\n");
1100
1101 vs_core_server_disable_services(server);
1102}
1103
/*
 * Core protocol server operations, registered with the generated core
 * protocol framework by vs_core_server_init().
 */
static struct vs_server_core vs_core_server_driver = {
	.alloc = vs_core_server_alloc,
	.release = vs_core_server_release,
	.start = vs_core_server_start,
	.reset = vs_core_server_reset,
	.tx_ready = vs_core_server_tx_ready,
	.core = {
		/* Incoming core-protocol message handlers. */
		.req_connect = vs_server_core_handle_connect,
		.req_disconnect = vs_server_core_handle_disconnect,
		.msg_service_reset = vs_server_core_handle_service_reset,
	},
};
1116
1117/*
1118 * Server bus driver
1119 */
1120static int vs_server_bus_match(struct device *dev, struct device_driver *driver)
1121{
1122 struct vs_service_device *service = to_vs_service_device(dev);
1123 struct vs_service_driver *vsdrv = to_vs_service_driver(driver);
1124
1125 /* Don't match anything to the devio driver; it's bound manually */
1126 if (!vsdrv->protocol)
1127 return 0;
1128
1129 WARN_ON_ONCE(!service->is_server || !vsdrv->is_server);
1130
1131 /* Don't match anything that doesn't have a protocol set yet */
1132 if (!service->protocol)
1133 return 0;
1134
1135 if (strcmp(service->protocol, vsdrv->protocol) == 0)
1136 return 1;
1137
1138 return 0;
1139}
1140
/*
 * Bus probe callback: prepare a server service device for its newly
 * matched driver, then hand off to the generic vs_service_bus_probe().
 *
 * Returns 0 on success or a negative errno.
 */
static int vs_server_bus_probe(struct device *dev)
{
	struct vs_service_device *service = to_vs_service_device(dev);
	struct vs_session_device *session = vs_service_get_session(service);
	struct core_server *server = vs_server_session_core_server(session);
	int ret;

	/*
	 * Set the notify counts for the service, unless the driver is the
	 * devio driver in which case it has already been done by the devio
	 * bind ioctl. The devio driver cannot be bound automatically.
	 */
	struct vs_service_driver *driver =
		to_vs_service_driver(service->dev.driver);
#ifdef CONFIG_VSERVICES_CHAR_DEV
	if (driver != &vs_devio_server_driver)
#endif
	{
		service->notify_recv_bits = driver->in_notify_count;
		service->notify_send_bits = driver->out_notify_count;
	}

	/*
	 * We can't allocate transport resources here for the core service
	 * because the resource pool doesn't exist yet. It's done in alloc()
	 * instead (which is called, indirectly, by vs_service_bus_probe()).
	 */
	if (service->id == 0)
		return vs_service_bus_probe(dev);

	/* No core server means the session is going away; refuse to bind. */
	if (!server)
		return -ENODEV;
	ret = alloc_transport_resources(server, service);
	if (ret < 0)
		goto fail;

	ret = vs_service_bus_probe(dev);
	if (ret < 0)
		goto fail_free_resources;

	return 0;

fail_free_resources:
	free_transport_resources(server, service);
fail:
	return ret;
}
1188
1189static int vs_server_bus_remove(struct device *dev)
1190{
1191 struct vs_service_device *service = to_vs_service_device(dev);
1192 struct vs_session_device *session = vs_service_get_session(service);
1193 struct core_server *server = vs_server_session_core_server(session);
1194
1195 vs_service_bus_remove(dev);
1196
1197 /*
1198 * We skip free_transport_resources for the core service because the
1199 * resource pool has already been freed at this point. It's also
1200 * possible that the core service has disappeared, in which case
1201 * there's no work to do here.
1202 */
1203 if (server != NULL && service->id != 0)
1204 free_transport_resources(server, service);
1205
1206 return 0;
1207}
1208
1209static ssize_t is_server_show(struct device *dev, struct device_attribute *attr,
1210 char *buf)
1211{
1212 struct vs_service_device *service = to_vs_service_device(dev);
1213
1214 return scnprintf(buf, PAGE_SIZE, "%d\n", service->is_server);
1215}
1216
1217static ssize_t id_show(struct device *dev, struct device_attribute *attr,
1218 char *buf)
1219{
1220 struct vs_service_device *service = to_vs_service_device(dev);
1221
1222 return scnprintf(buf, PAGE_SIZE, "%d\n", service->id);
1223}
1224
1225static ssize_t dev_protocol_show(struct device *dev,
1226 struct device_attribute *attr, char *buf)
1227{
1228 struct vs_service_device *service = to_vs_service_device(dev);
1229
1230 return scnprintf(buf, PAGE_SIZE, "%s\n", service->protocol ?: "");
1231}
1232
/*
 * Deferred-work wrapper used by dev_protocol_store() to enable a service
 * outside the sysfs write path.
 */
struct service_enable_work_struct {
	struct vs_service_device *service; /* ref held until the work runs */
	struct work_struct work;
};
1237
1238static void service_enable_work(struct work_struct *work)
1239{
1240 struct service_enable_work_struct *enable_work = container_of(work,
1241 struct service_enable_work_struct, work);
1242 struct vs_service_device *service = enable_work->service;
1243 struct vs_session_device *session = vs_service_get_session(service);
1244 struct core_server *server = vs_server_session_core_server(session);
1245 bool started;
1246 int ret;
1247
1248 kfree(enable_work);
1249
1250 if (!server)
1251 return;
1252 /* Start and enable the service */
1253 vs_service_state_lock(server->service);
1254 started = vs_service_start(service);
1255 if (!started) {
1256 vs_service_state_unlock(server->service);
1257 vs_put_service(service);
1258 return;
1259 }
1260
1261 if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
1262 vs_service_enable(service);
1263 vs_service_state_unlock(server->service);
1264
1265 /* Tell the bus to search for a driver that supports the protocol */
1266 ret = device_attach(&service->dev);
1267 if (ret == 0)
1268 dev_warn(&service->dev, "No driver found for protocol: %s\n",
1269 service->protocol);
1270 kobject_uevent(&service->dev.kobj, KOBJ_CHANGE);
1271
1272 /* The corresponding vs_get_service was done when the work was queued */
1273 vs_put_service(service);
1274}
1275
1276static ssize_t dev_protocol_store(struct device *dev,
1277 struct device_attribute *attr, const char *buf, size_t count)
1278{
1279 struct vs_service_device *service = to_vs_service_device(dev);
1280 struct service_enable_work_struct *enable_work;
1281
1282 /* The protocol can only be set once */
1283 if (service->protocol)
1284 return -EPERM;
1285
1286 /* Registering additional core servers is not allowed */
1287 if (strcmp(buf, VSERVICE_CORE_PROTOCOL_NAME) == 0)
1288 return -EINVAL;
1289
1290 if (strnlen(buf, VSERVICE_CORE_PROTOCOL_NAME_SIZE) + 1 >
1291 VSERVICE_CORE_PROTOCOL_NAME_SIZE)
1292 return -E2BIG;
1293
1294 enable_work = kmalloc(sizeof(*enable_work), GFP_KERNEL);
1295 if (!enable_work)
1296 return -ENOMEM;
1297
1298 /* Set the protocol and tell the client about it */
1299 service->protocol = kstrdup(buf, GFP_KERNEL);
1300 if (!service->protocol) {
1301 kfree(enable_work);
1302 return -ENOMEM;
1303 }
1304 strim(service->protocol);
1305
1306 /*
1307 * Schedule work to enable the service. We can't do it here because
1308 * we need to take the core service lock, and doing that here makes
1309 * it depend circularly on this sysfs attribute, which can be deleted
1310 * with that lock held.
1311 *
1312 * The corresponding vs_put_service is called in the enable_work
1313 * function.
1314 */
1315 INIT_WORK(&enable_work->work, service_enable_work);
1316 enable_work->service = vs_get_service(service);
1317 schedule_work(&enable_work->work);
1318
1319 return count;
1320}
1321
1322static ssize_t service_name_show(struct device *dev,
1323 struct device_attribute *attr, char *buf)
1324{
1325 struct vs_service_device *service = to_vs_service_device(dev);
1326
1327 return scnprintf(buf, PAGE_SIZE, "%s\n", service->name);
1328}
1329
/*
 * sysfs: set the requested receive quota for a service. Only permitted
 * while no driver is bound (i.e. before transport resources have been
 * allocated). Returns @count on success or a negative errno.
 */
static ssize_t quota_in_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vs_service_device *service = to_vs_service_device(dev);
	struct vs_session_device *session = vs_service_get_session(service);
	struct core_server *server = vs_server_session_core_server(session);
	int ret;
	unsigned long in_quota;

	/* Without a core server there is nowhere to apply the quota. */
	if (!server)
		return -ENODEV;
	/*
	 * Don't allow quota to be changed for services that have a driver
	 * bound. We take the alloc lock here because the device lock is held
	 * while creating and destroying this sysfs item. This means we can
	 * race with driver binding, but that doesn't matter: we actually just
	 * want to know that alloc_transport_resources() hasn't run yet, and
	 * that takes the alloc lock.
	 */
	mutex_lock(&server->alloc_lock);
	if (service->dev.driver) {
		ret = -EPERM;
		goto out;
	}

	ret = kstrtoul(buf, 0, &in_quota);
	if (ret < 0)
		goto out;

	/*
	 * NOTE(review): in_quota is parsed as unsigned long but stored in
	 * in_quota_set, whose type is not visible here — values above that
	 * field's range would be silently truncated. Confirm and add a
	 * range check if needed.
	 */
	service->in_quota_set = in_quota;
	ret = count;

out:
	mutex_unlock(&server->alloc_lock);

	return ret;
}
1367
1368static ssize_t quota_in_show(struct device *dev,
1369 struct device_attribute *attr, char *buf)
1370{
1371 struct vs_service_device *service = to_vs_service_device(dev);
1372
1373 return scnprintf(buf, PAGE_SIZE, "%u\n", service->recv_quota);
1374}
1375
/*
 * sysfs: set the requested send quota for a service. Only permitted
 * while no driver is bound. Returns @count on success or a negative
 * errno.
 */
static ssize_t quota_out_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct vs_service_device *service = to_vs_service_device(dev);
	struct vs_session_device *session = vs_service_get_session(service);
	struct core_server *server = vs_server_session_core_server(session);
	int ret;
	unsigned long out_quota;

	/* Without a core server there is nowhere to apply the quota. */
	if (!server)
		return -ENODEV;
	/* See comment in quota_in_store. */
	mutex_lock(&server->alloc_lock);
	if (service->dev.driver) {
		ret = -EPERM;
		goto out;
	}

	ret = kstrtoul(buf, 0, &out_quota);
	if (ret < 0)
		goto out;

	/*
	 * NOTE(review): as in quota_in_store, values above out_quota_set's
	 * range would be silently truncated — confirm the field type.
	 */
	service->out_quota_set = out_quota;
	ret = count;

out:
	mutex_unlock(&server->alloc_lock);

	return ret;
}
1406
1407static ssize_t quota_out_show(struct device *dev,
1408 struct device_attribute *attr, char *buf)
1409{
1410 struct vs_service_device *service = to_vs_service_device(dev);
1411
1412 return scnprintf(buf, PAGE_SIZE, "%u\n", service->send_quota);
1413}
1414
/*
 * Per-service sysfs attributes for devices on the server bus. The
 * protocol and quota attributes are writable by root; quotas may only
 * be changed before a driver is bound, and the protocol only set once.
 */
static struct device_attribute vs_server_dev_attrs[] = {
	__ATTR_RO(id),
	__ATTR_RO(is_server),
	__ATTR(protocol, S_IRUGO | S_IWUSR,
			dev_protocol_show, dev_protocol_store),
	__ATTR_RO(service_name),
	__ATTR(quota_in, S_IRUGO | S_IWUSR,
			quota_in_show, quota_in_store),
	__ATTR(quota_out, S_IRUGO | S_IWUSR,
			quota_out_show, quota_out_store),
	__ATTR_NULL
};
1427
1428static ssize_t protocol_show(struct device_driver *drv, char *buf)
1429{
1430 struct vs_service_driver *vsdrv = to_vs_service_driver(drv);
1431
1432 return scnprintf(buf, PAGE_SIZE, "%s\n", vsdrv->protocol);
1433}
1434
/*
 * Driver sysfs attributes (just "protocol"). Kernels before 3.14 take a
 * flat drv_attrs array on the bus; later kernels use attribute groups.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
static struct driver_attribute vs_server_drv_attrs[] = {
	__ATTR_RO(protocol),
	__ATTR_NULL
};
#else
static DRIVER_ATTR_RO(protocol);

static struct attribute *vs_server_drv_attrs[] = {
	&driver_attr_protocol.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vs_server_drv);
#endif
1449
/*
 * Bus type for server-side virtual service devices. Services are
 * matched to drivers by protocol name (see vs_server_bus_match()).
 */
struct bus_type vs_server_bus_type = {
	.name = "vservices-server",
	.dev_attrs = vs_server_dev_attrs,
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
	.drv_attrs = vs_server_drv_attrs,
#else
	.drv_groups = vs_server_drv_groups,
#endif
	.match = vs_server_bus_match,
	.probe = vs_server_bus_probe,
	.remove = vs_server_bus_remove,
	.uevent = vs_service_bus_uevent,
};
EXPORT_SYMBOL(vs_server_bus_type);
1464
1465/*
1466 * Server session driver
1467 */
1468static int vs_server_session_probe(struct device *dev)
1469{
1470 struct vs_session_device *session = to_vs_session_device(dev);
1471 struct vs_service_device *service;
1472
1473 service = __vs_server_core_register_service(session, 0, NULL,
1474 VSERVICE_CORE_SERVICE_NAME,
1475 VSERVICE_CORE_PROTOCOL_NAME, NULL);
1476 if (IS_ERR(service))
1477 return PTR_ERR(service);
1478
1479 return 0;
1480}
1481
1482static int
1483vs_server_session_service_added(struct vs_session_device *session,
1484 struct vs_service_device *service)
1485{
1486 struct core_server *server = vs_server_session_core_server(session);
1487 int err;
1488
1489 if (WARN_ON(!server || !service->id))
1490 return -EINVAL;
1491
1492 err = vs_server_core_queue_service_created(server, service);
1493
1494 if (err)
1495 vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
1496 "failed to send service_created: %d\n", err);
1497
1498 return err;
1499}
1500
1501static int
1502vs_server_session_service_start(struct vs_session_device *session,
1503 struct vs_service_device *service)
1504{
1505 struct core_server *server = vs_server_session_core_server(session);
1506 int err;
1507
1508 if (WARN_ON(!server || !service->id))
1509 return -EINVAL;
1510
1511 err = vs_server_core_queue_service_reset_ready(server,
1512 VSERVICE_CORE_CORE_MSG_SERVER_READY, service);
1513
1514 if (err)
1515 vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
1516 "failed to send server_ready: %d\n", err);
1517
1518 return err;
1519}
1520
1521static int
1522vs_server_session_service_local_reset(struct vs_session_device *session,
1523 struct vs_service_device *service)
1524{
1525 struct core_server *server = vs_server_session_core_server(session);
1526 int err;
1527
1528 if (WARN_ON(!server || !service->id))
1529 return -EINVAL;
1530
1531 err = vs_server_core_queue_service_reset_ready(server,
1532 VSERVICE_CORE_CORE_MSG_SERVICE_RESET, service);
1533
1534 if (err)
1535 vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
1536 "failed to send service_reset: %d\n", err);
1537
1538 return err;
1539}
1540
1541static int
1542vs_server_session_service_removed(struct vs_session_device *session,
1543 struct vs_service_device *service)
1544{
1545 struct core_server *server = vs_server_session_core_server(session);
1546 int err;
1547
1548 /*
1549 * It's possible for the core server to be forcibly removed before
1550 * the other services, for example when the underlying transport
1551 * vanishes. If that happens, we can end up here with a NULL core
1552 * server pointer.
1553 */
1554 if (!server)
1555 return 0;
1556
1557 if (WARN_ON(!service->id))
1558 return -EINVAL;
1559
1560 err = vs_server_core_queue_service_removed(server, service);
1561 if (err)
1562 vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
1563 "failed to send service_removed: %d\n", err);
1564
1565 return err;
1566}
1567
/*
 * Session driver for the server side: creates the core service on
 * probe and forwards service lifecycle events to the client via the
 * core protocol.
 */
static struct vs_session_driver vs_server_session_driver = {
	.driver	= {
		.name			= "vservices-server-session",
		.owner			= THIS_MODULE,
		.bus			= &vs_session_bus_type,
		.probe			= vs_server_session_probe,
		.suppress_bind_attrs	= true,
	},
	.is_server = true,
	.service_bus = &vs_server_bus_type,
	/* Service lifecycle callbacks, invoked by the session layer. */
	.service_added = vs_server_session_service_added,
	.service_start = vs_server_session_service_start,
	.service_local_reset = vs_server_session_service_local_reset,
	.service_removed = vs_server_session_service_removed,
};
1583
/*
 * Module init: register the server bus, the optional devio driver, the
 * session driver, and the core protocol server, then create the
 * "server-sessions" sysfs root. Unwinds in reverse order on failure.
 */
static int __init vs_core_server_init(void)
{
	int ret;

	ret = bus_register(&vs_server_bus_type);
	if (ret)
		goto fail_bus_register;

#ifdef CONFIG_VSERVICES_CHAR_DEV
	/* The static devio driver is attached to our bus at init time. */
	vs_devio_server_driver.driver.bus = &vs_server_bus_type;
	vs_devio_server_driver.driver.owner = THIS_MODULE;
	ret = driver_register(&vs_devio_server_driver.driver);
	if (ret)
		goto fail_devio_register;
#endif

	ret = driver_register(&vs_server_session_driver.driver);
	if (ret)
		goto fail_driver_register;

	ret = vservice_core_server_register(&vs_core_server_driver,
			"vs_core_server");
	if (ret)
		goto fail_core_register;

	vservices_server_root = kobject_create_and_add("server-sessions",
			vservices_root);
	if (!vservices_server_root) {
		ret = -ENOMEM;
		goto fail_create_root;
	}

	return 0;

fail_create_root:
	vservice_core_server_unregister(&vs_core_server_driver);
fail_core_register:
	driver_unregister(&vs_server_session_driver.driver);
fail_driver_register:
#ifdef CONFIG_VSERVICES_CHAR_DEV
	driver_unregister(&vs_devio_server_driver.driver);
	vs_devio_server_driver.driver.bus = NULL;
	vs_devio_server_driver.driver.owner = NULL;
fail_devio_register:
#endif
	bus_unregister(&vs_server_bus_type);
fail_bus_register:
	return ret;
}
1633
/* Module exit: tear everything down in reverse of vs_core_server_init(). */
static void __exit vs_core_server_exit(void)
{
	kobject_put(vservices_server_root);
	vservice_core_server_unregister(&vs_core_server_driver);
	driver_unregister(&vs_server_session_driver.driver);
#ifdef CONFIG_VSERVICES_CHAR_DEV
	driver_unregister(&vs_devio_server_driver.driver);
	/* Clear the bus/owner assigned at init so the static driver struct
	 * is left in its pristine state. */
	vs_devio_server_driver.driver.bus = NULL;
	vs_devio_server_driver.driver.owner = NULL;
#endif
	bus_unregister(&vs_server_bus_type);
}
1646
/*
 * Registered at subsys_initcall time so the server bus exists before
 * ordinary device/driver initcalls that may create sessions.
 */
subsys_initcall(vs_core_server_init);
module_exit(vs_core_server_exit);

MODULE_DESCRIPTION("OKL4 Virtual Services Core Server Driver");
MODULE_AUTHOR("Open Kernel Labs, Inc");