/*
 * include/vservices/service.h
 *
 * Copyright (c) 2012-2018 General Dynamics
 * Copyright (c) 2014 Open Kernel Labs, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file defines the driver and device types for vServices client and
 * server drivers. These are generally defined by generated protocol-layer
 * code. However, they can also be defined directly by applications that
 * don't require protocol generation.
 */
| 16 | |
| 17 | #ifndef _VSERVICE_SERVICE_H_ |
| 18 | #define _VSERVICE_SERVICE_H_ |
| 19 | |
| 20 | #include <linux/version.h> |
| 21 | #include <linux/types.h> |
| 22 | #include <linux/device.h> |
| 23 | #include <linux/spinlock.h> |
| 24 | #include <linux/interrupt.h> |
| 25 | #include <linux/jiffies.h> |
| 26 | #include <linux/wait.h> |
| 27 | #include <linux/err.h> |
| 28 | |
| 29 | #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38) |
| 30 | #include <asm/atomic.h> |
| 31 | #else |
| 32 | #include <linux/atomic.h> |
| 33 | #endif |
| 34 | |
| 35 | #include <vservices/transport.h> |
| 36 | #include <vservices/session.h> |
| 37 | #include <vservices/types.h> |
| 38 | |
| 39 | struct vs_mbuf; |
| 40 | |
/**
 * struct vs_service_driver - Virtual service driver structure
 * @protocol: Protocol name for this driver
 * @is_server: True if this is a server driver, false if it is a client driver
 * @rx_atomic: If set to false then the receive message handlers are run from
 *             workqueue context and are allowed to sleep. If set to true
 *             the message handlers are run from tasklet context and may not
 *             sleep. For this purpose, tx_ready is considered a receive
 *             message handler.
 * @tx_atomic: If this is set to true along with rx_atomic, the driver is
 *             allowed to send messages from softirq contexts other than the
 *             receive message handlers, after calling
 *             vs_service_state_lock_bh. Otherwise, messages may only be sent
 *             from the receive message handlers, or from task context after
 *             calling vs_service_state_lock.
 * @probe: Probe function for this service
 * @remove: Remove function for this service
 * --- Callbacks ---
 * @receive: Message handler function for this service
 * @notify: Incoming notification handler function for this service
 * @start: Callback which is run when this service is started
 * @reset: Callback which is run when this service is reset
 * @tx_ready: Callback which is run when the service has dropped below its
 *            send quota
 * --- Resource requirements (valid for server only) ---
 * @in_quota_min: minimum number of input messages for protocol functionality
 * @in_quota_best: suggested number of input messages
 * @out_quota_min: minimum number of output messages for protocol functionality
 * @out_quota_best: suggested number of output messages
 * @in_notify_count: number of input notification bits used
 * @out_notify_count: number of output notification bits used
 * --- Internal ---
 * @driver: Linux device model driver structure; embedded so that
 *          to_vs_service_driver() can recover this structure from it
 *
 * The callback functions for a virtual service driver are all called from
 * the virtual service device's work queue.
 */
struct vs_service_driver {
	const char *protocol;
	bool is_server;
	bool rx_atomic, tx_atomic;

	int (*probe)(struct vs_service_device *service);
	int (*remove)(struct vs_service_device *service);

	int (*receive)(struct vs_service_device *service,
			struct vs_mbuf *mbuf);
	void (*notify)(struct vs_service_device *service, u32 flags);

	void (*start)(struct vs_service_device *service);
	void (*reset)(struct vs_service_device *service);

	int (*tx_ready)(struct vs_service_device *service);

	unsigned in_quota_min;
	unsigned in_quota_best;
	unsigned out_quota_min;
	unsigned out_quota_best;
	unsigned in_notify_count;
	unsigned out_notify_count;

	struct device_driver driver;
};
| 103 | |
/* Convert an embedded struct device_driver back to its vs_service_driver. */
#define to_vs_service_driver(d) \
	container_of(d, struct vs_service_driver, driver)

/* The vServices server/client bus types */
extern struct bus_type vs_client_bus_type;
extern struct bus_type vs_server_bus_type;
| 110 | |
/**
 * struct vs_service_stats - Virtual service statistics
 * @over_quota_time: Internal counter for tracking over quota time, in
 *                   jiffies (set when quota is first exceeded).
 * @sent_mbufs: Total number of message buffers sent.
 * @sent_bytes: Total bytes sent.
 * @send_failures: Total number of send failures.
 * @recv_mbufs: Total number of message buffers received.
 * @recv_bytes: Total number of bytes received.
 * @recv_failures: Total number of receive failures.
 * @nr_over_quota: Number of times an mbuf allocation has failed because the
 *                 service is over quota.
 * @nr_tx_ready: Number of times the service has run its tx_ready handler
 * @over_quota_time_total: The total amount of time in milli-seconds that the
 *                         service has spent over quota. Measured as the time
 *                         between exceeding quota in mbuf allocation and
 *                         running the tx_ready handler.
 * @over_quota_time_avg: The average amount of time in milli-seconds that the
 *                       service is spending in the over quota state.
 */
struct vs_service_stats {
	unsigned long over_quota_time;

	atomic_t sent_mbufs;
	atomic_t sent_bytes;
	atomic_t send_failures;
	atomic_t recv_mbufs;
	atomic_t recv_bytes;
	atomic_t recv_failures;
	atomic_t nr_over_quota;
	atomic_t nr_tx_ready;
	atomic_t over_quota_time_total;
	atomic_t over_quota_time_avg;
};
| 144 | |
/**
 * struct vs_service_device - Virtual service device
 * @id: Unique ID (to the session) for this service
 * @name: Service name
 * @sysfs_name: The sysfs name for the service
 * @protocol: Service protocol name
 * @is_server: True if this device is server, false if it is a client
 * @owner: service responsible for managing this service. This must be
 *     on the same session, and is NULL iff this is the core service.
 *     It must not be a service whose driver has tx_atomic set.
 * @lock_subclass: the number of generations of owners between this service
 *     and the core service; 0 for the core service, 1 for anything directly
 *     created by it, and so on. This is only used for verifying lock
 *     ordering (when lockdep is enabled), hence the name.
 * @ready_lock: mutex protecting readiness, disable_count and driver_probed.
 *     This depends on the state_mutex of the service's owner, if any. Acquire
 *     it using mutex_lock_nested(ready_lock, lock_subclass).
 * @readiness: Service's readiness state, owned by session layer.
 * @disable_count: Number of times the service has been disabled without
 *     a matching enable.
 * @driver_probed: True if a driver has been probed (and not removed)
 * @work_queue: Work queue for this service's task-context work.
 * @rx_tasklet: Tasklet for handling incoming messages. This is only used
 *     if the service driver has rx_atomic set to true. Otherwise
 *     incoming messages are handled on the workqueue by rx_work.
 * @rx_work: Work structure for handling incoming messages. This is only
 *     used if the service driver has rx_atomic set to false.
 * @rx_lock: Spinlock which protects access to rx_queue and tx_ready
 * @rx_queue: Queue of incoming messages
 * @tx_ready: Flag indicating that a tx_ready event is pending
 * @tx_batching: Flag indicating that outgoing messages are being batched
 * @state_spinlock: spinlock used to protect the service state if the
 *     service driver has tx_atomic (and rx_atomic) set to true. This
 *     depends on the service's ready_lock. Acquire it only by
 *     calling vs_service_state_lock_bh().
 * @state_mutex: mutex used to protect the service state if the service
 *     driver has tx_atomic set to false. This depends on the service's
 *     ready_lock, and if rx_atomic is true, the rx_tasklet must be
 *     disabled while it is held. Acquire it only by calling
 *     vs_service_state_lock().
 * @state_spinlock_used: Flag to check if the state spinlock has been acquired
 *     (only present when CONFIG_VSERVICES_LOCK_DEBUG is enabled).
 * @state_mutex_used: Flag to check if the state mutex has been acquired
 *     (only present when CONFIG_VSERVICES_LOCK_DEBUG is enabled).
 * @reset_work: Work to reset the service after a driver fails
 * @pending_reset: Set if reset_work has been queued and not completed.
 * @ready_work: Work to make service ready after a throttling delay
 * @cooloff_work: Work for cooling off reset throttling after the reset
 *     throttling limit was hit
 * @cleanup_work: Work for cleaning up and freeing the service structure
 * @last_reset: Time in jiffies at which this service last reset
 * @last_reset_request: Time in jiffies the last reset request for this
 *     service occurred at
 * @last_ready: Time in jiffies at which this service last became ready
 * @reset_delay: Time in jiffies that the next throttled reset will be
 *     delayed for. A value of zero means that reset throttling is not in
 *     effect.
 * @is_over_quota: Internal flag for whether the service is over quota. This
 *     flag is only used for stats accounting.
 * @quota_wq: waitqueue that is woken whenever the available send quota
 *     increases.
 * @notify_send_bits: The number of bits allocated for outgoing notifications.
 * @notify_send_offset: The first bit allocated for outgoing notifications.
 * @notify_recv_bits: The number of bits allocated for incoming notifications.
 * @notify_recv_offset: The first bit allocated for incoming notifications.
 * @send_quota: The maximum number of outgoing messages.
 * @recv_quota: The maximum number of incoming messages.
 * @in_quota_set: For servers, the number of client->server messages
 *     requested during system configuration (sysfs or environment).
 * @out_quota_set: For servers, the number of server->client messages
 *     requested during system configuration (sysfs or environment).
 * @transport_priv: Opaque pointer reserved for the transport layer's
 *     per-service private data (not touched by this header; presumably
 *     owned by the transport driver - confirm against the transport
 *     implementation).
 * @dev: Linux device model device structure
 * @stats: Service statistics
 */
struct vs_service_device {
	vs_service_id_t id;
	char *name;
	char *sysfs_name;
	char *protocol;
	bool is_server;

	struct vs_service_device *owner;
	unsigned lock_subclass;

	struct mutex ready_lock;
	unsigned readiness;
	int disable_count;
	bool driver_probed;

	struct workqueue_struct *work_queue;

	struct tasklet_struct rx_tasklet;
	struct work_struct rx_work;

	spinlock_t rx_lock;
	struct list_head rx_queue;
	bool tx_ready, tx_batching;

	spinlock_t state_spinlock;
	struct mutex state_mutex;

	struct work_struct reset_work;
	bool pending_reset;
	struct delayed_work ready_work;
	struct delayed_work cooloff_work;
	struct work_struct cleanup_work;

	unsigned long last_reset;
	unsigned long last_reset_request;
	unsigned long last_ready;
	unsigned long reset_delay;

	atomic_t is_over_quota;
	wait_queue_head_t quota_wq;

	unsigned notify_send_bits;
	unsigned notify_send_offset;
	unsigned notify_recv_bits;
	unsigned notify_recv_offset;
	unsigned send_quota;
	unsigned recv_quota;

	unsigned in_quota_set;
	unsigned out_quota_set;

	void *transport_priv;

	struct device dev;
	struct vs_service_stats stats;

#ifdef CONFIG_VSERVICES_LOCK_DEBUG
	bool state_spinlock_used;
	bool state_mutex_used;
#endif
};
| 278 | |
| 279 | #define to_vs_service_device(d) container_of(d, struct vs_service_device, dev) |
| 280 | |
| 281 | /** |
| 282 | * vs_service_get_session - Return the session for a service |
| 283 | * @service: Service to get the session for |
| 284 | */ |
| 285 | static inline struct vs_session_device * |
| 286 | vs_service_get_session(struct vs_service_device *service) |
| 287 | { |
| 288 | return to_vs_session_device(service->dev.parent); |
| 289 | } |
| 290 | |
| 291 | /** |
| 292 | * vs_service_send - Send a message from a service |
| 293 | * @service: Service to send the message from |
| 294 | * @mbuf: Message buffer to send |
| 295 | */ |
| 296 | static inline int |
| 297 | vs_service_send(struct vs_service_device *service, struct vs_mbuf *mbuf) |
| 298 | { |
| 299 | struct vs_session_device *session = vs_service_get_session(service); |
| 300 | const struct vs_transport_vtable *vt = session->transport->vt; |
| 301 | const unsigned long flags = |
| 302 | service->tx_batching ? VS_TRANSPORT_SEND_FLAGS_MORE : 0; |
| 303 | size_t msg_size = vt->mbuf_size(mbuf); |
| 304 | int err; |
| 305 | |
| 306 | err = vt->send(session->transport, service, mbuf, flags); |
| 307 | if (!err) { |
| 308 | atomic_inc(&service->stats.sent_mbufs); |
| 309 | atomic_add(msg_size, &service->stats.sent_bytes); |
| 310 | } else { |
| 311 | atomic_inc(&service->stats.send_failures); |
| 312 | } |
| 313 | |
| 314 | return err; |
| 315 | } |
| 316 | |
| 317 | /** |
| 318 | * vs_service_alloc_mbuf - Allocate a message buffer for a service |
| 319 | * @service: Service to allocate the buffer for |
| 320 | * @size: Size of the data buffer to allocate |
| 321 | * @flags: Flags to pass to the buffer allocation |
| 322 | */ |
| 323 | static inline struct vs_mbuf * |
| 324 | vs_service_alloc_mbuf(struct vs_service_device *service, size_t size, |
| 325 | gfp_t flags) |
| 326 | { |
| 327 | struct vs_session_device *session = vs_service_get_session(service); |
| 328 | struct vs_mbuf *mbuf; |
| 329 | |
| 330 | mbuf = session->transport->vt->alloc_mbuf(session->transport, |
| 331 | service, size, flags); |
| 332 | if (IS_ERR(mbuf) && PTR_ERR(mbuf) == -ENOBUFS) { |
| 333 | /* Over quota accounting */ |
| 334 | if (atomic_cmpxchg(&service->is_over_quota, 0, 1) == 0) { |
| 335 | service->stats.over_quota_time = jiffies; |
| 336 | atomic_inc(&service->stats.nr_over_quota); |
| 337 | } |
| 338 | } |
| 339 | |
| 340 | /* |
| 341 | * The transport drivers should return either a valid message buffer |
| 342 | * pointer or an ERR_PTR value. Warn here if a transport driver is |
| 343 | * returning NULL on message buffer allocation failure. |
| 344 | */ |
| 345 | if (WARN_ON_ONCE(!mbuf)) |
| 346 | return ERR_PTR(-ENOMEM); |
| 347 | |
| 348 | return mbuf; |
| 349 | } |
| 350 | |
| 351 | /** |
| 352 | * vs_service_free_mbuf - Deallocate a message buffer for a service |
| 353 | * @service: Service the message buffer was allocated for |
| 354 | * @mbuf: Message buffer to deallocate |
| 355 | */ |
| 356 | static inline void |
| 357 | vs_service_free_mbuf(struct vs_service_device *service, struct vs_mbuf *mbuf) |
| 358 | { |
| 359 | struct vs_session_device *session = vs_service_get_session(service); |
| 360 | |
| 361 | session->transport->vt->free_mbuf(session->transport, service, mbuf); |
| 362 | } |
| 363 | |
| 364 | /** |
| 365 | * vs_service_notify - Send a notification from a service |
| 366 | * @service: Service to send the notification from |
| 367 | * @flags: Notification bits to send |
| 368 | */ |
| 369 | static inline int |
| 370 | vs_service_notify(struct vs_service_device *service, u32 flags) |
| 371 | { |
| 372 | struct vs_session_device *session = vs_service_get_session(service); |
| 373 | |
| 374 | return session->transport->vt->notify(session->transport, |
| 375 | service, flags); |
| 376 | } |
| 377 | |
| 378 | /** |
| 379 | * vs_service_has_atomic_rx - Return whether or not a service's receive |
| 380 | * message handler runs in atomic context. This function should only be |
| 381 | * called for services which are bound to a driver. |
| 382 | * |
| 383 | * @service: Service to check |
| 384 | */ |
| 385 | static inline bool |
| 386 | vs_service_has_atomic_rx(struct vs_service_device *service) |
| 387 | { |
| 388 | if (WARN_ON(!service->dev.driver)) |
| 389 | return false; |
| 390 | |
| 391 | return to_vs_service_driver(service->dev.driver)->rx_atomic; |
| 392 | } |
| 393 | |
/**
 * vs_service_max_mbuf_size - Return the maximum allocation size of a message
 * buffer.
 * @service: The service to check
 *
 * (The kerneldoc previously named this vs_session_max_mbuf_size, which does
 * not match the function; corrected to vs_service_max_mbuf_size.)
 */
static inline size_t
vs_service_max_mbuf_size(struct vs_service_device *service)
{
	struct vs_session_device *session = vs_service_get_session(service);

	return session->transport->vt->max_mbuf_size(session->transport);
}
| 406 | |
| 407 | /** |
| 408 | * vs_service_send_mbufs_available - Return the number of mbufs which can be |
| 409 | * allocated for sending before going over quota. |
| 410 | * @service: The service to check |
| 411 | */ |
| 412 | static inline ssize_t |
| 413 | vs_service_send_mbufs_available(struct vs_service_device *service) |
| 414 | { |
| 415 | struct vs_session_device *session = vs_service_get_session(service); |
| 416 | |
| 417 | return session->transport->vt->service_send_avail(session->transport, |
| 418 | service); |
| 419 | } |
| 420 | |
| 421 | /** |
| 422 | * vs_service_has_atomic_tx - Return whether or not a service is allowed to |
| 423 | * transmit from atomic context (other than its receive message handler). |
| 424 | * This function should only be called for services which are bound to a |
| 425 | * driver. |
| 426 | * |
| 427 | * @service: Service to check |
| 428 | */ |
| 429 | static inline bool |
| 430 | vs_service_has_atomic_tx(struct vs_service_device *service) |
| 431 | { |
| 432 | if (WARN_ON(!service->dev.driver)) |
| 433 | return false; |
| 434 | |
| 435 | return to_vs_service_driver(service->dev.driver)->tx_atomic; |
| 436 | } |
| 437 | |
/**
 * vs_service_state_lock - Acquire a lock allowing service state operations
 * from external task contexts.
 *
 * @service: Service to lock.
 *
 * This must be used to protect any service state accesses that occur in task
 * contexts outside of a callback from the vservices protocol layer. It must
 * not be called from a protocol layer callback, nor from atomic context.
 *
 * If this service's state is also accessed from softirq contexts other than
 * vservices protocol layer callbacks, use vs_service_state_lock_bh instead,
 * and set the driver's tx_atomic flag.
 *
 * If this is called from outside the service's workqueue, the calling driver
 * must provide its own guarantee that it has not been detached from the
 * service. If that is not possible, use vs_state_lock_safe().
 */
static inline void
vs_service_state_lock(struct vs_service_device *service)
__acquires(service)
{
#ifdef CONFIG_VSERVICES_LOCK_DEBUG
	/* Drivers with tx_atomic set must use vs_service_state_lock_bh(). */
	WARN_ON_ONCE(vs_service_has_atomic_tx(service));
#endif

	/*
	 * Nest by lock_subclass (the service's distance from the core
	 * service) so lockdep accepts an owner service holding its own
	 * state lock while taking that of a service it manages.
	 */
	mutex_lock_nested(&service->state_mutex, service->lock_subclass);

#ifdef CONFIG_VSERVICES_LOCK_DEBUG
	/* A driver must use either the state mutex or spinlock, never both. */
	if (WARN_ON_ONCE(service->state_spinlock_used))
		dev_err(&service->dev, "Service is using both the state spinlock and mutex - Fix your driver\n");
	service->state_mutex_used = true;
#endif

	/*
	 * If messages are handled in a tasklet, keep it from running while
	 * the state is held (see the state_mutex documentation above).
	 */
	if (vs_service_has_atomic_rx(service))
		tasklet_disable(&service->rx_tasklet);

	__acquire(service);
}
| 477 | |
/**
 * vs_service_state_unlock - Release the lock acquired by vs_service_state_lock.
 *
 * @service: Service to unlock.
 */
static inline void
vs_service_state_unlock(struct vs_service_device *service)
__releases(service)
{
	__release(service);

	mutex_unlock(&service->state_mutex);

	if (vs_service_has_atomic_rx(service)) {
		/* Paired with tasklet_disable() in vs_service_state_lock(). */
		tasklet_enable(&service->rx_tasklet);

		/*
		 * Kick the tasklet if there is RX work to do.
		 * NOTE(review): rx_queue is tested here without holding
		 * rx_lock; presumably a concurrent enqueue also schedules
		 * the tasklet - confirm against the session layer.
		 */
		if (!list_empty(&service->rx_queue))
			tasklet_schedule(&service->rx_tasklet);
	}
}
| 499 | |
/**
 * vs_service_state_lock_bh - Acquire a lock allowing service state operations
 * from external task or softirq contexts.
 *
 * @service: Service to lock.
 *
 * This is an alternative to vs_service_state_lock for drivers that receive
 * messages in atomic context (i.e. have their rx_atomic flag set), *and* must
 * transmit messages from softirq contexts other than their own message
 * receive and tx_ready callbacks. Such drivers must set their tx_atomic
 * flag, so generated protocol drivers perform correct locking.
 *
 * This should replace all calls to vs_service_state_lock for services that
 * need it. Do not use both locking functions in one service driver.
 *
 * The calling driver must provide its own guarantee that it has not been
 * detached from the service. If that is not possible, use
 * vs_state_lock_safe_bh().
 */
static inline void
vs_service_state_lock_bh(struct vs_service_device *service)
__acquires(service)
__acquires(&service->state_spinlock)
{
#ifdef CONFIG_VSERVICES_LOCK_DEBUG
	/* This lock is only valid when both rx_atomic and tx_atomic are set. */
	WARN_ON_ONCE(!vs_service_has_atomic_rx(service));
	WARN_ON_ONCE(!vs_service_has_atomic_tx(service));
#endif

#ifdef CONFIG_SMP
	/* Not necessary on UP because it's implied by spin_lock_bh(). */
	tasklet_disable(&service->rx_tasklet);
#endif

	spin_lock_bh(&service->state_spinlock);

#ifdef CONFIG_VSERVICES_LOCK_DEBUG
	/* A driver must use either the state mutex or spinlock, never both. */
	if (WARN_ON_ONCE(service->state_mutex_used))
		dev_err(&service->dev, "Service is using both the state spinlock and mutex - Fix your driver\n");
	service->state_spinlock_used = true;
#endif

	__acquire(service);
}
| 544 | |
/**
 * vs_service_state_unlock_bh - Release the lock acquired by
 * vs_service_state_lock_bh.
 *
 * @service: Service to unlock.
 */
static inline void
vs_service_state_unlock_bh(struct vs_service_device *service)
__releases(service)
__releases(&service->state_spinlock)
{
	__release(service);

	spin_unlock_bh(&service->state_spinlock);

#ifdef CONFIG_SMP
	/* Paired with tasklet_disable() in vs_service_state_lock_bh(). */
	tasklet_enable(&service->rx_tasklet);
#endif
}
| 564 | |
/*
 * Convenience macros for locking a state structure rather than a service.
 * The state structure must have a 'service' member pointing back to its
 * vs_service_device.
 */
#define vs_state_lock(state) vs_service_state_lock((state)->service)
#define vs_state_unlock(state) vs_service_state_unlock((state)->service)
#define vs_state_lock_bh(state) vs_service_state_lock_bh((state)->service)
#define vs_state_unlock_bh(state) vs_service_state_unlock_bh((state)->service)
| 570 | |
/**
 * vs_state_lock_safe[_bh] - Acquire a lock for a state structure's service,
 * when the service may have been detached from the state.
 *
 * This is useful for blocking operations that can't easily be terminated
 * before returning from the service reset handler, such as file I/O. To use
 * this, the state structure should be reference-counted rather than freed in
 * the release callback, and the driver should retain its own reference to the
 * service until the state structure is freed.
 *
 * This macro acquires the lock and returns true if the state has not been
 * detached from the service. Otherwise, it returns false.
 *
 * Note that the _bh variant cannot be used from atomic context, because it
 * acquires a mutex.
 */
/*
 * Implementation: hold ready_lock to stabilise the attachment, check the
 * state's 'released' flag both before and after taking the state lock, and
 * back the lock out again if the state was released in between.
 */
#define __vs_state_lock_safe(_state, _lock, _unlock) ({ \
	bool __ok = true; \
	typeof(_state) __state = (_state); \
	struct vs_service_device *__service = __state->service; \
	mutex_lock_nested(&__service->ready_lock, \
			__service->lock_subclass); \
	__ok = !ACCESS_ONCE(__state->released); \
	if (__ok) { \
		_lock(__state); \
		__ok = !ACCESS_ONCE(__state->released); \
		if (!__ok) \
			_unlock(__state); \
	} \
	mutex_unlock(&__service->ready_lock); \
	__ok; \
})
#define vs_state_lock_safe(_state) \
	__vs_state_lock_safe((_state), vs_state_lock, vs_state_unlock)
#define vs_state_lock_safe_bh(_state) \
	__vs_state_lock_safe((_state), vs_state_lock_bh, vs_state_unlock_bh)
| 607 | |
| 608 | /** |
| 609 | * vs_get_service - Get a reference to a service. |
| 610 | * @service: Service to get a reference to. |
| 611 | */ |
| 612 | static inline struct vs_service_device * |
| 613 | vs_get_service(struct vs_service_device *service) |
| 614 | { |
| 615 | if (service) |
| 616 | get_device(&service->dev); |
| 617 | return service; |
| 618 | } |
| 619 | |
/**
 * vs_put_service - Put a reference to a service.
 * @service: The service to put the reference to. Unlike vs_get_service(),
 *           this must not be NULL.
 */
static inline void
vs_put_service(struct vs_service_device *service)
{
	put_device(&service->dev);
}
| 629 | |
/*
 * Service reset entry points, implemented by the vServices core.
 * NOTE(review): the "nosync" variant presumably requests a reset without
 * synchronising with the caller - confirm in the core implementation.
 */
extern int vs_service_reset(struct vs_service_device *service,
		struct vs_service_device *caller);
extern void vs_service_reset_nosync(struct vs_service_device *service);
| 633 | |
| 634 | /** |
| 635 | * vs_service_send_batch_start - Start a batch of outgoing messages |
| 636 | * @service: The service that is starting a batch |
| 637 | * @flush: Finish any previously started batch (if false, then duplicate |
| 638 | * calls to this function have no effect) |
| 639 | */ |
| 640 | static inline void |
| 641 | vs_service_send_batch_start(struct vs_service_device *service, bool flush) |
| 642 | { |
| 643 | if (flush && service->tx_batching) { |
| 644 | struct vs_session_device *session = |
| 645 | vs_service_get_session(service); |
| 646 | const struct vs_transport_vtable *vt = session->transport->vt; |
| 647 | if (vt->flush) |
| 648 | vt->flush(session->transport, service); |
| 649 | } else { |
| 650 | service->tx_batching = true; |
| 651 | } |
| 652 | } |
| 653 | |
| 654 | /** |
| 655 | * vs_service_send_batch_end - End a batch of outgoing messages |
| 656 | * @service: The service that is ending a batch |
| 657 | * @flush: Start sending the batch immediately (if false, the batch will |
| 658 | * be flushed when the next message is sent) |
| 659 | */ |
| 660 | static inline void |
| 661 | vs_service_send_batch_end(struct vs_service_device *service, bool flush) |
| 662 | { |
| 663 | service->tx_batching = false; |
| 664 | if (flush) { |
| 665 | struct vs_session_device *session = |
| 666 | vs_service_get_session(service); |
| 667 | const struct vs_transport_vtable *vt = session->transport->vt; |
| 668 | if (vt->flush) |
| 669 | vt->flush(session->transport, service); |
| 670 | } |
| 671 | } |
| 672 | |
| 673 | |
| 674 | #endif /* _VSERVICE_SERVICE_H_ */ |