Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2008-2012 Niels Provos and Nick Mathewson |
| 3 | * |
| 4 | * Redistribution and use in source and binary forms, with or without |
| 5 | * modification, are permitted provided that the following conditions |
| 6 | * are met: |
| 7 | * 1. Redistributions of source code must retain the above copyright |
| 8 | * notice, this list of conditions and the following disclaimer. |
| 9 | * 2. Redistributions in binary form must reproduce the above copyright |
| 10 | * notice, this list of conditions and the following disclaimer in the |
| 11 | * documentation and/or other materials provided with the distribution. |
| 12 | * 3. The name of the author may not be used to endorse or promote products |
| 13 | * derived from this software without specific prior written permission. |
| 14 | * |
| 15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| 16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| 17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
| 18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
| 19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
| 20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
| 24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 25 | */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 26 | #ifndef BUFFEREVENT_INTERNAL_H_INCLUDED_ |
| 27 | #define BUFFEREVENT_INTERNAL_H_INCLUDED_ |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 28 | |
| 29 | #ifdef __cplusplus |
| 30 | extern "C" { |
| 31 | #endif |
| 32 | |
| 33 | #include "event2/event-config.h" |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 34 | #include "event2/event_struct.h" |
| 35 | #include "evconfig-private.h" |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 36 | #include "event2/util.h" |
| 37 | #include "defer-internal.h" |
| 38 | #include "evthread-internal.h" |
| 39 | #include "event2/thread.h" |
| 40 | #include "ratelim-internal.h" |
| 41 | #include "event2/bufferevent_struct.h" |
| 42 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 43 | #include "ipv6-internal.h" |
| 44 | #ifdef _WIN32 |
| 45 | #include <ws2tcpip.h> |
| 46 | #endif |
| 47 | #ifdef EVENT__HAVE_NETINET_IN_H |
| 48 | #include <netinet/in.h> |
| 49 | #endif |
| 50 | #ifdef EVENT__HAVE_NETINET_IN6_H |
| 51 | #include <netinet/in6.h> |
| 52 | #endif |
| 53 | |
/* These flags are reasons that we might be declining to actually enable
   reading or writing on a bufferevent.
 */

/* On all bufferevents, for reading: used when we have read up to the
   watermark value.

   On a filtering bufferevent, for writing: used when the underlying
   bufferevent's write buffer has been filled up to its watermark
   value.
 */
#define BEV_SUSPEND_WM 0x01
/* On a base bufferevent: when we have emptied a bandwidth bucket. */
#define BEV_SUSPEND_BW 0x02
/* On a base bufferevent: when we have emptied the group's bandwidth bucket. */
#define BEV_SUSPEND_BW_GROUP 0x04
/* On a socket bufferevent: can't do any operations while we're waiting for
 * name lookup to finish. */
#define BEV_SUSPEND_LOOKUP 0x08
/* On a base bufferevent, for reading: used when a filter has choked this
 * (underlying) bufferevent because it has stopped reading from it. */
#define BEV_SUSPEND_FILT_READ 0x10

/* Bitfield of the BEV_SUSPEND_* reasons above. */
typedef ev_uint16_t bufferevent_suspend_flags;
| 78 | |
/** Shared rate-limiting state for a set of bufferevents that draw from the
 * same token bucket. */
struct bufferevent_rate_limit_group {
	/** List of all members in the group */
	LIST_HEAD(rlim_group_member_list, bufferevent_private) members;
	/** Current limits for the group. */
	struct ev_token_bucket rate_limit;
	struct ev_token_bucket_cfg rate_limit_cfg;

	/** True iff we don't want to read from any member of the group
	 * until the token bucket refills. */
	unsigned read_suspended : 1;
	/** True iff we don't want to write from any member of the group
	 * until the token bucket refills. */
	unsigned write_suspended : 1;
	/** True iff we were unable to suspend one of the bufferevents in the
	 * group for reading the last time we tried, and we should try
	 * again. */
	unsigned pending_unsuspend_read : 1;
	/** True iff we were unable to suspend one of the bufferevents in the
	 * group for writing the last time we tried, and we should try
	 * again. */
	unsigned pending_unsuspend_write : 1;

	/*@{*/
	/** Total number of bytes read or written in this group since last
	 * reset. */
	ev_uint64_t total_read;
	ev_uint64_t total_written;
	/*@}*/

	/** The number of bufferevents in the group. */
	int n_members;

	/** The smallest number of bytes that any member of the group should
	 * be limited to read or write at a time. */
	ev_ssize_t min_share;
	ev_ssize_t configured_min_share;

	/** Timeout event that goes off once a tick, when the bucket is ready
	 * to refill. */
	struct event master_refill_event;

	/** Seed for weak random number generator. Protected by 'lock' */
	struct evutil_weakrand_state weakrand_seed;

	/** Lock to protect the members of this group.  This lock should nest
	 * within every bufferevent lock: if you are holding this lock, do
	 * not assume you can lock another bufferevent. */
	void *lock;
};
| 128 | |
| 129 | /** Fields for rate-limiting a single bufferevent. */ |
/** Fields for rate-limiting a single bufferevent. */
struct bufferevent_rate_limit {
	/* Linked-list elements for storing this bufferevent_private in a
	 * group.
	 *
	 * Note that this field is supposed to be protected by the group
	 * lock */
	LIST_ENTRY(bufferevent_private) next_in_group;
	/** The rate-limiting group for this bufferevent, or NULL if it is
	 * only rate-limited on its own. */
	struct bufferevent_rate_limit_group *group;

	/* This bufferevent's current limits. */
	struct ev_token_bucket limit;
	/* Pointer to the rate-limit configuration for this bufferevent.
	 * Can be shared.  XXX reference-count this? */
	struct ev_token_bucket_cfg *cfg;

	/* Timeout event used when one of this bufferevent's buckets is
	 * empty. */
	struct event refill_bucket_event;
};
| 151 | |
| 152 | /** Parts of the bufferevent structure that are shared among all bufferevent |
| 153 | * types, but not exposed in bufferevent_struct.h. */ |
/** Parts of the bufferevent structure that are shared among all bufferevent
 * types, but not exposed in bufferevent_struct.h. */
struct bufferevent_private {
	/** The underlying bufferevent structure. */
	struct bufferevent bev;

	/** Evbuffer callback to enforce watermarks on input. */
	struct evbuffer_cb_entry *read_watermarks_cb;

	/** If set, we should free the lock when we free the bufferevent. */
	unsigned own_lock : 1;

	/** Flag: set if we have deferred callbacks and a read callback is
	 * pending. */
	unsigned readcb_pending : 1;
	/** Flag: set if we have deferred callbacks and a write callback is
	 * pending. */
	unsigned writecb_pending : 1;
	/** Flag: set if we are currently busy connecting. */
	unsigned connecting : 1;
	/** Flag: set if a connect failed prematurely; this is a hack for
	 * getting around the bufferevent abstraction. */
	unsigned connection_refused : 1;
	/** Set to the events pending if we have deferred callbacks and
	 * an events callback is pending. */
	short eventcb_pending;

	/** If set, read is suspended until one or more conditions are over.
	 * The actual value here is a bitfield of those conditions; see the
	 * BEV_SUSPEND_* flags above. */
	bufferevent_suspend_flags read_suspended;

	/** If set, writing is suspended until one or more conditions are over.
	 * The actual value here is a bitfield of those conditions; see the
	 * BEV_SUSPEND_* flags above. */
	bufferevent_suspend_flags write_suspended;

	/** Set to the current socket errno if we have deferred callbacks and
	 * an events callback is pending. */
	int errno_pending;

	/** The DNS error code for bufferevent_socket_connect_hostname */
	int dns_error;

	/** Used to implement deferred callbacks */
	struct event_callback deferred;

	/** The options this bufferevent was constructed with */
	enum bufferevent_options options;

	/** Current reference count for this bufferevent. */
	int refcnt;

	/** Lock for this bufferevent.  Shared by the inbuf and the outbuf.
	 * If NULL, locking is disabled. */
	void *lock;

	/** No matter how big our bucket gets, don't try to read more than this
	 * much in a single read operation. */
	ev_ssize_t max_single_read;

	/** No matter how big our bucket gets, don't try to write more than this
	 * much in a single write operation. */
	ev_ssize_t max_single_write;

	/** Rate-limiting information for this bufferevent */
	struct bufferevent_rate_limit *rate_limiting;

	/* Saved connection address, so we can extract the IP address from it
	 * later.
	 *
	 * Some servers may reset/close a connection without waiting for the
	 * client, in which case we can't extract the IP address even in
	 * close_cb.  So we save it just after connecting to the remote
	 * server, or after resolving (to avoid extra DNS requests while
	 * retrying, since UDP is slow). */
	union {
		struct sockaddr_in6 in6;
		struct sockaddr_in in;
	} conn_address;

	/** Outstanding evdns_getaddrinfo request, if any. */
	struct evdns_getaddrinfo_request *dns_request;
};
| 234 | |
| 235 | /** Possible operations for a control callback. */ |
/** Possible operations for a control callback. */
enum bufferevent_ctrl_op {
	/** Set the bufferevent's file descriptor. */
	BEV_CTRL_SET_FD,
	/** Retrieve the bufferevent's file descriptor. */
	BEV_CTRL_GET_FD,
	/** Retrieve the underlying bufferevent, if any. */
	BEV_CTRL_GET_UNDERLYING,
	/** Cancel all pending operations on the bufferevent. */
	BEV_CTRL_CANCEL_ALL
};
| 242 | |
| 243 | /** Possible data types for a control callback */ |
/** Possible data types for a control callback; which member is used
 * depends on the bufferevent_ctrl_op being performed. */
union bufferevent_ctrl_data {
	/** Used by operations that take or return a pointer. */
	void *ptr;
	/** Used by operations that take or return a socket. */
	evutil_socket_t fd;
};
| 248 | |
| 249 | /** |
| 250 | Implementation table for a bufferevent: holds function pointers and other |
| 251 | information to make the various bufferevent types work. |
| 252 | */ |
/**
   Implementation table for a bufferevent: holds function pointers and other
   information to make the various bufferevent types work.
*/
struct bufferevent_ops {
	/** The name of the bufferevent's type. */
	const char *type;
	/** At what offset into the implementation type will we find a
	    bufferevent structure?

	    Example: if the type is implemented as
	    struct bufferevent_x {
	       int extra_data;
	       struct bufferevent bev;
	    }
	    then mem_offset should be offsetof(struct bufferevent_x, bev)
	 */
	off_t mem_offset;

	/** Enables one or more of EV_READ|EV_WRITE on a bufferevent.  Does
	    not need to adjust the 'enabled' field.  Returns 0 on success, -1
	    on failure.
	 */
	int (*enable)(struct bufferevent *, short);

	/** Disables one or more of EV_READ|EV_WRITE on a bufferevent.  Does
	    not need to adjust the 'enabled' field.  Returns 0 on success, -1
	    on failure.
	 */
	int (*disable)(struct bufferevent *, short);

	/** Detaches the bufferevent from related data structures.  Called as
	 * soon as its reference count reaches 0. */
	void (*unlink)(struct bufferevent *);

	/** Free any storage and deallocate any extra data or structures used
	    in this implementation.  Called when the bufferevent is
	    finalized.
	 */
	void (*destruct)(struct bufferevent *);

	/** Called when the timeouts on the bufferevent have changed. */
	int (*adj_timeouts)(struct bufferevent *);

	/** Called to flush data. */
	int (*flush)(struct bufferevent *, short, enum bufferevent_flush_mode);

	/** Called to access miscellaneous fields. */
	int (*ctrl)(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);

};
| 300 | |
| 301 | extern const struct bufferevent_ops bufferevent_ops_socket; |
| 302 | extern const struct bufferevent_ops bufferevent_ops_filter; |
| 303 | extern const struct bufferevent_ops bufferevent_ops_pair; |
| 304 | |
/* Predicates for identifying the concrete type of a bufferevent by its
 * ops table. */
#define BEV_IS_SOCKET(bevp) ((bevp)->be_ops == &bufferevent_ops_socket)
#define BEV_IS_FILTER(bevp) ((bevp)->be_ops == &bufferevent_ops_filter)
#define BEV_IS_PAIR(bevp) ((bevp)->be_ops == &bufferevent_ops_pair)

#ifdef _WIN32
extern const struct bufferevent_ops bufferevent_ops_async;
#define BEV_IS_ASYNC(bevp) ((bevp)->be_ops == &bufferevent_ops_async)
#else
/* The async (IOCP-backed) bufferevent type only exists on Windows. */
#define BEV_IS_ASYNC(bevp) 0
#endif
| 315 | |
| 316 | /** Initialize the shared parts of a bufferevent. */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 317 | int bufferevent_init_common_(struct bufferevent_private *, struct event_base *, const struct bufferevent_ops *, enum bufferevent_options options); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 318 | |
| 319 | /** For internal use: temporarily stop all reads on bufev, until the conditions |
| 320 | * in 'what' are over. */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 321 | void bufferevent_suspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 322 | /** For internal use: clear the conditions 'what' on bufev, and re-enable |
| 323 | * reading if there are no conditions left. */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 324 | void bufferevent_unsuspend_read_(struct bufferevent *bufev, bufferevent_suspend_flags what); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 325 | |
| 326 | /** For internal use: temporarily stop all writes on bufev, until the conditions |
| 327 | * in 'what' are over. */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 328 | void bufferevent_suspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 329 | /** For internal use: clear the conditions 'what' on bufev, and re-enable |
| 330 | * writing if there are no conditions left. */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 331 | void bufferevent_unsuspend_write_(struct bufferevent *bufev, bufferevent_suspend_flags what); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 332 | |
| 333 | #define bufferevent_wm_suspend_read(b) \ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 334 | bufferevent_suspend_read_((b), BEV_SUSPEND_WM) |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 335 | #define bufferevent_wm_unsuspend_read(b) \ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 336 | bufferevent_unsuspend_read_((b), BEV_SUSPEND_WM) |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 337 | |
| 338 | /* |
| 339 | Disable a bufferevent. Equivalent to bufferevent_disable(), but |
| 340 | first resets 'connecting' flag to force EV_WRITE down for sure. |
| 341 | |
| 342 | XXXX this method will go away in the future; try not to add new users. |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 343 | See comment in evhttp_connection_reset_() for discussion. |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 344 | |
| 345 | @param bufev the bufferevent to be disabled |
| 346 | @param event any combination of EV_READ | EV_WRITE. |
| 347 | @return 0 if successful, or -1 if an error occurred |
| 348 | @see bufferevent_disable() |
| 349 | */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 350 | int bufferevent_disable_hard_(struct bufferevent *bufev, short event); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 351 | |
| 352 | /** Internal: Set up locking on a bufferevent. If lock is set, use it. |
| 353 | * Otherwise, use a new lock. */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 354 | int bufferevent_enable_locking_(struct bufferevent *bufev, void *lock); |
| 355 | /** Internal: backwards compat macro for the now public function |
| 356 | * Increment the reference count on bufev. */ |
| 357 | #define bufferevent_incref_(bufev) bufferevent_incref(bufev) |
/** Internal: Lock bufev and increase its reference count. */
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 360 | void bufferevent_incref_and_lock_(struct bufferevent *bufev); |
| 361 | /** Internal: backwards compat macro for the now public function |
| 362 | * Decrement the reference count on bufev. Returns 1 if it freed |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 363 | * the bufferevent.*/ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 364 | #define bufferevent_decref_(bufev) bufferevent_decref(bufev) |
| 365 | |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 366 | /** Internal: Drop the reference count on bufev, freeing as necessary, and |
| 367 | * unlocking it otherwise. Returns 1 if it freed the bufferevent. */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 368 | int bufferevent_decref_and_unlock_(struct bufferevent *bufev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 369 | |
| 370 | /** Internal: If callbacks are deferred and we have a read callback, schedule |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 371 | * a readcb. Otherwise just run the readcb. Ignores watermarks. */ |
| 372 | void bufferevent_run_readcb_(struct bufferevent *bufev, int options); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 373 | /** Internal: If callbacks are deferred and we have a write callback, schedule |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 374 | * a writecb. Otherwise just run the writecb. Ignores watermarks. */ |
| 375 | void bufferevent_run_writecb_(struct bufferevent *bufev, int options); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 376 | /** Internal: If callbacks are deferred and we have an eventcb, schedule |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 377 | * it to run with events "what". Otherwise just run the eventcb. |
| 378 | * See bufferevent_trigger_event for meaning of "options". */ |
| 379 | void bufferevent_run_eventcb_(struct bufferevent *bufev, short what, int options); |
| 380 | |
| 381 | /** Internal: Run or schedule (if deferred or options contain |
| 382 | * BEV_TRIG_DEFER_CALLBACKS) I/O callbacks specified in iotype. |
| 383 | * Must already hold the bufev lock. Honors watermarks unless |
| 384 | * BEV_TRIG_IGNORE_WATERMARKS is in options. */ |
| 385 | static inline void bufferevent_trigger_nolock_(struct bufferevent *bufev, short iotype, int options); |
| 386 | |
| 387 | /* Making this inline since all of the common-case calls to this function in |
| 388 | * libevent use constant arguments. */ |
| 389 | static inline void |
| 390 | bufferevent_trigger_nolock_(struct bufferevent *bufev, short iotype, int options) |
| 391 | { |
| 392 | if ((iotype & EV_READ) && ((options & BEV_TRIG_IGNORE_WATERMARKS) || |
| 393 | evbuffer_get_length(bufev->input) >= bufev->wm_read.low)) |
| 394 | bufferevent_run_readcb_(bufev, options); |
| 395 | if ((iotype & EV_WRITE) && ((options & BEV_TRIG_IGNORE_WATERMARKS) || |
| 396 | evbuffer_get_length(bufev->output) <= bufev->wm_write.low)) |
| 397 | bufferevent_run_writecb_(bufev, options); |
| 398 | } |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 399 | |
| 400 | /** Internal: Add the event 'ev' with timeout tv, unless tv is set to 0, in |
| 401 | * which case add ev with no timeout. */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 402 | int bufferevent_add_event_(struct event *ev, const struct timeval *tv); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 403 | |
| 404 | /* ========= |
| 405 | * These next functions implement timeouts for bufferevents that aren't doing |
| 406 | * anything else with ev_read and ev_write, to handle timeouts. |
| 407 | * ========= */ |
| 408 | /** Internal use: Set up the ev_read and ev_write callbacks so that |
| 409 | * the other "generic_timeout" functions will work on it. Call this from |
| 410 | * the constructor function. */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 411 | void bufferevent_init_generic_timeout_cbs_(struct bufferevent *bev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 412 | /** Internal use: Add or delete the generic timeout events as appropriate. |
| 413 | * (If an event is enabled and a timeout is set, we add the event. Otherwise |
| 414 | * we delete it.) Call this from anything that changes the timeout values, |
| 415 | * that enabled EV_READ or EV_WRITE, or that disables EV_READ or EV_WRITE. */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 416 | int bufferevent_generic_adj_timeouts_(struct bufferevent *bev); |
| 417 | int bufferevent_generic_adj_existing_timeouts_(struct bufferevent *bev); |
| 418 | |
| 419 | enum bufferevent_options bufferevent_get_options_(struct bufferevent *bev); |
| 420 | |
| 421 | const struct sockaddr* |
| 422 | bufferevent_socket_get_conn_address_(struct bufferevent *bev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 423 | |
/** Internal use: We have just successfully read data into an inbuf, so
 * reset the read timeout (if any). */
#define BEV_RESET_GENERIC_READ_TIMEOUT(bev)				\
	do {								\
		if (evutil_timerisset(&(bev)->timeout_read))		\
			event_add(&(bev)->ev_read, &(bev)->timeout_read); \
	} while (0)
/** Internal use: We have just successfully written data from an outbuf, so
 * reset the write timeout (if any). */
#define BEV_RESET_GENERIC_WRITE_TIMEOUT(bev)				\
	do {								\
		if (evutil_timerisset(&(bev)->timeout_write))		\
			event_add(&(bev)->ev_write, &(bev)->timeout_write); \
	} while (0)
/** Internal use: Cancel the generic read/write timeout events. */
#define BEV_DEL_GENERIC_READ_TIMEOUT(bev)	\
	event_del(&(bev)->ev_read)
#define BEV_DEL_GENERIC_WRITE_TIMEOUT(bev)	\
	event_del(&(bev)->ev_write)
| 442 | |
| 443 | |
/** Internal: Given a bufferevent, return its corresponding
 * bufferevent_private. */
#define BEV_UPCAST(b) EVUTIL_UPCAST((b), struct bufferevent_private, bev)

#ifdef EVENT__DISABLE_THREAD_SUPPORT
/* Without thread support there is no lock to take: expand to a no-op. */
#define BEV_LOCK(b) EVUTIL_NIL_STMT_
#define BEV_UNLOCK(b) EVUTIL_NIL_STMT_
#else
/** Internal: Grab the lock (if any) on a bufferevent */
#define BEV_LOCK(b) do {						\
		struct bufferevent_private *locking =  BEV_UPCAST(b);	\
		EVLOCK_LOCK(locking->lock, 0);				\
	} while (0)

/** Internal: Release the lock (if any) on a bufferevent */
#define BEV_UNLOCK(b) do {						\
		struct bufferevent_private *locking =  BEV_UPCAST(b);	\
		EVLOCK_UNLOCK(locking->lock, 0);			\
	} while (0)
#endif
| 464 | |
| 465 | |
| 466 | /* ==== For rate-limiting. */ |
| 467 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 468 | int bufferevent_decrement_write_buckets_(struct bufferevent_private *bev, |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 469 | ev_ssize_t bytes); |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 470 | int bufferevent_decrement_read_buckets_(struct bufferevent_private *bev, |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 471 | ev_ssize_t bytes); |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 472 | ev_ssize_t bufferevent_get_read_max_(struct bufferevent_private *bev); |
| 473 | ev_ssize_t bufferevent_get_write_max_(struct bufferevent_private *bev); |
| 474 | |
| 475 | int bufferevent_ratelim_init_(struct bufferevent_private *bev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 476 | |
| 477 | #ifdef __cplusplus |
| 478 | } |
| 479 | #endif |
| 480 | |
| 481 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 482 | #endif /* BUFFEREVENT_INTERNAL_H_INCLUDED_ */ |