blob: af94a19911572806ee7c80525cdb1400ccacb036 [file] [log] [blame]
/*
 *
 * Copyright 2016, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H
#define GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H

#include <grpc/grpc.h>

#include "src/core/lib/iomgr/exec_ctx.h"

/** \file Tracks resource usage against a pool.

    The current implementation tracks only memory usage, but in the future
    this may be extended to (for example) threads and file descriptors.

    A grpc_resource_quota represents the pooled resources, and
    grpc_resource_user instances attach to the quota and consume those
    resources. They also offer a vector for reclamation: if we become
    resource constrained, grpc_resource_user instances are asked (in turn) to
    free up whatever they can so that the system as a whole can make progress.

    There are three kinds of reclamation that take place, in priority order:
    - an internal reclamation, where cached resource at the resource user
      level is returned to the quota
    - a benign reclamation phase, whereby resources that are in use but are
      not helping anything make progress are reclaimed
    - a destructive reclamation, whereby resources that are helping something
      make progress may be reclaimed so that at least one part of the system
      can complete

    These reclamations are tried in priority order, and only one reclamation
    is outstanding for a quota at any given time (meaning that if a
    destructive reclamation makes progress, we may follow up with a benign
    reclamation).

    Future work will be to expose the current resource pressure so that back
    pressure can be applied to avoid reclamation phases starting.

    Resource users own references to resource quotas, and resource quotas
    maintain lists of users (which users arrange to leave before they are
    destroyed). */

/* Non-zero to enable debug tracing of resource quota operations */
extern int grpc_resource_quota_trace;

/* Take an additional internal (core-only) reference to \a resource_quota.
   Returns a grpc_resource_quota* — presumably the same pointer, for
   call-chaining; confirm against the implementation. */
grpc_resource_quota *grpc_resource_quota_internal_ref(
    grpc_resource_quota *resource_quota);
/* Drop an internal reference previously taken with
   grpc_resource_quota_internal_ref */
void grpc_resource_quota_internal_unref(grpc_exec_ctx *exec_ctx,
                                        grpc_resource_quota *resource_quota);
/* Fetch the resource quota configured in \a channel_args.
   NOTE(review): behavior when no quota is present in the args is not visible
   here — confirm whether a default quota is created. */
grpc_resource_quota *grpc_resource_quota_from_channel_args(
    const grpc_channel_args *channel_args);
80
/* Resource users are kept in (potentially) several intrusive linked lists
   at once. These are the list names. */
typedef enum {
  /* Resource users that are waiting for an allocation */
  GRPC_RULIST_AWAITING_ALLOCATION,
  /* Resource users that have free memory available for internal reclamation */
  GRPC_RULIST_NON_EMPTY_FREE_POOL,
  /* Resource users that have published that a benign reclamation is
     available */
  GRPC_RULIST_RECLAIMER_BENIGN,
  /* Resource users that have published that a destructive reclamation is
     available */
  GRPC_RULIST_RECLAIMER_DESTRUCTIVE,
  /* Number of lists: must be last */
  GRPC_RULIST_COUNT
} grpc_rulist;
Craig Tiller20afa3d2016-10-17 14:52:14 -070096
typedef struct grpc_resource_user grpc_resource_user;

/* Internal doubly-linked list pointers for a resource user. A resource user
   carries one of these per grpc_rulist it can be a member of (see the links[]
   array in grpc_resource_user). */
typedef struct {
  grpc_resource_user *next;
  grpc_resource_user *prev;
} grpc_resource_user_link;
104
struct grpc_resource_user {
  /* The quota this resource user consumes from */
  grpc_resource_quota *resource_quota;

  /* Closure to schedule an allocation under the resource quota combiner lock */
  grpc_closure allocate_closure;
  /* Closure to publish a non empty free pool under the resource quota combiner
     lock */
  grpc_closure add_to_free_pool_closure;

#ifndef NDEBUG
  /* Canary object to detect leaked resource users with ASAN */
  void *asan_canary;
#endif

  /* Mutex guarding this resource user's mutable state.
     NOTE(review): exactly which fields it covers is not visible in this
     header — confirm against the implementation. */
  gpr_mu mu;
  /* Total allocated memory outstanding by this resource user;
     always positive */
  int64_t allocated;
  /* The amount of memory this user has cached for its own use: to avoid quota
     contention, each resource user can keep some memory aside from the quota,
     and the quota can pull it back under memory pressure.
     This value can become negative if more memory has been requested than
     existed in the free pool, at which point the quota is consulted to bring
     this value non-negative (asynchronously). */
  int64_t free_pool;
  /* A list of closures to call once free_pool becomes non-negative - ie when
     all outstanding allocations have been granted. */
  grpc_closure_list on_allocated;
  /* True if we are currently trying to allocate from the quota, false if not */
  bool allocating;
  /* True if we are currently trying to add ourselves to the non-empty free
     pool list (GRPC_RULIST_NON_EMPTY_FREE_POOL), false otherwise */
  bool added_to_free_pool;

  /* Reclaimers: index 0 is the benign reclaimer, 1 is the destructive
     reclaimer */
  grpc_closure *reclaimers[2];
  /* Trampoline closures to finish reclamation and re-enter the quota combiner
     lock */
  grpc_closure post_reclaimer_closure[2];

  /* Closure to execute under the quota combiner to de-register and shutdown
     the resource user */
  grpc_closure destroy_closure;
  /* User supplied closure to call once the user has finished shutting down AND
     all outstanding allocations have been freed */
  gpr_atm on_done_destroy_closure;

  /* Links in the various grpc_rulist lists; one link pair per list */
  grpc_resource_user_link links[GRPC_RULIST_COUNT];

  /* The name of this resource user, for debugging/tracing */
  char *name;
};
160
/* Initialize \a resource_user to consume from \a resource_quota.
   \a name is used for debugging/tracing. */
void grpc_resource_user_init(grpc_resource_user *resource_user,
                             grpc_resource_quota *resource_quota,
                             const char *name);
/* Begin shutting down \a resource_user: \a on_done is the user supplied
   closure called once the user has finished shutting down AND all
   outstanding allocations have been freed (see on_done_destroy_closure). */
void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
                                 grpc_resource_user *resource_user,
                                 grpc_closure *on_done);
/* Destroy \a resource_user, de-registering it from its quota. */
void grpc_resource_user_destroy(grpc_exec_ctx *exec_ctx,
                                grpc_resource_user *resource_user);

/* Allocate from the resource user (and its quota).
   If optional_on_done is NULL, then allocate immediately. This may push the
   quota over-limit, at which point reclamation will kick in.
   If optional_on_done is non-NULL, it will be scheduled when the allocation
   has been granted by the quota. */
void grpc_resource_user_alloc(grpc_exec_ctx *exec_ctx,
                              grpc_resource_user *resource_user, size_t size,
                              grpc_closure *optional_on_done);
/* Release memory back to the quota */
void grpc_resource_user_free(grpc_exec_ctx *exec_ctx,
                             grpc_resource_user *resource_user, size_t size);
/* Post a memory reclaimer to the resource user. Only one benign and one
   destructive reclaimer can be posted at once. When executed, the reclaimer
   MUST call grpc_resource_user_finish_reclamation before it completes, to
   return control to the resource quota. */
void grpc_resource_user_post_reclaimer(grpc_exec_ctx *exec_ctx,
                                       grpc_resource_user *resource_user,
                                       bool destructive, grpc_closure *closure);
/* Finish a reclamation step, returning control to the resource quota so that
   further reclamations may be scheduled. */
void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
                                           grpc_resource_user *resource_user);
Craig Tiller20afa3d2016-10-17 14:52:14 -0700191
/* Helper to allocate slices from a resource user */
typedef struct grpc_resource_user_slice_allocator {
  /* Closure scheduled once the quota has granted the requested memory
     (presumably — the wiring is in the implementation; confirm there) */
  grpc_closure on_allocated;
  /* Closure invoked once slice allocation has completed — presumably the
     user-supplied cb from grpc_resource_user_slice_allocator_init;
     confirm against the implementation */
  grpc_closure on_done;
  /* Length of each slice to allocate (see grpc_resource_user_alloc_slices) */
  size_t length;
  /* Number of slices to allocate (see grpc_resource_user_alloc_slices) */
  size_t count;
  /* Destination buffer that allocated slices are placed into */
  gpr_slice_buffer *dest;
  /* The resource user that allocations are charged against */
  grpc_resource_user *resource_user;
} grpc_resource_user_slice_allocator;
201
/* Initialize a slice allocator that allocates against \a resource_user.
   NOTE(review): \a cb (with argument \a p) is presumably invoked when a
   batch of slices requested via grpc_resource_user_alloc_slices has been
   allocated — confirm against the implementation. */
void grpc_resource_user_slice_allocator_init(
    grpc_resource_user_slice_allocator *slice_allocator,
    grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p);

/* Allocate \a count slices of length \a length into \a dest. */
void grpc_resource_user_alloc_slices(
    grpc_exec_ctx *exec_ctx,
    grpc_resource_user_slice_allocator *slice_allocator, size_t length,
    size_t count, gpr_slice_buffer *dest);

#endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */