Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1 | /* |
| 2 | * |
Craig Tiller | 0605995 | 2015-02-18 08:34:56 -0800 | [diff] [blame] | 3 | * Copyright 2015, Google Inc. |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 4 | * All rights reserved. |
| 5 | * |
| 6 | * Redistribution and use in source and binary forms, with or without |
| 7 | * modification, are permitted provided that the following conditions are |
| 8 | * met: |
| 9 | * |
| 10 | * * Redistributions of source code must retain the above copyright |
| 11 | * notice, this list of conditions and the following disclaimer. |
| 12 | * * Redistributions in binary form must reproduce the above |
| 13 | * copyright notice, this list of conditions and the following disclaimer |
| 14 | * in the documentation and/or other materials provided with the |
| 15 | * distribution. |
| 16 | * * Neither the name of Google Inc. nor the names of its |
| 17 | * contributors may be used to endorse or promote products derived from |
| 18 | * this software without specific prior written permission. |
| 19 | * |
| 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 31 | * |
| 32 | */ |
| 33 | |
Nicolas "Pixel" Noble | d5a9985 | 2015-01-24 01:27:48 -0800 | [diff] [blame] | 34 | #include "src/core/iomgr/sockaddr.h" |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 35 | #include "src/core/transport/metadata.h" |
| 36 | |
Craig Tiller | 9fa41b9 | 2015-04-10 15:08:03 -0700 | [diff] [blame] | 37 | #include <assert.h> |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 38 | #include <stddef.h> |
| 39 | #include <string.h> |
| 40 | |
| 41 | #include <grpc/support/alloc.h> |
Craig Tiller | 9fa41b9 | 2015-04-10 15:08:03 -0700 | [diff] [blame] | 42 | #include <grpc/support/atm.h> |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 43 | #include <grpc/support/log.h> |
| 44 | #include "src/core/support/murmur_hash.h" |
ctiller | 430c499 | 2014-12-11 09:15:41 -0800 | [diff] [blame] | 45 | #include "src/core/transport/chttp2/bin_encoder.h" |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 46 | #include <grpc/support/time.h> |
| 47 | |
| 48 | #define INITIAL_STRTAB_CAPACITY 4 |
| 49 | #define INITIAL_MDTAB_CAPACITY 4 |
| 50 | |
/* Interned string: canonical in-memory representation of a metadata string.
   Refcounted, hashed, and chained into grpc_mdctx.strtab. A pointer to one
   of these is handed out as a grpc_mdstr (hence the layout constraint). */
typedef struct internal_string {
  /* must be byte compatible with grpc_mdstr */
  gpr_slice slice;
  gpr_uint32 hash;

  /* private only data */
  /* refcount; guarded by context->mu (not atomic) */
  gpr_uint32 refs;
  /* 1 once base64_and_huffman below has been computed */
  gpr_uint8 has_base64_and_huffman_encoded;
  /* slice refcount vtable installed for non-inlined strings */
  gpr_slice_refcount refcount;

  /* lazily computed base64+huffman encoding of slice; valid only when
     has_base64_and_huffman_encoded is set */
  gpr_slice base64_and_huffman;

  /* owning metadata context */
  grpc_mdctx *context;

  /* next string in the same strtab hash bucket */
  struct internal_string *bucket_next;
} internal_string;
| 67 | |
/* Interned key/value metadata element; chained into grpc_mdctx.mdtab.
   A pointer to one of these is handed out as a grpc_mdelem. */
typedef struct internal_metadata {
  /* must be byte compatible with grpc_mdelem */
  internal_string *key;
  internal_string *value;

  /* atomic refcount: may drop to zero while the element stays in mdtab
     (counted in mdtab_free), so a later lookup can revive it */
  gpr_atm refcnt;

  /* private only data */
  /* opaque user data with its destructor; see grpc_mdelem_set_user_data */
  void *user_data;
  void (*destroy_user_data)(void *user_data);

  /* owning metadata context */
  grpc_mdctx *context;
  /* next element in the same mdtab hash bucket */
  struct internal_metadata *bucket_next;
} internal_metadata;
| 82 | |
/* Metadata context: interning tables for strings and key/value elements.
   All mutable fields are guarded by mu. */
struct grpc_mdctx {
  /* seed for murmur hashing; randomized so remote peers cannot force
     hash-bucket collisions */
  gpr_uint32 hash_seed;
  /* number of outstanding grpc_mdctx_ref holders; guarded by mu */
  int refs;

  gpr_mu mu;

  /* open-chained hash table of interned strings */
  internal_string **strtab;
  size_t strtab_count;
  size_t strtab_capacity;

  /* open-chained hash table of interned metadata elements */
  internal_metadata **mdtab;
  size_t mdtab_count;
  /* number of mdtab entries whose refcnt is zero (garbage collectable) */
  size_t mdtab_free;
  size_t mdtab_capacity;
};
| 98 | |
| 99 | static void internal_string_ref(internal_string *s); |
| 100 | static void internal_string_unref(internal_string *s); |
| 101 | static void discard_metadata(grpc_mdctx *ctx); |
| 102 | static void gc_mdtab(grpc_mdctx *ctx); |
Vijay Pai | 7d3d9ca | 2015-04-02 14:34:27 -0700 | [diff] [blame] | 103 | static void metadata_context_destroy_locked(grpc_mdctx *ctx); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 104 | |
| 105 | static void lock(grpc_mdctx *ctx) { gpr_mu_lock(&ctx->mu); } |
| 106 | |
static void unlock(grpc_mdctx *ctx) {
  /* If the context has been orphaned we'd like to delete it soon. We check
     conditions in unlock as it signals the end of mutations on a context.

     We need to ensure all grpc_mdelem and grpc_mdstr elements have been deleted
     first. This is equivalent to saying that both tables have zero counts,
     which is equivalent to saying that strtab_count is zero (as mdelem's MUST
     reference an mdstr for their key and value slots).

     To encourage that to happen, we start discarding zero reference count
     mdelems on every unlock (instead of the usual 'I'm too loaded' trigger
     case), since otherwise we can be stuck waiting for a garbage collection
     that will never happen. */
  if (ctx->refs == 0) {
    /* sweep zero-refcount elements on every unlock once orphaned, so their
       key/value string refs get released promptly */
    gc_mdtab(ctx);
    if (ctx->mdtab_count && ctx->mdtab_count == ctx->mdtab_free) {
      discard_metadata(ctx);
    }
    if (ctx->strtab_count == 0) {
      /* frees ctx; the mutex is released (and destroyed) inside */
      metadata_context_destroy_locked(ctx);
      return;
    }
  }
  gpr_mu_unlock(&ctx->mu);
}
| 134 | |
Craig Tiller | 9fa41b9 | 2015-04-10 15:08:03 -0700 | [diff] [blame] | 135 | static void ref_md_locked(internal_metadata *md) { |
| 136 | if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 1)) { |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 137 | md->context->mdtab_free--; |
| 138 | } |
| 139 | } |
| 140 | |
| 141 | grpc_mdctx *grpc_mdctx_create_with_seed(gpr_uint32 seed) { |
| 142 | grpc_mdctx *ctx = gpr_malloc(sizeof(grpc_mdctx)); |
| 143 | |
Craig Tiller | 9be83ee | 2015-02-18 14:16:15 -0800 | [diff] [blame] | 144 | ctx->refs = 1; |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 145 | ctx->hash_seed = seed; |
| 146 | gpr_mu_init(&ctx->mu); |
| 147 | ctx->strtab = gpr_malloc(sizeof(internal_string *) * INITIAL_STRTAB_CAPACITY); |
| 148 | memset(ctx->strtab, 0, sizeof(grpc_mdstr *) * INITIAL_STRTAB_CAPACITY); |
| 149 | ctx->strtab_count = 0; |
| 150 | ctx->strtab_capacity = INITIAL_STRTAB_CAPACITY; |
| 151 | ctx->mdtab = gpr_malloc(sizeof(internal_metadata *) * INITIAL_MDTAB_CAPACITY); |
| 152 | memset(ctx->mdtab, 0, sizeof(grpc_mdelem *) * INITIAL_MDTAB_CAPACITY); |
| 153 | ctx->mdtab_count = 0; |
| 154 | ctx->mdtab_capacity = INITIAL_MDTAB_CAPACITY; |
| 155 | ctx->mdtab_free = 0; |
| 156 | |
| 157 | return ctx; |
| 158 | } |
| 159 | |
Craig Tiller | 32946d3 | 2015-01-15 11:37:30 -0800 | [diff] [blame] | 160 | grpc_mdctx *grpc_mdctx_create(void) { |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 161 | /* This seed is used to prevent remote connections from controlling hash table |
| 162 | * collisions. It needs to be somewhat unpredictable to a remote connection. |
| 163 | */ |
| 164 | return grpc_mdctx_create_with_seed(gpr_now().tv_nsec); |
| 165 | } |
| 166 | |
| 167 | static void discard_metadata(grpc_mdctx *ctx) { |
| 168 | size_t i; |
| 169 | internal_metadata *next, *cur; |
| 170 | |
| 171 | for (i = 0; i < ctx->mdtab_capacity; i++) { |
| 172 | cur = ctx->mdtab[i]; |
| 173 | while (cur) { |
Craig Tiller | 9fa41b9 | 2015-04-10 15:08:03 -0700 | [diff] [blame] | 174 | GPR_ASSERT(gpr_atm_acq_load(&cur->refcnt) == 0); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 175 | next = cur->bucket_next; |
| 176 | internal_string_unref(cur->key); |
| 177 | internal_string_unref(cur->value); |
| 178 | if (cur->user_data) { |
| 179 | cur->destroy_user_data(cur->user_data); |
| 180 | } |
| 181 | gpr_free(cur); |
| 182 | cur = next; |
| 183 | ctx->mdtab_free--; |
| 184 | ctx->mdtab_count--; |
| 185 | } |
| 186 | ctx->mdtab[i] = NULL; |
| 187 | } |
| 188 | } |
| 189 | |
/* Final teardown: called with ctx->mu held, once the context is orphaned
   and both tables are empty. Releases the lock before destroying it. */
static void metadata_context_destroy_locked(grpc_mdctx *ctx) {
  GPR_ASSERT(ctx->strtab_count == 0);
  GPR_ASSERT(ctx->mdtab_count == 0);
  GPR_ASSERT(ctx->mdtab_free == 0);
  gpr_free(ctx->strtab);
  gpr_free(ctx->mdtab);
  /* the mutex must be unlocked before it can be destroyed */
  gpr_mu_unlock(&ctx->mu);
  gpr_mu_destroy(&ctx->mu);
  gpr_free(ctx);
}
| 200 | |
Craig Tiller | 9be83ee | 2015-02-18 14:16:15 -0800 | [diff] [blame] | 201 | void grpc_mdctx_ref(grpc_mdctx *ctx) { |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 202 | lock(ctx); |
Craig Tiller | 9be83ee | 2015-02-18 14:16:15 -0800 | [diff] [blame] | 203 | GPR_ASSERT(ctx->refs > 0); |
| 204 | ctx->refs++; |
| 205 | unlock(ctx); |
| 206 | } |
| 207 | |
| 208 | void grpc_mdctx_unref(grpc_mdctx *ctx) { |
| 209 | lock(ctx); |
| 210 | GPR_ASSERT(ctx->refs > 0); |
| 211 | ctx->refs--; |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 212 | unlock(ctx); |
| 213 | } |
| 214 | |
| 215 | static void grow_strtab(grpc_mdctx *ctx) { |
| 216 | size_t capacity = ctx->strtab_capacity * 2; |
| 217 | size_t i; |
| 218 | internal_string **strtab = gpr_malloc(sizeof(internal_string *) * capacity); |
| 219 | internal_string *s, *next; |
| 220 | memset(strtab, 0, sizeof(internal_string *) * capacity); |
| 221 | |
| 222 | for (i = 0; i < ctx->strtab_capacity; i++) { |
| 223 | for (s = ctx->strtab[i]; s; s = next) { |
| 224 | next = s->bucket_next; |
| 225 | s->bucket_next = strtab[s->hash % capacity]; |
| 226 | strtab[s->hash % capacity] = s; |
| 227 | } |
| 228 | } |
| 229 | |
| 230 | gpr_free(ctx->strtab); |
| 231 | ctx->strtab = strtab; |
| 232 | ctx->strtab_capacity = capacity; |
| 233 | } |
| 234 | |
| 235 | static void internal_destroy_string(internal_string *is) { |
| 236 | internal_string **prev_next; |
| 237 | internal_string *cur; |
| 238 | grpc_mdctx *ctx = is->context; |
ctiller | 430c499 | 2014-12-11 09:15:41 -0800 | [diff] [blame] | 239 | if (is->has_base64_and_huffman_encoded) { |
| 240 | gpr_slice_unref(is->base64_and_huffman); |
| 241 | } |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 242 | for (prev_next = &ctx->strtab[is->hash % ctx->strtab_capacity], |
| 243 | cur = *prev_next; |
| 244 | cur != is; prev_next = &cur->bucket_next, cur = cur->bucket_next) |
| 245 | ; |
| 246 | *prev_next = cur->bucket_next; |
| 247 | ctx->strtab_count--; |
| 248 | gpr_free(is); |
| 249 | } |
| 250 | |
| 251 | static void internal_string_ref(internal_string *s) { ++s->refs; } |
| 252 | |
| 253 | static void internal_string_unref(internal_string *s) { |
| 254 | GPR_ASSERT(s->refs > 0); |
| 255 | if (0 == --s->refs) { |
| 256 | internal_destroy_string(s); |
| 257 | } |
| 258 | } |
| 259 | |
| 260 | static void slice_ref(void *p) { |
| 261 | internal_string *is = |
| 262 | (internal_string *)((char *)p - offsetof(internal_string, refcount)); |
| 263 | grpc_mdctx *ctx = is->context; |
| 264 | lock(ctx); |
| 265 | internal_string_ref(is); |
| 266 | unlock(ctx); |
| 267 | } |
| 268 | |
| 269 | static void slice_unref(void *p) { |
| 270 | internal_string *is = |
| 271 | (internal_string *)((char *)p - offsetof(internal_string, refcount)); |
| 272 | grpc_mdctx *ctx = is->context; |
| 273 | lock(ctx); |
| 274 | internal_string_unref(is); |
| 275 | unlock(ctx); |
| 276 | } |
| 277 | |
| 278 | grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str) { |
| 279 | return grpc_mdstr_from_buffer(ctx, (const gpr_uint8 *)str, strlen(str)); |
| 280 | } |
| 281 | |
| 282 | grpc_mdstr *grpc_mdstr_from_slice(grpc_mdctx *ctx, gpr_slice slice) { |
| 283 | grpc_mdstr *result = grpc_mdstr_from_buffer(ctx, GPR_SLICE_START_PTR(slice), |
| 284 | GPR_SLICE_LENGTH(slice)); |
| 285 | gpr_slice_unref(slice); |
| 286 | return result; |
| 287 | } |
| 288 | |
/* Return the interned string for (buf, length), creating it if needed.
   The returned grpc_mdstr carries one ref that the caller must release. */
grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
                                   size_t length) {
  gpr_uint32 hash = gpr_murmur_hash3(buf, length, ctx->hash_seed);
  internal_string *s;

  lock(ctx);

  /* search for an existing string */
  for (s = ctx->strtab[hash % ctx->strtab_capacity]; s; s = s->bucket_next) {
    if (s->hash == hash && GPR_SLICE_LENGTH(s->slice) == length &&
        0 == memcmp(buf, GPR_SLICE_START_PTR(s->slice), length)) {
      internal_string_ref(s);
      unlock(ctx);
      return (grpc_mdstr *)s;
    }
  }

  /* not found: create a new string */
  if (length + 1 < GPR_SLICE_INLINED_SIZE) {
    /* string data goes directly into the slice (+1 reserves room for the
       NUL terminator appended below) */
    s = gpr_malloc(sizeof(internal_string));
    s->refs = 1;
    s->slice.refcount = NULL;
    memcpy(s->slice.data.inlined.bytes, buf, length);
    s->slice.data.inlined.bytes[length] = 0;
    s->slice.data.inlined.length = length;
  } else {
    /* string data goes after the internal_string header, and we +1 for null
       terminator */
    s = gpr_malloc(sizeof(internal_string) + length + 1);
    s->refs = 1;
    /* route slice ref/unref through this string's own refcount */
    s->refcount.ref = slice_ref;
    s->refcount.unref = slice_unref;
    s->slice.refcount = &s->refcount;
    s->slice.data.refcounted.bytes = (gpr_uint8 *)(s + 1);
    s->slice.data.refcounted.length = length;
    memcpy(s->slice.data.refcounted.bytes, buf, length);
    /* add a null terminator for cheap c string conversion when desired */
    s->slice.data.refcounted.bytes[length] = 0;
  }
  s->has_base64_and_huffman_encoded = 0;
  s->hash = hash;
  s->context = ctx;
  /* push onto the front of the hash bucket chain */
  s->bucket_next = ctx->strtab[hash % ctx->strtab_capacity];
  ctx->strtab[hash % ctx->strtab_capacity] = s;

  ctx->strtab_count++;

  /* grow when average chain length exceeds 2 */
  if (ctx->strtab_count > ctx->strtab_capacity * 2) {
    grow_strtab(ctx);
  }

  unlock(ctx);

  return (grpc_mdstr *)s;
}
| 345 | |
/* Free every mdtab element whose refcnt is zero. Caller must hold ctx->mu.
   Postcondition: mdtab_free == 0 (every free element was collected). */
static void gc_mdtab(grpc_mdctx *ctx) {
  size_t i;
  internal_metadata **prev_next;
  internal_metadata *md, *next;

  for (i = 0; i < ctx->mdtab_capacity; i++) {
    prev_next = &ctx->mdtab[i];
    for (md = ctx->mdtab[i]; md; md = next) {
      next = md->bucket_next;
      if (gpr_atm_acq_load(&md->refcnt) == 0) {
        /* dead element: release its string refs and user data, then
           unlink it from the bucket chain */
        internal_string_unref(md->key);
        internal_string_unref(md->value);
        if (md->user_data) {
          md->destroy_user_data(md->user_data);
        }
        gpr_free(md);
        *prev_next = next;
        ctx->mdtab_free--;
        ctx->mdtab_count--;
      } else {
        prev_next = &md->bucket_next;
      }
    }
  }

  GPR_ASSERT(ctx->mdtab_free == 0);
}
| 373 | |
| 374 | static void grow_mdtab(grpc_mdctx *ctx) { |
| 375 | size_t capacity = ctx->mdtab_capacity * 2; |
| 376 | size_t i; |
| 377 | internal_metadata **mdtab = |
| 378 | gpr_malloc(sizeof(internal_metadata *) * capacity); |
| 379 | internal_metadata *md, *next; |
| 380 | gpr_uint32 hash; |
| 381 | memset(mdtab, 0, sizeof(internal_metadata *) * capacity); |
| 382 | |
| 383 | for (i = 0; i < ctx->mdtab_capacity; i++) { |
| 384 | for (md = ctx->mdtab[i]; md; md = next) { |
ctiller | fb93d19 | 2014-12-15 10:40:05 -0800 | [diff] [blame] | 385 | hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 386 | next = md->bucket_next; |
| 387 | md->bucket_next = mdtab[hash % capacity]; |
| 388 | mdtab[hash % capacity] = md; |
| 389 | } |
| 390 | } |
| 391 | |
| 392 | gpr_free(ctx->mdtab); |
| 393 | ctx->mdtab = mdtab; |
| 394 | ctx->mdtab_capacity = capacity; |
| 395 | } |
| 396 | |
| 397 | static void rehash_mdtab(grpc_mdctx *ctx) { |
| 398 | if (ctx->mdtab_free > ctx->mdtab_capacity / 4) { |
| 399 | gc_mdtab(ctx); |
| 400 | } else { |
| 401 | grow_mdtab(ctx); |
| 402 | } |
| 403 | } |
| 404 | |
/* Return the interned element for (mkey, mvalue), creating it if needed.
   Takes ownership of the caller's refs on mkey and mvalue. The returned
   grpc_mdelem carries one ref that the caller must release. */
grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
                                               grpc_mdstr *mkey,
                                               grpc_mdstr *mvalue) {
  internal_string *key = (internal_string *)mkey;
  internal_string *value = (internal_string *)mvalue;
  gpr_uint32 hash = GRPC_MDSTR_KV_HASH(mkey->hash, mvalue->hash);
  internal_metadata *md;

  /* strings must have been interned in this same context */
  GPR_ASSERT(key->context == ctx);
  GPR_ASSERT(value->context == ctx);

  lock(ctx);

  /* search for an existing pair */
  for (md = ctx->mdtab[hash % ctx->mdtab_capacity]; md; md = md->bucket_next) {
    if (md->key == key && md->value == value) {
      ref_md_locked(md);
      /* the existing element already holds its own string refs; drop the
         refs transferred in by the caller */
      internal_string_unref(key);
      internal_string_unref(value);
      unlock(ctx);
      return (grpc_mdelem *)md;
    }
  }

  /* not found: create a new pair (adopting the caller's string refs) */
  md = gpr_malloc(sizeof(internal_metadata));
  gpr_atm_rel_store(&md->refcnt, 1);
  md->context = ctx;
  md->key = key;
  md->value = value;
  md->user_data = NULL;
  md->destroy_user_data = NULL;
  md->bucket_next = ctx->mdtab[hash % ctx->mdtab_capacity];
  ctx->mdtab[hash % ctx->mdtab_capacity] = md;
  ctx->mdtab_count++;

  /* rehash when average chain length exceeds 2 */
  if (ctx->mdtab_count > ctx->mdtab_capacity * 2) {
    rehash_mdtab(ctx);
  }

  unlock(ctx);

  return (grpc_mdelem *)md;
}
| 449 | |
| 450 | grpc_mdelem *grpc_mdelem_from_strings(grpc_mdctx *ctx, const char *key, |
| 451 | const char *value) { |
| 452 | return grpc_mdelem_from_metadata_strings(ctx, |
| 453 | grpc_mdstr_from_string(ctx, key), |
| 454 | grpc_mdstr_from_string(ctx, value)); |
| 455 | } |
| 456 | |
| 457 | grpc_mdelem *grpc_mdelem_from_slices(grpc_mdctx *ctx, gpr_slice key, |
| 458 | gpr_slice value) { |
| 459 | return grpc_mdelem_from_metadata_strings(ctx, grpc_mdstr_from_slice(ctx, key), |
| 460 | grpc_mdstr_from_slice(ctx, value)); |
| 461 | } |
| 462 | |
| 463 | grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_mdctx *ctx, |
| 464 | const char *key, |
| 465 | const gpr_uint8 *value, |
| 466 | size_t value_length) { |
| 467 | return grpc_mdelem_from_metadata_strings( |
| 468 | ctx, grpc_mdstr_from_string(ctx, key), |
| 469 | grpc_mdstr_from_buffer(ctx, value, value_length)); |
| 470 | } |
| 471 | |
/* Add a ref to gmd; returns gmd for caller convenience.
   Lock-free: the caller must already hold a ref. */
grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *gmd) {
  internal_metadata *md = (internal_metadata *)gmd;
  /* we can assume the ref count is >= 1 as the application is calling
     this function - meaning that no adjustment to mdtab_free is necessary,
     simplifying the logic here to be just an atomic increment */
  /* use C assert to have this removed in opt builds */
  assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
  gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
  return gmd;
}
| 482 | |
/* Drop a ref on gmd. A 1 -> 0 transition leaves the element in mdtab
   (revivable by a later lookup) but marks it garbage-collectable by
   bumping mdtab_free; the element is actually freed by gc_mdtab. */
void grpc_mdelem_unref(grpc_mdelem *gmd) {
  internal_metadata *md = (internal_metadata *)gmd;
  grpc_mdctx *ctx = md->context;
  lock(ctx);
  assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
  if (1 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
    ctx->mdtab_free++;
  }
  unlock(ctx);
}
| 493 | |
| 494 | const char *grpc_mdstr_as_c_string(grpc_mdstr *s) { |
| 495 | return (const char *)GPR_SLICE_START_PTR(s->slice); |
| 496 | } |
| 497 | |
| 498 | grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *gs) { |
| 499 | internal_string *s = (internal_string *)gs; |
| 500 | grpc_mdctx *ctx = s->context; |
| 501 | lock(ctx); |
| 502 | internal_string_ref(s); |
| 503 | unlock(ctx); |
| 504 | return gs; |
| 505 | } |
| 506 | |
| 507 | void grpc_mdstr_unref(grpc_mdstr *gs) { |
| 508 | internal_string *s = (internal_string *)gs; |
| 509 | grpc_mdctx *ctx = s->context; |
| 510 | lock(ctx); |
| 511 | internal_string_unref(s); |
| 512 | unlock(ctx); |
| 513 | } |
| 514 | |
| 515 | size_t grpc_mdctx_get_mdtab_capacity_test_only(grpc_mdctx *ctx) { |
| 516 | return ctx->mdtab_capacity; |
| 517 | } |
| 518 | |
| 519 | size_t grpc_mdctx_get_mdtab_count_test_only(grpc_mdctx *ctx) { |
| 520 | return ctx->mdtab_count; |
| 521 | } |
| 522 | |
| 523 | size_t grpc_mdctx_get_mdtab_free_test_only(grpc_mdctx *ctx) { |
| 524 | return ctx->mdtab_free; |
| 525 | } |
| 526 | |
| 527 | void *grpc_mdelem_get_user_data(grpc_mdelem *md, |
| 528 | void (*if_destroy_func)(void *)) { |
| 529 | internal_metadata *im = (internal_metadata *)md; |
| 530 | return im->destroy_user_data == if_destroy_func ? im->user_data : NULL; |
| 531 | } |
| 532 | |
| 533 | void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *), |
| 534 | void *user_data) { |
| 535 | internal_metadata *im = (internal_metadata *)md; |
| 536 | GPR_ASSERT((user_data == NULL) == (destroy_func == NULL)); |
| 537 | if (im->destroy_user_data) { |
| 538 | im->destroy_user_data(im->user_data); |
| 539 | } |
| 540 | im->destroy_user_data = destroy_func; |
| 541 | im->user_data = user_data; |
| 542 | } |
ctiller | 430c499 | 2014-12-11 09:15:41 -0800 | [diff] [blame] | 543 | |
| 544 | gpr_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *gs) { |
| 545 | internal_string *s = (internal_string *)gs; |
| 546 | gpr_slice slice; |
| 547 | grpc_mdctx *ctx = s->context; |
| 548 | lock(ctx); |
| 549 | if (!s->has_base64_and_huffman_encoded) { |
| 550 | s->base64_and_huffman = |
| 551 | grpc_chttp2_base64_encode_and_huffman_compress(s->slice); |
ctiller | 33023c4 | 2014-12-12 16:28:33 -0800 | [diff] [blame] | 552 | s->has_base64_and_huffman_encoded = 1; |
ctiller | 430c499 | 2014-12-11 09:15:41 -0800 | [diff] [blame] | 553 | } |
| 554 | slice = s->base64_and_huffman; |
| 555 | unlock(ctx); |
| 556 | return slice; |
Craig Tiller | 190d360 | 2015-02-18 09:23:38 -0800 | [diff] [blame] | 557 | } |
Craig Tiller | fe0104a | 2015-04-14 09:19:12 -0700 | [diff] [blame] | 558 | |
| 559 | void grpc_mdctx_lock(grpc_mdctx *ctx) { lock(ctx); } |
| 560 | |
/* As grpc_mdelem_unref, but for callers already holding the context lock
   (via grpc_mdctx_lock); avoids a lock/unlock cycle per element. */
void grpc_mdctx_locked_mdelem_unref(grpc_mdctx *ctx, grpc_mdelem *gmd) {
  internal_metadata *md = (internal_metadata *)gmd;
  grpc_mdctx *elem_ctx = md->context;
  /* the element must belong to the locked context */
  GPR_ASSERT(ctx == elem_ctx);
  assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
  if (1 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
    ctx->mdtab_free++;
  }
}
| 570 | |
| 571 | void grpc_mdctx_unlock(grpc_mdctx *ctx) { unlock(ctx); } |
Craig Tiller | b96d001 | 2015-05-06 15:33:23 -0700 | [diff] [blame] | 572 | |
| 573 | int grpc_mdstr_is_legal_header(grpc_mdstr *s) { |
| 574 | /* TODO(ctiller): consider caching this, or computing it on construction */ |
| 575 | const gpr_uint8 *p = GPR_SLICE_START_PTR(s->slice); |
| 576 | const gpr_uint8 *e = GPR_SLICE_END_PTR(s->slice); |
| 577 | for (; p != e; p++) { |
| 578 | if (*p < 32 || *p > 126) return 0; |
| 579 | } |
| 580 | return 1; |
| 581 | } |
| 582 | |
| 583 | int grpc_mdstr_is_bin_suffixed(grpc_mdstr *s) { |
| 584 | /* TODO(ctiller): consider caching this */ |
| 585 | return grpc_is_binary_header((const char *)GPR_SLICE_START_PTR(s->slice), |
| 586 | GPR_SLICE_LENGTH(s->slice)); |
| 587 | } |