/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/iomgr/sockaddr.h"
#include "src/core/transport/metadata.h"

#include <assert.h>
#include <stddef.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include "src/core/support/murmur_hash.h"
#include "src/core/transport/chttp2/bin_encoder.h"
#include <grpc/support/time.h>

#define INITIAL_STRTAB_CAPACITY 4
#define INITIAL_MDTAB_CAPACITY 4

#ifdef GRPC_METADATA_REFCOUNT_DEBUG
#define DEBUG_ARGS , const char *file, int line
#define FWD_DEBUG_ARGS , file, line
#define INTERNAL_STRING_REF(s) internal_string_ref((s), __FILE__, __LINE__)
#define INTERNAL_STRING_UNREF(s) internal_string_unref((s), __FILE__, __LINE__)
#define REF_MD_LOCKED(s) ref_md_locked((s), __FILE__, __LINE__)
#else
#define DEBUG_ARGS
#define FWD_DEBUG_ARGS
#define INTERNAL_STRING_REF(s) internal_string_ref((s))
#define INTERNAL_STRING_UNREF(s) internal_string_unref((s))
#define REF_MD_LOCKED(s) ref_md_locked((s))
#endif

typedef void (*destroy_user_data_func)(void *user_data);

typedef struct internal_string {
  /* must be byte compatible with grpc_mdstr */
  gpr_slice slice;
  gpr_uint32 hash;

  /* private only data */
  gpr_uint32 refs;
  gpr_uint8 has_base64_and_huffman_encoded;
  gpr_slice_refcount refcount;

  gpr_slice base64_and_huffman;

  grpc_mdctx *context;

  struct internal_string *bucket_next;
} internal_string;

typedef struct internal_metadata {
  /* must be byte compatible with grpc_mdelem */
  internal_string *key;
  internal_string *value;

  gpr_atm refcnt;

  /* private only data */
  gpr_mu mu_user_data;
  gpr_atm destroy_user_data;
  gpr_atm user_data;

  grpc_mdctx *context;
  struct internal_metadata *bucket_next;
} internal_metadata;

struct grpc_mdctx {
  gpr_uint32 hash_seed;
  int refs;

  gpr_mu mu;

  internal_string **strtab;
  size_t strtab_count;
  size_t strtab_capacity;

  internal_metadata **mdtab;
  size_t mdtab_count;
  size_t mdtab_free;
  size_t mdtab_capacity;
};

static void internal_string_ref(internal_string *s DEBUG_ARGS);
static void internal_string_unref(internal_string *s DEBUG_ARGS);
static void discard_metadata(grpc_mdctx *ctx);
static void gc_mdtab(grpc_mdctx *ctx);
static void metadata_context_destroy_locked(grpc_mdctx *ctx);

static void lock(grpc_mdctx *ctx) { gpr_mu_lock(&ctx->mu); }

static void unlock(grpc_mdctx *ctx) {
  /* If the context has been orphaned we'd like to delete it soon. We check
     conditions in unlock as it signals the end of mutations on a context.

     We need to ensure all grpc_mdelem and grpc_mdstr elements have been
     deleted first. This is equivalent to saying that both tables have zero
     counts, which is equivalent to saying that strtab_count is zero (as
     mdelems MUST reference an mdstr for their key and value slots).

     To encourage that to happen, we start discarding zero reference count
     mdelems on every unlock (instead of the usual 'I'm too loaded' trigger
     case), since otherwise we can be stuck waiting for a garbage collection
     that will never happen. */
  if (ctx->refs == 0) {
/* uncomment if you're having trouble diagnosing an mdelem leak to make
   things clearer (slows down destruction a lot, however) */
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
    gc_mdtab(ctx);
#endif
    if (ctx->mdtab_count && ctx->mdtab_count == ctx->mdtab_free) {
      discard_metadata(ctx);
    }
    if (ctx->strtab_count == 0) {
      metadata_context_destroy_locked(ctx);
      return;
    }
  }
  gpr_mu_unlock(&ctx->mu);
}

static void ref_md_locked(internal_metadata *md DEBUG_ARGS) {
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "ELM REF:%p:%d->%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          gpr_atm_no_barrier_load(&md->refcnt) + 1,
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 1)) {
    /* This ref is dropped if grpc_mdelem_unref reaches 1,
       but allows us to safely unref without taking the mdctx lock
       until such time */
    gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
    md->context->mdtab_free--;
  }
}

grpc_mdctx *grpc_mdctx_create_with_seed(gpr_uint32 seed) {
  grpc_mdctx *ctx = gpr_malloc(sizeof(grpc_mdctx));

  ctx->refs = 1;
  ctx->hash_seed = seed;
  gpr_mu_init(&ctx->mu);
  ctx->strtab = gpr_malloc(sizeof(internal_string *) * INITIAL_STRTAB_CAPACITY);
  memset(ctx->strtab, 0, sizeof(grpc_mdstr *) * INITIAL_STRTAB_CAPACITY);
  ctx->strtab_count = 0;
  ctx->strtab_capacity = INITIAL_STRTAB_CAPACITY;
  ctx->mdtab = gpr_malloc(sizeof(internal_metadata *) * INITIAL_MDTAB_CAPACITY);
  memset(ctx->mdtab, 0, sizeof(grpc_mdelem *) * INITIAL_MDTAB_CAPACITY);
  ctx->mdtab_count = 0;
  ctx->mdtab_capacity = INITIAL_MDTAB_CAPACITY;
  ctx->mdtab_free = 0;

  return ctx;
}

grpc_mdctx *grpc_mdctx_create(void) {
  /* This seed is used to prevent remote connections from controlling hash
   * table collisions. It needs to be somewhat unpredictable to a remote
   * connection. */
  return grpc_mdctx_create_with_seed(
      (gpr_uint32)gpr_now(GPR_CLOCK_REALTIME).tv_nsec);
}

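/* Free every metadata element still in the table. Only called once all
   elements have been unreffed (mdtab_count == mdtab_free), so keys, values
   and user data can be released unconditionally. */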
static void discard_metadata(grpc_mdctx *ctx) {
  size_t i;
  internal_metadata *next, *cur;

  for (i = 0; i < ctx->mdtab_capacity; i++) {
    cur = ctx->mdtab[i];
    while (cur) {
      void *user_data = (void *)gpr_atm_no_barrier_load(&cur->user_data);
      GPR_ASSERT(gpr_atm_acq_load(&cur->refcnt) == 0);
      next = cur->bucket_next;
      INTERNAL_STRING_UNREF(cur->key);
      INTERNAL_STRING_UNREF(cur->value);
      if (user_data != NULL) {
        ((destroy_user_data_func)gpr_atm_no_barrier_load(
            &cur->destroy_user_data))(user_data);
      }
      gpr_mu_destroy(&cur->mu_user_data);
      gpr_free(cur);
      cur = next;
      ctx->mdtab_free--;
      ctx->mdtab_count--;
    }
    ctx->mdtab[i] = NULL;
  }
}

static void metadata_context_destroy_locked(grpc_mdctx *ctx) {
  GPR_ASSERT(ctx->strtab_count == 0);
  GPR_ASSERT(ctx->mdtab_count == 0);
  GPR_ASSERT(ctx->mdtab_free == 0);
  gpr_free(ctx->strtab);
  gpr_free(ctx->mdtab);
  gpr_mu_unlock(&ctx->mu);
  gpr_mu_destroy(&ctx->mu);
  gpr_free(ctx);
}

void grpc_mdctx_ref(grpc_mdctx *ctx) {
  lock(ctx);
  GPR_ASSERT(ctx->refs > 0);
  ctx->refs++;
  unlock(ctx);
}

void grpc_mdctx_unref(grpc_mdctx *ctx) {
  lock(ctx);
  GPR_ASSERT(ctx->refs > 0);
  ctx->refs--;
  unlock(ctx);
}

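/* Double the string table and re-bucket every interned string under the new
   capacity. Requires the context lock. */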
static void grow_strtab(grpc_mdctx *ctx) {
  size_t capacity = ctx->strtab_capacity * 2;
  size_t i;
  internal_string **strtab = gpr_malloc(sizeof(internal_string *) * capacity);
  internal_string *s, *next;
  memset(strtab, 0, sizeof(internal_string *) * capacity);

  for (i = 0; i < ctx->strtab_capacity; i++) {
    for (s = ctx->strtab[i]; s; s = next) {
      next = s->bucket_next;
      s->bucket_next = strtab[s->hash % capacity];
      strtab[s->hash % capacity] = s;
    }
  }

  gpr_free(ctx->strtab);
  ctx->strtab = strtab;
  ctx->strtab_capacity = capacity;
}

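/* Remove a string whose reference count has hit zero from its hash bucket
   and free it. Requires the context lock. */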
static void internal_destroy_string(internal_string *is) {
  internal_string **prev_next;
  internal_string *cur;
  grpc_mdctx *ctx = is->context;
  if (is->has_base64_and_huffman_encoded) {
    gpr_slice_unref(is->base64_and_huffman);
  }
  for (prev_next = &ctx->strtab[is->hash % ctx->strtab_capacity],
      cur = *prev_next;
       cur != is; prev_next = &cur->bucket_next, cur = cur->bucket_next)
    ;
  *prev_next = cur->bucket_next;
  ctx->strtab_count--;
  gpr_free(is);
}

static void internal_string_ref(internal_string *s DEBUG_ARGS) {
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "STR REF:%p:%d->%d: '%s'", s,
          s->refs, s->refs + 1, grpc_mdstr_as_c_string((grpc_mdstr *)s));
#endif
  ++s->refs;
}

static void internal_string_unref(internal_string *s DEBUG_ARGS) {
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "STR UNREF:%p:%d->%d: '%s'", s,
          s->refs, s->refs - 1, grpc_mdstr_as_c_string((grpc_mdstr *)s));
#endif
  GPR_ASSERT(s->refs > 0);
  if (0 == --s->refs) {
    internal_destroy_string(s);
  }
}

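/* gpr_slice refcount callbacks for refcounted (non-inlined) strings: recover
   the owning internal_string from the embedded refcount field and forward to
   the interned ref/unref under the context lock. */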
static void slice_ref(void *p) {
  internal_string *is =
      (internal_string *)((char *)p - offsetof(internal_string, refcount));
  grpc_mdctx *ctx = is->context;
  lock(ctx);
  INTERNAL_STRING_REF(is);
  unlock(ctx);
}

static void slice_unref(void *p) {
  internal_string *is =
      (internal_string *)((char *)p - offsetof(internal_string, refcount));
  grpc_mdctx *ctx = is->context;
  lock(ctx);
  INTERNAL_STRING_UNREF(is);
  unlock(ctx);
}

grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str) {
  return grpc_mdstr_from_buffer(ctx, (const gpr_uint8 *)str, strlen(str));
}

grpc_mdstr *grpc_mdstr_from_slice(grpc_mdctx *ctx, gpr_slice slice) {
  grpc_mdstr *result = grpc_mdstr_from_buffer(ctx, GPR_SLICE_START_PTR(slice),
                                              GPR_SLICE_LENGTH(slice));
  gpr_slice_unref(slice);
  return result;
}

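/* Intern a byte buffer: return an existing entry on a hash + content match,
   otherwise create a new one (stored inline in the slice when short enough)
   and add it to the string table. */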
grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
                                   size_t length) {
  gpr_uint32 hash = gpr_murmur_hash3(buf, length, ctx->hash_seed);
  internal_string *s;

  lock(ctx);

  /* search for an existing string */
  for (s = ctx->strtab[hash % ctx->strtab_capacity]; s; s = s->bucket_next) {
    if (s->hash == hash && GPR_SLICE_LENGTH(s->slice) == length &&
        0 == memcmp(buf, GPR_SLICE_START_PTR(s->slice), length)) {
      INTERNAL_STRING_REF(s);
      unlock(ctx);
      return (grpc_mdstr *)s;
    }
  }

  /* not found: create a new string */
  if (length + 1 < GPR_SLICE_INLINED_SIZE) {
    /* string data goes directly into the slice */
    s = gpr_malloc(sizeof(internal_string));
    s->refs = 1;
    s->slice.refcount = NULL;
    memcpy(s->slice.data.inlined.bytes, buf, length);
    s->slice.data.inlined.bytes[length] = 0;
    s->slice.data.inlined.length = (gpr_uint8)length;
  } else {
    /* string data goes after the internal_string header, and we +1 for null
       terminator */
    s = gpr_malloc(sizeof(internal_string) + length + 1);
    s->refs = 1;
    s->refcount.ref = slice_ref;
    s->refcount.unref = slice_unref;
    s->slice.refcount = &s->refcount;
    s->slice.data.refcounted.bytes = (gpr_uint8 *)(s + 1);
    s->slice.data.refcounted.length = length;
    memcpy(s->slice.data.refcounted.bytes, buf, length);
    /* add a null terminator for cheap c string conversion when desired */
    s->slice.data.refcounted.bytes[length] = 0;
  }
  s->has_base64_and_huffman_encoded = 0;
  s->hash = hash;
  s->context = ctx;
  s->bucket_next = ctx->strtab[hash % ctx->strtab_capacity];
  ctx->strtab[hash % ctx->strtab_capacity] = s;

  ctx->strtab_count++;

  if (ctx->strtab_count > ctx->strtab_capacity * 2) {
    grow_strtab(ctx);
  }

  unlock(ctx);

  return (grpc_mdstr *)s;
}

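/* Sweep the metadata table, freeing every element whose reference count has
   dropped to zero. Requires the context lock. */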
static void gc_mdtab(grpc_mdctx *ctx) {
  size_t i;
  internal_metadata **prev_next;
  internal_metadata *md, *next;

  for (i = 0; i < ctx->mdtab_capacity; i++) {
    prev_next = &ctx->mdtab[i];
    for (md = ctx->mdtab[i]; md; md = next) {
      void *user_data = (void *)gpr_atm_no_barrier_load(&md->user_data);
      next = md->bucket_next;
      if (gpr_atm_acq_load(&md->refcnt) == 0) {
        INTERNAL_STRING_UNREF(md->key);
        INTERNAL_STRING_UNREF(md->value);
        if (user_data != NULL) {
          ((destroy_user_data_func)gpr_atm_no_barrier_load(
              &md->destroy_user_data))(user_data);
        }
        gpr_mu_destroy(&md->mu_user_data);
        gpr_free(md);
        *prev_next = next;
        ctx->mdtab_free--;
        ctx->mdtab_count--;
      } else {
        prev_next = &md->bucket_next;
      }
    }
  }

  GPR_ASSERT(ctx->mdtab_free == 0);
}

static void grow_mdtab(grpc_mdctx *ctx) {
  size_t capacity = ctx->mdtab_capacity * 2;
  size_t i;
  internal_metadata **mdtab =
      gpr_malloc(sizeof(internal_metadata *) * capacity);
  internal_metadata *md, *next;
  gpr_uint32 hash;
  memset(mdtab, 0, sizeof(internal_metadata *) * capacity);

  for (i = 0; i < ctx->mdtab_capacity; i++) {
    for (md = ctx->mdtab[i]; md; md = next) {
      hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
      next = md->bucket_next;
      md->bucket_next = mdtab[hash % capacity];
      mdtab[hash % capacity] = md;
    }
  }

  gpr_free(ctx->mdtab);
  ctx->mdtab = mdtab;
  ctx->mdtab_capacity = capacity;
}

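/* The table is overloaded: prefer collecting garbage when more than a quarter
   of the entries are free, otherwise double the capacity. */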
static void rehash_mdtab(grpc_mdctx *ctx) {
  if (ctx->mdtab_free > ctx->mdtab_capacity / 4) {
    gc_mdtab(ctx);
  } else {
    grow_mdtab(ctx);
  }
}

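/* Takes ownership of one reference to mkey and mvalue: if an equivalent pair
   already exists those references are released, otherwise they are
   transferred to the new element. New elements start with a refcount of 2 -
   one for the caller and one that is retained until the count falls back to
   1, so that grpc_mdelem_unref can run without the context lock (see
   ref_md_locked). */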
grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
                                               grpc_mdstr *mkey,
                                               grpc_mdstr *mvalue) {
  internal_string *key = (internal_string *)mkey;
  internal_string *value = (internal_string *)mvalue;
  gpr_uint32 hash = GRPC_MDSTR_KV_HASH(mkey->hash, mvalue->hash);
  internal_metadata *md;

  GPR_ASSERT(key->context == ctx);
  GPR_ASSERT(value->context == ctx);

  lock(ctx);

  /* search for an existing pair */
  for (md = ctx->mdtab[hash % ctx->mdtab_capacity]; md; md = md->bucket_next) {
    if (md->key == key && md->value == value) {
      REF_MD_LOCKED(md);
      INTERNAL_STRING_UNREF(key);
      INTERNAL_STRING_UNREF(value);
      unlock(ctx);
      return (grpc_mdelem *)md;
    }
  }

  /* not found: create a new pair */
  md = gpr_malloc(sizeof(internal_metadata));
  gpr_atm_rel_store(&md->refcnt, 2);
  md->context = ctx;
  md->key = key;
  md->value = value;
  md->user_data = 0;
  md->destroy_user_data = 0;
  md->bucket_next = ctx->mdtab[hash % ctx->mdtab_capacity];
  gpr_mu_init(&md->mu_user_data);
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(GPR_DEBUG, "ELM NEW:%p:%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  ctx->mdtab[hash % ctx->mdtab_capacity] = md;
  ctx->mdtab_count++;

  if (ctx->mdtab_count > ctx->mdtab_capacity * 2) {
    rehash_mdtab(ctx);
  }

  unlock(ctx);

  return (grpc_mdelem *)md;
}

grpc_mdelem *grpc_mdelem_from_strings(grpc_mdctx *ctx, const char *key,
                                      const char *value) {
  return grpc_mdelem_from_metadata_strings(ctx,
                                           grpc_mdstr_from_string(ctx, key),
                                           grpc_mdstr_from_string(ctx, value));
}

grpc_mdelem *grpc_mdelem_from_slices(grpc_mdctx *ctx, gpr_slice key,
                                     gpr_slice value) {
  return grpc_mdelem_from_metadata_strings(ctx, grpc_mdstr_from_slice(ctx, key),
                                           grpc_mdstr_from_slice(ctx, value));
}

grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_mdctx *ctx,
                                                const char *key,
                                                const gpr_uint8 *value,
                                                size_t value_length) {
  return grpc_mdelem_from_metadata_strings(
      ctx, grpc_mdstr_from_string(ctx, key),
      grpc_mdstr_from_buffer(ctx, value, value_length));
}

grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *gmd DEBUG_ARGS) {
  internal_metadata *md = (internal_metadata *)gmd;
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "ELM REF:%p:%d->%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          gpr_atm_no_barrier_load(&md->refcnt) + 1,
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  /* we can assume the ref count is >= 1 as the application is calling
     this function - meaning that no adjustment to mdtab_free is necessary,
     simplifying the logic here to be just an atomic increment */
  /* use C assert to have this removed in opt builds */
  assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
  gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
  return gmd;
}

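/* Dropping from 2 to 1 means the last external reference is gone: take the
   context lock, release the reserved reference and account the element as
   free so a later gc_mdtab/discard_metadata can reclaim it. */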
void grpc_mdelem_unref(grpc_mdelem *gmd DEBUG_ARGS) {
  internal_metadata *md = (internal_metadata *)gmd;
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "ELM UNREF:%p:%d->%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          gpr_atm_no_barrier_load(&md->refcnt) - 1,
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  if (2 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
    grpc_mdctx *ctx = md->context;
    lock(ctx);
    GPR_ASSERT(1 == gpr_atm_full_fetch_add(&md->refcnt, -1));
    ctx->mdtab_free++;
    unlock(ctx);
  }
}

const char *grpc_mdstr_as_c_string(grpc_mdstr *s) {
  return (const char *)GPR_SLICE_START_PTR(s->slice);
}

grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *gs DEBUG_ARGS) {
  internal_string *s = (internal_string *)gs;
  grpc_mdctx *ctx = s->context;
  lock(ctx);
  internal_string_ref(s FWD_DEBUG_ARGS);
  unlock(ctx);
  return gs;
}

void grpc_mdstr_unref(grpc_mdstr *gs DEBUG_ARGS) {
  internal_string *s = (internal_string *)gs;
  grpc_mdctx *ctx = s->context;
  lock(ctx);
  internal_string_unref(s FWD_DEBUG_ARGS);
  unlock(ctx);
}

size_t grpc_mdctx_get_mdtab_capacity_test_only(grpc_mdctx *ctx) {
  return ctx->mdtab_capacity;
}

size_t grpc_mdctx_get_mdtab_count_test_only(grpc_mdctx *ctx) {
  return ctx->mdtab_count;
}

size_t grpc_mdctx_get_mdtab_free_test_only(grpc_mdctx *ctx) {
  return ctx->mdtab_free;
}

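/* User data is read without taking mu_user_data: destroy_user_data doubles as
   the 'already set' flag and is store-released only after user_data has been
   written, so an acquire load that matches destroy_func guarantees user_data
   is visible. */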
void *grpc_mdelem_get_user_data(grpc_mdelem *md,
                                void (*destroy_func)(void *)) {
  internal_metadata *im = (internal_metadata *)md;
  if (gpr_atm_acq_load(&im->destroy_user_data) == (gpr_atm)destroy_func) {
    return (void *)gpr_atm_no_barrier_load(&im->user_data);
  } else {
    return NULL;
  }
}

void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
                               void *user_data) {
  internal_metadata *im = (internal_metadata *)md;
  GPR_ASSERT((user_data == NULL) == (destroy_func == NULL));
  gpr_mu_lock(&im->mu_user_data);
  if (gpr_atm_no_barrier_load(&im->destroy_user_data)) {
    /* user data can only be set once */
    gpr_mu_unlock(&im->mu_user_data);
    if (destroy_func != NULL) {
      destroy_func(user_data);
    }
    return;
  }
  gpr_atm_no_barrier_store(&im->user_data, (gpr_atm)user_data);
  gpr_atm_rel_store(&im->destroy_user_data, (gpr_atm)destroy_func);
  gpr_mu_unlock(&im->mu_user_data);
}

gpr_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *gs) {
  internal_string *s = (internal_string *)gs;
  gpr_slice slice;
  grpc_mdctx *ctx = s->context;
  lock(ctx);
  if (!s->has_base64_and_huffman_encoded) {
    s->base64_and_huffman =
        grpc_chttp2_base64_encode_and_huffman_compress(s->slice);
    s->has_base64_and_huffman_encoded = 1;
  }
  slice = s->base64_and_huffman;
  unlock(ctx);
  return slice;
}

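/* Test every byte of the slice against a 256-entry bitmap (8 characters per
   byte of legal_bits); returns 1 only if all characters are marked legal. */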
static int conforms_to(grpc_mdstr *s, const gpr_uint8 *legal_bits) {
  const gpr_uint8 *p = GPR_SLICE_START_PTR(s->slice);
  const gpr_uint8 *e = GPR_SLICE_END_PTR(s->slice);
  for (; p != e; p++) {
    int idx = *p;
    int byte = idx / 8;
    int bit = idx % 8;
    if ((legal_bits[byte] & (1 << bit)) == 0) return 0;
  }
  return 1;
}

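/* Bitmap of characters permitted in a header name: '-', '0'-'9', '_' and
   'a'-'z'. */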
int grpc_mdstr_is_legal_header(grpc_mdstr *s) {
  static const gpr_uint8 legal_header_bits[256 / 8] = {
      0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xff, 0x03, 0x00, 0x00, 0x00,
      0x80, 0xfe, 0xff, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
  return conforms_to(s, legal_header_bits);
}

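/* Bitmap of characters permitted in a non-binary header value: printable
   ASCII (0x20-0x7e) excluding ','. */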
int grpc_mdstr_is_legal_nonbin_header(grpc_mdstr *s) {
  static const gpr_uint8 legal_header_bits[256 / 8] = {
      0x00, 0x00, 0x00, 0x00, 0xff, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff,
      0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
  return conforms_to(s, legal_header_bits);
}

int grpc_mdstr_is_bin_suffixed(grpc_mdstr *s) {
  /* TODO(ctiller): consider caching this */
  return grpc_is_binary_header((const char *)GPR_SLICE_START_PTR(s->slice),
                               GPR_SLICE_LENGTH(s->slice));
}