/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/iomgr/sockaddr.h"
#include "src/core/transport/metadata.h"

#include <assert.h>
#include <stddef.h>
#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>
#include "src/core/support/murmur_hash.h"
#include "src/core/transport/chttp2/bin_encoder.h"
#include <grpc/support/time.h>

#define INITIAL_STRTAB_CAPACITY 4
#define INITIAL_MDTAB_CAPACITY 4

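/* refcount debugging plumbing: when GRPC_METADATA_REFCOUNT_DEBUG is defined
   the ref/unref helpers below also take the caller's file and line so every
   refcount transition can be logged with its call site; otherwise the extra
   arguments compile away to nothing */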
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
#define DEBUG_ARGS , const char *file, int line
#define FWD_DEBUG_ARGS , file, line
#define INTERNAL_STRING_REF(s) internal_string_ref((s), __FILE__, __LINE__)
#define INTERNAL_STRING_UNREF(s) internal_string_unref((s), __FILE__, __LINE__)
#define REF_MD_LOCKED(s) ref_md_locked((s), __FILE__, __LINE__)
#else
#define DEBUG_ARGS
#define FWD_DEBUG_ARGS
#define INTERNAL_STRING_REF(s) internal_string_ref((s))
#define INTERNAL_STRING_UNREF(s) internal_string_unref((s))
#define REF_MD_LOCKED(s) ref_md_locked((s))
#endif

typedef struct internal_string {
  /* must be byte compatible with grpc_mdstr */
  gpr_slice slice;
  gpr_uint32 hash;

  /* private only data */
  gpr_uint32 refs;
  gpr_uint8 has_base64_and_huffman_encoded;
  gpr_slice_refcount refcount;

  gpr_slice base64_and_huffman;

  grpc_mdctx *context;

  struct internal_string *bucket_next;
} internal_string;

typedef struct internal_metadata {
  /* must be byte compatible with grpc_mdelem */
  internal_string *key;
  internal_string *value;

  gpr_atm refcnt;

  /* private only data */
  void *user_data;
  void (*destroy_user_data)(void *user_data);

  grpc_mdctx *context;
  struct internal_metadata *bucket_next;
} internal_metadata;

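/* a metadata context owns two intrusive chained hash tables guarded by mu:
   strtab interns strings (internal_string, hashed by contents) and mdtab
   interns key/value pairs (internal_metadata, hashed from the two string
   hashes); mdtab_free counts pairs whose refcount has dropped to zero - they
   stay in the table for possible reuse until gc_mdtab() collects them */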
struct grpc_mdctx {
  gpr_uint32 hash_seed;
  int refs;

  gpr_mu mu;

  internal_string **strtab;
  size_t strtab_count;
  size_t strtab_capacity;

  internal_metadata **mdtab;
  size_t mdtab_count;
  size_t mdtab_free;
  size_t mdtab_capacity;
};

static void internal_string_ref(internal_string *s DEBUG_ARGS);
static void internal_string_unref(internal_string *s DEBUG_ARGS);
static void discard_metadata(grpc_mdctx *ctx);
static void gc_mdtab(grpc_mdctx *ctx);
static void metadata_context_destroy_locked(grpc_mdctx *ctx);

static void lock(grpc_mdctx *ctx) { gpr_mu_lock(&ctx->mu); }

static void unlock(grpc_mdctx *ctx) {
  /* If the context has been orphaned we'd like to delete it soon. We check
     conditions in unlock as it signals the end of mutations on a context.

     We need to ensure all grpc_mdelem and grpc_mdstr elements have been deleted
     first. This is equivalent to saying that both tables have zero counts,
     which is equivalent to saying that strtab_count is zero (as mdelems MUST
     reference an mdstr for their key and value slots).

     To encourage that to happen, we start discarding zero reference count
     mdelems on every unlock (instead of the usual 'I'm too loaded' trigger
     case), since otherwise we can be stuck waiting for a garbage collection
     that will never happen. */
  if (ctx->refs == 0) {
    /* gc_mdtab() runs on every unlock of an orphaned context (see the comment
       above); this makes mdelem leaks easier to diagnose but slows down
       destruction */
    gc_mdtab(ctx);
    if (ctx->mdtab_count && ctx->mdtab_count == ctx->mdtab_free) {
      discard_metadata(ctx);
    }
    if (ctx->strtab_count == 0) {
      metadata_context_destroy_locked(ctx);
      return;
    }
  }
  gpr_mu_unlock(&ctx->mu);
}

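/* take a ref on an interned pair while holding the context lock: a 0->1
   transition revives a previously unreferenced element, so the count of
   collectable elements (mdtab_free) drops by one */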
static void ref_md_locked(internal_metadata *md DEBUG_ARGS) {
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "ELM REF:%p:%d->%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          gpr_atm_no_barrier_load(&md->refcnt) + 1,
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 1)) {
    md->context->mdtab_free--;
  }
}

grpc_mdctx *grpc_mdctx_create_with_seed(gpr_uint32 seed) {
  grpc_mdctx *ctx = gpr_malloc(sizeof(grpc_mdctx));

  ctx->refs = 1;
  ctx->hash_seed = seed;
  gpr_mu_init(&ctx->mu);
  ctx->strtab = gpr_malloc(sizeof(internal_string *) * INITIAL_STRTAB_CAPACITY);
  memset(ctx->strtab, 0, sizeof(grpc_mdstr *) * INITIAL_STRTAB_CAPACITY);
  ctx->strtab_count = 0;
  ctx->strtab_capacity = INITIAL_STRTAB_CAPACITY;
  ctx->mdtab = gpr_malloc(sizeof(internal_metadata *) * INITIAL_MDTAB_CAPACITY);
  memset(ctx->mdtab, 0, sizeof(grpc_mdelem *) * INITIAL_MDTAB_CAPACITY);
  ctx->mdtab_count = 0;
  ctx->mdtab_capacity = INITIAL_MDTAB_CAPACITY;
  ctx->mdtab_free = 0;

  return ctx;
}

grpc_mdctx *grpc_mdctx_create(void) {
  /* This seed is used to prevent remote connections from controlling hash table
   * collisions. It needs to be somewhat unpredictable to a remote connection.
   */
  return grpc_mdctx_create_with_seed(gpr_now().tv_nsec);
}

static void discard_metadata(grpc_mdctx *ctx) {
  size_t i;
  internal_metadata *next, *cur;

  for (i = 0; i < ctx->mdtab_capacity; i++) {
    cur = ctx->mdtab[i];
    while (cur) {
      GPR_ASSERT(gpr_atm_acq_load(&cur->refcnt) == 0);
      next = cur->bucket_next;
      INTERNAL_STRING_UNREF(cur->key);
      INTERNAL_STRING_UNREF(cur->value);
      if (cur->user_data) {
        cur->destroy_user_data(cur->user_data);
      }
      gpr_free(cur);
      cur = next;
      ctx->mdtab_free--;
      ctx->mdtab_count--;
    }
    ctx->mdtab[i] = NULL;
  }
}

static void metadata_context_destroy_locked(grpc_mdctx *ctx) {
  GPR_ASSERT(ctx->strtab_count == 0);
  GPR_ASSERT(ctx->mdtab_count == 0);
  GPR_ASSERT(ctx->mdtab_free == 0);
  gpr_free(ctx->strtab);
  gpr_free(ctx->mdtab);
  gpr_mu_unlock(&ctx->mu);
  gpr_mu_destroy(&ctx->mu);
  gpr_free(ctx);
}

void grpc_mdctx_ref(grpc_mdctx *ctx) {
  lock(ctx);
  GPR_ASSERT(ctx->refs > 0);
  ctx->refs++;
  unlock(ctx);
}

void grpc_mdctx_unref(grpc_mdctx *ctx) {
  lock(ctx);
  GPR_ASSERT(ctx->refs > 0);
  ctx->refs--;
  unlock(ctx);
}

static void grow_strtab(grpc_mdctx *ctx) {
  size_t capacity = ctx->strtab_capacity * 2;
  size_t i;
  internal_string **strtab = gpr_malloc(sizeof(internal_string *) * capacity);
  internal_string *s, *next;
  memset(strtab, 0, sizeof(internal_string *) * capacity);

  for (i = 0; i < ctx->strtab_capacity; i++) {
    for (s = ctx->strtab[i]; s; s = next) {
      next = s->bucket_next;
      s->bucket_next = strtab[s->hash % capacity];
      strtab[s->hash % capacity] = s;
    }
  }

  gpr_free(ctx->strtab);
  ctx->strtab = strtab;
  ctx->strtab_capacity = capacity;
}

static void internal_destroy_string(internal_string *is) {
  internal_string **prev_next;
  internal_string *cur;
  grpc_mdctx *ctx = is->context;
  if (is->has_base64_and_huffman_encoded) {
    gpr_slice_unref(is->base64_and_huffman);
  }
  for (prev_next = &ctx->strtab[is->hash % ctx->strtab_capacity],
      cur = *prev_next;
       cur != is; prev_next = &cur->bucket_next, cur = cur->bucket_next)
    ;
  *prev_next = cur->bucket_next;
  ctx->strtab_count--;
  gpr_free(is);
}

static void internal_string_ref(internal_string *s DEBUG_ARGS) {
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "STR REF:%p:%d->%d: '%s'", s,
          s->refs, s->refs + 1, grpc_mdstr_as_c_string((grpc_mdstr *)s));
#endif
  ++s->refs;
}

static void internal_string_unref(internal_string *s DEBUG_ARGS) {
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "STR UNREF:%p:%d->%d: '%s'", s,
          s->refs, s->refs - 1, grpc_mdstr_as_c_string((grpc_mdstr *)s));
#endif
  GPR_ASSERT(s->refs > 0);
  if (0 == --s->refs) {
    internal_destroy_string(s);
  }
}

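/* gpr_slice refcount vtable entries for out-of-line interned strings: recover
   the owning internal_string from its embedded refcount field and forward to
   the interned string's own refcount under the context lock */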
static void slice_ref(void *p) {
  internal_string *is =
      (internal_string *)((char *)p - offsetof(internal_string, refcount));
  grpc_mdctx *ctx = is->context;
  lock(ctx);
  INTERNAL_STRING_REF(is);
  unlock(ctx);
}

static void slice_unref(void *p) {
  internal_string *is =
      (internal_string *)((char *)p - offsetof(internal_string, refcount));
  grpc_mdctx *ctx = is->context;
  lock(ctx);
  INTERNAL_STRING_UNREF(is);
  unlock(ctx);
}

grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str) {
  return grpc_mdstr_from_buffer(ctx, (const gpr_uint8 *)str, strlen(str));
}

grpc_mdstr *grpc_mdstr_from_slice(grpc_mdctx *ctx, gpr_slice slice) {
  grpc_mdstr *result = grpc_mdstr_from_buffer(ctx, GPR_SLICE_START_PTR(slice),
                                              GPR_SLICE_LENGTH(slice));
  gpr_slice_unref(slice);
  return result;
}

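/* intern a byte buffer: hash it with the per-context murmur seed, return an
   existing entry with an extra ref if one matches, otherwise build a new
   internal_string; short strings are stored inline in the slice, longer ones
   live just past the header and use slice_ref/slice_unref so copies of the
   slice keep the interned string alive */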
grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
                                   size_t length) {
  gpr_uint32 hash = gpr_murmur_hash3(buf, length, ctx->hash_seed);
  internal_string *s;

  lock(ctx);

  /* search for an existing string */
  for (s = ctx->strtab[hash % ctx->strtab_capacity]; s; s = s->bucket_next) {
    if (s->hash == hash && GPR_SLICE_LENGTH(s->slice) == length &&
        0 == memcmp(buf, GPR_SLICE_START_PTR(s->slice), length)) {
      INTERNAL_STRING_REF(s);
      unlock(ctx);
      return (grpc_mdstr *)s;
    }
  }

  /* not found: create a new string */
  if (length + 1 < GPR_SLICE_INLINED_SIZE) {
    /* string data goes directly into the slice */
    s = gpr_malloc(sizeof(internal_string));
    s->refs = 1;
    s->slice.refcount = NULL;
    memcpy(s->slice.data.inlined.bytes, buf, length);
    s->slice.data.inlined.bytes[length] = 0;
    s->slice.data.inlined.length = length;
  } else {
    /* string data goes after the internal_string header, and we +1 for null
       terminator */
    s = gpr_malloc(sizeof(internal_string) + length + 1);
    s->refs = 1;
    s->refcount.ref = slice_ref;
    s->refcount.unref = slice_unref;
    s->slice.refcount = &s->refcount;
    s->slice.data.refcounted.bytes = (gpr_uint8 *)(s + 1);
    s->slice.data.refcounted.length = length;
    memcpy(s->slice.data.refcounted.bytes, buf, length);
    /* add a null terminator for cheap c string conversion when desired */
    s->slice.data.refcounted.bytes[length] = 0;
  }
  s->has_base64_and_huffman_encoded = 0;
  s->hash = hash;
  s->context = ctx;
  s->bucket_next = ctx->strtab[hash % ctx->strtab_capacity];
  ctx->strtab[hash % ctx->strtab_capacity] = s;

  ctx->strtab_count++;

  if (ctx->strtab_count > ctx->strtab_capacity * 2) {
    grow_strtab(ctx);
  }

  unlock(ctx);

  return (grpc_mdstr *)s;
}

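/* sweep mdtab and free every interned pair whose refcount has reached zero,
   unref'ing its key and value strings and running any user_data destructor;
   the final assert checks that mdtab_free counted exactly the collected
   elements */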
static void gc_mdtab(grpc_mdctx *ctx) {
  size_t i;
  internal_metadata **prev_next;
  internal_metadata *md, *next;

  for (i = 0; i < ctx->mdtab_capacity; i++) {
    prev_next = &ctx->mdtab[i];
    for (md = ctx->mdtab[i]; md; md = next) {
      next = md->bucket_next;
      if (gpr_atm_acq_load(&md->refcnt) == 0) {
        INTERNAL_STRING_UNREF(md->key);
        INTERNAL_STRING_UNREF(md->value);
        if (md->user_data) {
          md->destroy_user_data(md->user_data);
        }
        gpr_free(md);
        *prev_next = next;
        ctx->mdtab_free--;
        ctx->mdtab_count--;
      } else {
        prev_next = &md->bucket_next;
      }
    }
  }

  GPR_ASSERT(ctx->mdtab_free == 0);
}

static void grow_mdtab(grpc_mdctx *ctx) {
  size_t capacity = ctx->mdtab_capacity * 2;
  size_t i;
  internal_metadata **mdtab =
      gpr_malloc(sizeof(internal_metadata *) * capacity);
  internal_metadata *md, *next;
  gpr_uint32 hash;
  memset(mdtab, 0, sizeof(internal_metadata *) * capacity);

  for (i = 0; i < ctx->mdtab_capacity; i++) {
    for (md = ctx->mdtab[i]; md; md = next) {
      hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
      next = md->bucket_next;
      md->bucket_next = mdtab[hash % capacity];
      mdtab[hash % capacity] = md;
    }
  }

  gpr_free(ctx->mdtab);
  ctx->mdtab = mdtab;
  ctx->mdtab_capacity = capacity;
}

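/* called when mdtab_count exceeds twice the capacity: if the number of zero
   refcount entries exceeds a quarter of the table capacity, collect them
   instead of growing; otherwise double the bucket count */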
static void rehash_mdtab(grpc_mdctx *ctx) {
  if (ctx->mdtab_free > ctx->mdtab_capacity / 4) {
    gc_mdtab(ctx);
  } else {
    grow_mdtab(ctx);
  }
}

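/* intern a key/value pair; takes ownership of the caller's refs on mkey and
   mvalue: if an identical pair already exists those refs are dropped and the
   existing element is returned with an extra ref, otherwise the new element
   keeps them and starts with a refcount of 1 */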
grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
                                               grpc_mdstr *mkey,
                                               grpc_mdstr *mvalue) {
  internal_string *key = (internal_string *)mkey;
  internal_string *value = (internal_string *)mvalue;
  gpr_uint32 hash = GRPC_MDSTR_KV_HASH(mkey->hash, mvalue->hash);
  internal_metadata *md;

  GPR_ASSERT(key->context == ctx);
  GPR_ASSERT(value->context == ctx);

  lock(ctx);

  /* search for an existing pair */
  for (md = ctx->mdtab[hash % ctx->mdtab_capacity]; md; md = md->bucket_next) {
    if (md->key == key && md->value == value) {
      REF_MD_LOCKED(md);
      INTERNAL_STRING_UNREF(key);
      INTERNAL_STRING_UNREF(value);
      unlock(ctx);
      return (grpc_mdelem *)md;
    }
  }

  /* not found: create a new pair */
  md = gpr_malloc(sizeof(internal_metadata));
  gpr_atm_rel_store(&md->refcnt, 1);
  md->context = ctx;
  md->key = key;
  md->value = value;
  md->user_data = NULL;
  md->destroy_user_data = NULL;
  md->bucket_next = ctx->mdtab[hash % ctx->mdtab_capacity];
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(GPR_DEBUG, "ELM NEW:%p:%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  ctx->mdtab[hash % ctx->mdtab_capacity] = md;
  ctx->mdtab_count++;

  if (ctx->mdtab_count > ctx->mdtab_capacity * 2) {
    rehash_mdtab(ctx);
  }

  unlock(ctx);

  return (grpc_mdelem *)md;
}

grpc_mdelem *grpc_mdelem_from_strings(grpc_mdctx *ctx, const char *key,
                                      const char *value) {
  return grpc_mdelem_from_metadata_strings(ctx,
                                           grpc_mdstr_from_string(ctx, key),
                                           grpc_mdstr_from_string(ctx, value));
}

grpc_mdelem *grpc_mdelem_from_slices(grpc_mdctx *ctx, gpr_slice key,
                                     gpr_slice value) {
  return grpc_mdelem_from_metadata_strings(ctx, grpc_mdstr_from_slice(ctx, key),
                                           grpc_mdstr_from_slice(ctx, value));
}

grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_mdctx *ctx,
                                                const char *key,
                                                const gpr_uint8 *value,
                                                size_t value_length) {
  return grpc_mdelem_from_metadata_strings(
      ctx, grpc_mdstr_from_string(ctx, key),
      grpc_mdstr_from_buffer(ctx, value, value_length));
}

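/* element refcounting: grpc_mdelem_ref is a bare atomic increment because the
   caller already holds a ref, so the element cannot be in the zero refcount
   (mdtab_free) pool; grpc_mdelem_unref takes the context lock, and when the
   count reaches zero the element is not freed - it is only marked collectable
   by bumping mdtab_free */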
grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *gmd DEBUG_ARGS) {
  internal_metadata *md = (internal_metadata *)gmd;
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "ELM REF:%p:%d->%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          gpr_atm_no_barrier_load(&md->refcnt) + 1,
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  /* we can assume the ref count is >= 1 as the application is calling
     this function - meaning that no adjustment to mdtab_free is necessary,
     simplifying the logic here to be just an atomic increment */
  /* use C assert to have this removed in opt builds */
  assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
  gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
  return gmd;
}

void grpc_mdelem_unref(grpc_mdelem *gmd DEBUG_ARGS) {
  internal_metadata *md = (internal_metadata *)gmd;
  grpc_mdctx *ctx = md->context;
  lock(ctx);
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "ELM UNREF:%p:%d->%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          gpr_atm_no_barrier_load(&md->refcnt) - 1,
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
  if (1 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
    ctx->mdtab_free++;
  }
  unlock(ctx);
}

const char *grpc_mdstr_as_c_string(grpc_mdstr *s) {
  return (const char *)GPR_SLICE_START_PTR(s->slice);
}

grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *gs DEBUG_ARGS) {
  internal_string *s = (internal_string *)gs;
  grpc_mdctx *ctx = s->context;
  lock(ctx);
  internal_string_ref(s FWD_DEBUG_ARGS);
  unlock(ctx);
  return gs;
}

void grpc_mdstr_unref(grpc_mdstr *gs DEBUG_ARGS) {
  internal_string *s = (internal_string *)gs;
  grpc_mdctx *ctx = s->context;
  lock(ctx);
  internal_string_unref(s FWD_DEBUG_ARGS);
  unlock(ctx);
}

size_t grpc_mdctx_get_mdtab_capacity_test_only(grpc_mdctx *ctx) {
  return ctx->mdtab_capacity;
}

size_t grpc_mdctx_get_mdtab_count_test_only(grpc_mdctx *ctx) {
  return ctx->mdtab_count;
}

size_t grpc_mdctx_get_mdtab_free_test_only(grpc_mdctx *ctx) {
  return ctx->mdtab_free;
}

void *grpc_mdelem_get_user_data(grpc_mdelem *md,
                                void (*if_destroy_func)(void *)) {
  internal_metadata *im = (internal_metadata *)md;
  return im->destroy_user_data == if_destroy_func ? im->user_data : NULL;
}

void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
                               void *user_data) {
  internal_metadata *im = (internal_metadata *)md;
  GPR_ASSERT((user_data == NULL) == (destroy_func == NULL));
  if (im->destroy_user_data) {
    im->destroy_user_data(im->user_data);
  }
  im->destroy_user_data = destroy_func;
  im->user_data = user_data;
}

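/* return the base64 encoded, huffman compressed form of an interned string
   (as used when writing binary metadata), computing it on first request and
   caching it on the internal_string; the cached slice is released when the
   string is destroyed */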
gpr_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *gs) {
  internal_string *s = (internal_string *)gs;
  gpr_slice slice;
  grpc_mdctx *ctx = s->context;
  lock(ctx);
  if (!s->has_base64_and_huffman_encoded) {
    s->base64_and_huffman =
        grpc_chttp2_base64_encode_and_huffman_compress(s->slice);
    s->has_base64_and_huffman_encoded = 1;
  }
  slice = s->base64_and_huffman;
  unlock(ctx);
  return slice;
}

void grpc_mdctx_lock(grpc_mdctx *ctx) { lock(ctx); }

void grpc_mdctx_locked_mdelem_unref(grpc_mdctx *ctx,
                                    grpc_mdelem *gmd DEBUG_ARGS) {
  internal_metadata *md = (internal_metadata *)gmd;
  grpc_mdctx *elem_ctx = md->context;
  GPR_ASSERT(ctx == elem_ctx);
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "ELM UNREF:%p:%d->%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          gpr_atm_no_barrier_load(&md->refcnt) - 1,
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
  if (1 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
    ctx->mdtab_free++;
  }
}

void grpc_mdctx_unlock(grpc_mdctx *ctx) { unlock(ctx); }

int grpc_mdstr_is_legal_header(grpc_mdstr *s) {
  /* TODO(ctiller): consider caching this, or computing it on construction */
  const gpr_uint8 *p = GPR_SLICE_START_PTR(s->slice);
  const gpr_uint8 *e = GPR_SLICE_END_PTR(s->slice);
  for (; p != e; p++) {
    if (*p < 32 || *p > 126) return 0;
  }
  return 1;
}

int grpc_mdstr_is_bin_suffixed(grpc_mdstr *s) {
  /* TODO(ctiller): consider caching this */
  return grpc_is_binary_header((const char *)GPR_SLICE_START_PTR(s->slice),
                               GPR_SLICE_LENGTH(s->slice));
}