blob: 71fcb2fc147efa2ac483aa662127c228815eb72b [file] [log] [blame]
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001/*
2 *
Craig Tiller06059952015-02-18 08:34:56 -08003 * Copyright 2015, Google Inc.
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
Nicolas "Pixel" Nobled5a99852015-01-24 01:27:48 -080034#include "src/core/iomgr/sockaddr.h"
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080035#include "src/core/transport/metadata.h"
36
Craig Tiller9fa41b92015-04-10 15:08:03 -070037#include <assert.h>
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080038#include <stddef.h>
39#include <string.h>
40
41#include <grpc/support/alloc.h>
Craig Tiller9fa41b92015-04-10 15:08:03 -070042#include <grpc/support/atm.h>
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080043#include <grpc/support/log.h>
44#include "src/core/support/murmur_hash.h"
ctiller430c4992014-12-11 09:15:41 -080045#include "src/core/transport/chttp2/bin_encoder.h"
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080046#include <grpc/support/time.h>
47
/* initial bucket counts for the string and element hash tables; both grow
   by doubling once their load factor exceeds 2 */
#define INITIAL_STRTAB_CAPACITY 4
#define INITIAL_MDTAB_CAPACITY 4

/* When GRPC_METADATA_REFCOUNT_DEBUG is defined, the internal ref/unref
   helpers gain (file, line) parameters so every reference movement can be
   logged with its call site; otherwise the macros expand to plain calls and
   the extra parameters compile away. */
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
#define DEBUG_ARGS , const char *file, int line
#define FWD_DEBUG_ARGS , file, line
#define INTERNAL_STRING_REF(s) internal_string_ref((s), __FILE__, __LINE__)
#define INTERNAL_STRING_UNREF(s) internal_string_unref((s), __FILE__, __LINE__)
#define REF_MD_LOCKED(s) ref_md_locked((s), __FILE__, __LINE__)
#else
#define DEBUG_ARGS
#define FWD_DEBUG_ARGS
#define INTERNAL_STRING_REF(s) internal_string_ref((s))
#define INTERNAL_STRING_UNREF(s) internal_string_unref((s))
#define REF_MD_LOCKED(s) ref_md_locked((s))
#endif
64
/* Interned string: the in-memory representation behind a grpc_mdstr. */
typedef struct internal_string {
  /* must be byte compatible with grpc_mdstr */
  gpr_slice slice;
  gpr_uint32 hash;

  /* private only data */
  /* reference count; guarded by context->mu */
  gpr_uint32 refs;
  /* non-zero once base64_and_huffman below has been computed and cached */
  gpr_uint8 has_base64_and_huffman_encoded;
  /* refcount vtable embedded here so slice_ref/slice_unref can recover this
     struct via offsetof */
  gpr_slice_refcount refcount;

  /* lazily built base64+huffman encoding of slice; only valid when
     has_base64_and_huffman_encoded is set */
  gpr_slice base64_and_huffman;

  /* owning context */
  grpc_mdctx *context;

  /* next string in the strtab hash chain */
  struct internal_string *bucket_next;
} internal_string;
81
/* Interned (key, value) pair: the in-memory representation behind a
   grpc_mdelem. */
typedef struct internal_metadata {
  /* must be byte compatible with grpc_mdelem */
  internal_string *key;
  internal_string *value;

  /* reference count, manipulated with gpr_atm operations; a count of zero
     marks the element as a gc candidate (tracked in context->mdtab_free) */
  gpr_atm refcnt;

  /* private only data */
  /* guards the set-once user_data/destroy_user_data pair below */
  gpr_mu mu_user_data;
  void *user_data;
  void (*destroy_user_data)(void *user_data);

  /* owning context */
  grpc_mdctx *context;
  /* next element in the mdtab hash chain */
  struct internal_metadata *bucket_next;
} internal_metadata;
97
/* Metadata context: owns the two open-chained hash tables used to intern
   strings (strtab) and key/value elements (mdtab). */
struct grpc_mdctx {
  /* seed for murmur hashing of interned strings; chosen at creation to make
     remotely induced hash collisions unpredictable */
  gpr_uint32 hash_seed;
  /* number of outstanding context references; guarded by mu */
  int refs;

  gpr_mu mu;

  /* interned strings, chained by internal_string.bucket_next */
  internal_string **strtab;
  size_t strtab_count;
  size_t strtab_capacity;

  /* interned elements, chained by internal_metadata.bucket_next */
  internal_metadata **mdtab;
  size_t mdtab_count;    /* total elements in mdtab */
  size_t mdtab_free;     /* elements with refcnt == 0 (gc candidates) */
  size_t mdtab_capacity;
};
113
Craig Tiller1a65a232015-07-06 10:22:32 -0700114static void internal_string_ref(internal_string *s DEBUG_ARGS);
115static void internal_string_unref(internal_string *s DEBUG_ARGS);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800116static void discard_metadata(grpc_mdctx *ctx);
117static void gc_mdtab(grpc_mdctx *ctx);
Vijay Pai7d3d9ca2015-04-02 14:34:27 -0700118static void metadata_context_destroy_locked(grpc_mdctx *ctx);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800119
/* Acquire the context-wide mutex guarding both hash tables. */
static void lock(grpc_mdctx *ctx) { gpr_mu_lock(&ctx->mu); }
121
/* Release the context mutex, and — if the context has been orphaned —
   opportunistically garbage collect and possibly destroy the context.
   NOTE: after calling unlock() the context pointer may be dangling. */
static void unlock(grpc_mdctx *ctx) {
  /* If the context has been orphaned we'd like to delete it soon. We check
     conditions in unlock as it signals the end of mutations on a context.

     We need to ensure all grpc_mdelem and grpc_mdstr elements have been deleted
     first. This is equivalent to saying that both tables have zero counts,
     which is equivalent to saying that strtab_count is zero (as mdelem's MUST
     reference an mdstr for their key and value slots).

     To encourage that to happen, we start discarding zero reference count
     mdelems on every unlock (instead of the usual 'I'm too loaded' trigger
     case), since otherwise we can be stuck waiting for a garbage collection
     that will never happen. */
  if (ctx->refs == 0) {
    /* uncomment if you're having trouble diagnosing an mdelem leak to make
       things clearer (slows down destruction a lot, however) */
    gc_mdtab(ctx);
    if (ctx->mdtab_count && ctx->mdtab_count == ctx->mdtab_free) {
      discard_metadata(ctx);
    }
    if (ctx->strtab_count == 0) {
      /* destroys the mutex and frees ctx; it also performs the unlock */
      metadata_context_destroy_locked(ctx);
      return;
    }
  }
  gpr_mu_unlock(&ctx->mu);
}
149
/* Take a reference on md.  Must be called with ctx->mu held.  A transition
   from zero to one resurrects a gc candidate, so mdtab_free shrinks. */
static void ref_md_locked(internal_metadata *md DEBUG_ARGS) {
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "ELM   REF:%p:%d->%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          gpr_atm_no_barrier_load(&md->refcnt) + 1,
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 1)) {
    md->context->mdtab_free--;
  }
}
163
164grpc_mdctx *grpc_mdctx_create_with_seed(gpr_uint32 seed) {
165 grpc_mdctx *ctx = gpr_malloc(sizeof(grpc_mdctx));
166
Craig Tiller9be83ee2015-02-18 14:16:15 -0800167 ctx->refs = 1;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800168 ctx->hash_seed = seed;
169 gpr_mu_init(&ctx->mu);
170 ctx->strtab = gpr_malloc(sizeof(internal_string *) * INITIAL_STRTAB_CAPACITY);
171 memset(ctx->strtab, 0, sizeof(grpc_mdstr *) * INITIAL_STRTAB_CAPACITY);
172 ctx->strtab_count = 0;
173 ctx->strtab_capacity = INITIAL_STRTAB_CAPACITY;
174 ctx->mdtab = gpr_malloc(sizeof(internal_metadata *) * INITIAL_MDTAB_CAPACITY);
175 memset(ctx->mdtab, 0, sizeof(grpc_mdelem *) * INITIAL_MDTAB_CAPACITY);
176 ctx->mdtab_count = 0;
177 ctx->mdtab_capacity = INITIAL_MDTAB_CAPACITY;
178 ctx->mdtab_free = 0;
179
180 return ctx;
181}
182
/* Create a metadata context with a per-process-run hash seed. */
grpc_mdctx *grpc_mdctx_create(void) {
  /* This seed is used to prevent remote connections from controlling hash table
   * collisions. It needs to be somewhat unpredictable to a remote connection.
   */
  /* NOTE(review): the nanosecond field of the current time is only weakly
     unpredictable — acceptable here per the comment above, but not a CSPRNG. */
  return grpc_mdctx_create_with_seed(gpr_now().tv_nsec);
}
189
/* Free every remaining element in mdtab.  Called with ctx->mu held, and only
   when every entry is known to be unreferenced (asserted per entry); both
   mdtab_free and mdtab_count are driven back toward zero. */
static void discard_metadata(grpc_mdctx *ctx) {
  size_t i;
  internal_metadata *next, *cur;

  for (i = 0; i < ctx->mdtab_capacity; i++) {
    cur = ctx->mdtab[i];
    while (cur) {
      GPR_ASSERT(gpr_atm_acq_load(&cur->refcnt) == 0);
      next = cur->bucket_next;
      /* drop the refs on the interned key/value strings taken at creation */
      INTERNAL_STRING_UNREF(cur->key);
      INTERNAL_STRING_UNREF(cur->value);
      if (cur->user_data) {
        cur->destroy_user_data(cur->user_data);
      }
      gpr_mu_destroy(&cur->mu_user_data);
      gpr_free(cur);
      cur = next;
      ctx->mdtab_free--;
      ctx->mdtab_count--;
    }
    ctx->mdtab[i] = NULL;
  }
}
213
/* Final teardown of an orphaned context.  Called with ctx->mu held once both
   tables are empty; releases the lock before destroying it and freeing ctx. */
static void metadata_context_destroy_locked(grpc_mdctx *ctx) {
  GPR_ASSERT(ctx->strtab_count == 0);
  GPR_ASSERT(ctx->mdtab_count == 0);
  GPR_ASSERT(ctx->mdtab_free == 0);
  gpr_free(ctx->strtab);
  gpr_free(ctx->mdtab);
  /* must unlock before destroying the mutex */
  gpr_mu_unlock(&ctx->mu);
  gpr_mu_destroy(&ctx->mu);
  gpr_free(ctx);
}
224
/* Add a reference to the context.  Only valid while at least one reference
   is already held (asserted). */
void grpc_mdctx_ref(grpc_mdctx *ctx) {
  lock(ctx);
  GPR_ASSERT(ctx->refs > 0);
  ctx->refs++;
  unlock(ctx);
}
231
/* Drop a reference to the context.  If this was the last reference, unlock()
   may garbage collect and destroy the context, so ctx must not be touched
   after this call returns. */
void grpc_mdctx_unref(grpc_mdctx *ctx) {
  lock(ctx);
  GPR_ASSERT(ctx->refs > 0);
  ctx->refs--;
  unlock(ctx);
}
238
239static void grow_strtab(grpc_mdctx *ctx) {
240 size_t capacity = ctx->strtab_capacity * 2;
241 size_t i;
242 internal_string **strtab = gpr_malloc(sizeof(internal_string *) * capacity);
243 internal_string *s, *next;
244 memset(strtab, 0, sizeof(internal_string *) * capacity);
245
246 for (i = 0; i < ctx->strtab_capacity; i++) {
247 for (s = ctx->strtab[i]; s; s = next) {
248 next = s->bucket_next;
249 s->bucket_next = strtab[s->hash % capacity];
250 strtab[s->hash % capacity] = s;
251 }
252 }
253
254 gpr_free(ctx->strtab);
255 ctx->strtab = strtab;
256 ctx->strtab_capacity = capacity;
257}
258
/* Remove a zero-refcount string from its strtab chain and free it.  Called
   with ctx->mu held (reached via internal_string_unref). */
static void internal_destroy_string(internal_string *is) {
  internal_string **prev_next;
  internal_string *cur;
  grpc_mdctx *ctx = is->context;
  if (is->has_base64_and_huffman_encoded) {
    gpr_slice_unref(is->base64_and_huffman);
  }
  /* walk the hash chain until cur == is, tracking the link to rewrite */
  for (prev_next = &ctx->strtab[is->hash % ctx->strtab_capacity],
      cur = *prev_next;
       cur != is; prev_next = &cur->bucket_next, cur = cur->bucket_next)
    ;
  /* unlink and release */
  *prev_next = cur->bucket_next;
  ctx->strtab_count--;
  gpr_free(is);
}
274
/* Take a reference on an interned string.  Called with ctx->mu held (refs is
   guarded by that lock). */
static void internal_string_ref(internal_string *s DEBUG_ARGS) {
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "STR   REF:%p:%d->%d: '%s'", s,
          s->refs, s->refs + 1, grpc_mdstr_as_c_string((grpc_mdstr *)s));
#endif
  ++s->refs;
}
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800282
/* Drop a reference on an interned string; destroys it (and unlinks it from
   the table) when the count reaches zero.  Called with ctx->mu held. */
static void internal_string_unref(internal_string *s DEBUG_ARGS) {
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "STR UNREF:%p:%d->%d: '%s'", s,
          s->refs, s->refs - 1, grpc_mdstr_as_c_string((grpc_mdstr *)s));
#endif
  GPR_ASSERT(s->refs > 0);
  if (0 == --s->refs) {
    internal_destroy_string(s);
  }
}
293
/* gpr_slice refcount vtable hook: recover the owning internal_string from
   the embedded refcount field via offsetof, then ref it under the lock. */
static void slice_ref(void *p) {
  internal_string *is =
      (internal_string *)((char *)p - offsetof(internal_string, refcount));
  grpc_mdctx *ctx = is->context;
  lock(ctx);
  INTERNAL_STRING_REF(is);
  unlock(ctx);
}
302
/* gpr_slice refcount vtable hook: mirror of slice_ref for the unref path. */
static void slice_unref(void *p) {
  internal_string *is =
      (internal_string *)((char *)p - offsetof(internal_string, refcount));
  grpc_mdctx *ctx = is->context;
  lock(ctx);
  INTERNAL_STRING_UNREF(is);
  unlock(ctx);
}
311
/* Intern a NUL-terminated C string; the terminator is not included. */
grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str) {
  return grpc_mdstr_from_buffer(ctx, (const gpr_uint8 *)str, strlen(str));
}
315
/* Intern the contents of a slice.  Takes ownership of (unrefs) the passed
   slice; the caller owns the returned string reference. */
grpc_mdstr *grpc_mdstr_from_slice(grpc_mdctx *ctx, gpr_slice slice) {
  grpc_mdstr *result = grpc_mdstr_from_buffer(ctx, GPR_SLICE_START_PTR(slice),
                                              GPR_SLICE_LENGTH(slice));
  gpr_slice_unref(slice);
  return result;
}
322
/* Intern a byte buffer: return the existing entry from ctx->strtab if one
   matches, otherwise create a new one.  The returned string carries one
   reference owned by the caller.  Strings are always NUL-terminated
   internally so grpc_mdstr_as_c_string is free. */
grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
                                   size_t length) {
  gpr_uint32 hash = gpr_murmur_hash3(buf, length, ctx->hash_seed);
  internal_string *s;

  lock(ctx);

  /* search for an existing string */
  for (s = ctx->strtab[hash % ctx->strtab_capacity]; s; s = s->bucket_next) {
    if (s->hash == hash && GPR_SLICE_LENGTH(s->slice) == length &&
        0 == memcmp(buf, GPR_SLICE_START_PTR(s->slice), length)) {
      INTERNAL_STRING_REF(s);
      unlock(ctx);
      return (grpc_mdstr *)s;
    }
  }

  /* not found: create a new string */
  if (length + 1 < GPR_SLICE_INLINED_SIZE) {
    /* string data goes directly into the slice */
    s = gpr_malloc(sizeof(internal_string));
    s->refs = 1;
    s->slice.refcount = NULL;
    memcpy(s->slice.data.inlined.bytes, buf, length);
    s->slice.data.inlined.bytes[length] = 0;
    s->slice.data.inlined.length = length;
  } else {
    /* string data goes after the internal_string header, and we +1 for null
       terminator */
    s = gpr_malloc(sizeof(internal_string) + length + 1);
    s->refs = 1;
    /* out-of-line data is refcounted through the embedded vtable so slices
       handed to callers keep the interned string alive */
    s->refcount.ref = slice_ref;
    s->refcount.unref = slice_unref;
    s->slice.refcount = &s->refcount;
    s->slice.data.refcounted.bytes = (gpr_uint8 *)(s + 1);
    s->slice.data.refcounted.length = length;
    memcpy(s->slice.data.refcounted.bytes, buf, length);
    /* add a null terminator for cheap c string conversion when desired */
    s->slice.data.refcounted.bytes[length] = 0;
  }
  s->has_base64_and_huffman_encoded = 0;
  s->hash = hash;
  s->context = ctx;
  /* push onto the head of the hash chain */
  s->bucket_next = ctx->strtab[hash % ctx->strtab_capacity];
  ctx->strtab[hash % ctx->strtab_capacity] = s;

  ctx->strtab_count++;

  /* keep chains short: grow once the load factor exceeds 2 */
  if (ctx->strtab_count > ctx->strtab_capacity * 2) {
    grow_strtab(ctx);
  }

  unlock(ctx);

  return (grpc_mdstr *)s;
}
379
380static void gc_mdtab(grpc_mdctx *ctx) {
381 size_t i;
382 internal_metadata **prev_next;
383 internal_metadata *md, *next;
384
385 for (i = 0; i < ctx->mdtab_capacity; i++) {
386 prev_next = &ctx->mdtab[i];
387 for (md = ctx->mdtab[i]; md; md = next) {
388 next = md->bucket_next;
Craig Tiller9fa41b92015-04-10 15:08:03 -0700389 if (gpr_atm_acq_load(&md->refcnt) == 0) {
Craig Tiller1a65a232015-07-06 10:22:32 -0700390 INTERNAL_STRING_UNREF(md->key);
391 INTERNAL_STRING_UNREF(md->value);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800392 if (md->user_data) {
393 md->destroy_user_data(md->user_data);
394 }
395 gpr_free(md);
396 *prev_next = next;
397 ctx->mdtab_free--;
398 ctx->mdtab_count--;
399 } else {
400 prev_next = &md->bucket_next;
401 }
402 }
403 }
404
405 GPR_ASSERT(ctx->mdtab_free == 0);
406}
407
/* Double the element table's bucket count and rehash every chained entry.
   Called with ctx->mu held. */
static void grow_mdtab(grpc_mdctx *ctx) {
  size_t capacity = ctx->mdtab_capacity * 2;
  size_t i;
  internal_metadata **mdtab =
      gpr_malloc(sizeof(internal_metadata *) * capacity);
  internal_metadata *md, *next;
  gpr_uint32 hash;
  memset(mdtab, 0, sizeof(internal_metadata *) * capacity);

  for (i = 0; i < ctx->mdtab_capacity; i++) {
    for (md = ctx->mdtab[i]; md; md = next) {
      /* recompute the pair hash from the two cached string hashes */
      hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
      next = md->bucket_next;
      md->bucket_next = mdtab[hash % capacity];
      mdtab[hash % capacity] = md;
    }
  }

  gpr_free(ctx->mdtab);
  ctx->mdtab = mdtab;
  ctx->mdtab_capacity = capacity;
}
430
431static void rehash_mdtab(grpc_mdctx *ctx) {
432 if (ctx->mdtab_free > ctx->mdtab_capacity / 4) {
433 gc_mdtab(ctx);
434 } else {
435 grow_mdtab(ctx);
436 }
437}
438
/* Intern a (key, value) pair.  TAKES OWNERSHIP of the caller's references on
   mkey and mvalue: on a table hit they are unref'd here; on a miss they are
   stored in the new element without an extra ref.  The returned element
   carries one reference owned by the caller. */
grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
                                               grpc_mdstr *mkey,
                                               grpc_mdstr *mvalue) {
  internal_string *key = (internal_string *)mkey;
  internal_string *value = (internal_string *)mvalue;
  gpr_uint32 hash = GRPC_MDSTR_KV_HASH(mkey->hash, mvalue->hash);
  internal_metadata *md;

  /* strings may only be combined within their owning context */
  GPR_ASSERT(key->context == ctx);
  GPR_ASSERT(value->context == ctx);

  lock(ctx);

  /* search for an existing pair */
  for (md = ctx->mdtab[hash % ctx->mdtab_capacity]; md; md = md->bucket_next) {
    if (md->key == key && md->value == value) {
      REF_MD_LOCKED(md);
      /* the element already holds its own refs; drop the caller's */
      INTERNAL_STRING_UNREF(key);
      INTERNAL_STRING_UNREF(value);
      unlock(ctx);
      return (grpc_mdelem *)md;
    }
  }

  /* not found: create a new pair */
  md = gpr_malloc(sizeof(internal_metadata));
  gpr_atm_rel_store(&md->refcnt, 1);
  md->context = ctx;
  md->key = key;
  md->value = value;
  md->user_data = NULL;
  md->destroy_user_data = NULL;
  md->bucket_next = ctx->mdtab[hash % ctx->mdtab_capacity];
  gpr_mu_init(&md->mu_user_data);
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(GPR_DEBUG, "ELM   NEW:%p:%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  ctx->mdtab[hash % ctx->mdtab_capacity] = md;
  ctx->mdtab_count++;

  /* load factor exceeded 2: gc or grow (see rehash_mdtab) */
  if (ctx->mdtab_count > ctx->mdtab_capacity * 2) {
    rehash_mdtab(ctx);
  }

  unlock(ctx);

  return (grpc_mdelem *)md;
}
490
491grpc_mdelem *grpc_mdelem_from_strings(grpc_mdctx *ctx, const char *key,
492 const char *value) {
493 return grpc_mdelem_from_metadata_strings(ctx,
494 grpc_mdstr_from_string(ctx, key),
495 grpc_mdstr_from_string(ctx, value));
496}
497
498grpc_mdelem *grpc_mdelem_from_slices(grpc_mdctx *ctx, gpr_slice key,
499 gpr_slice value) {
500 return grpc_mdelem_from_metadata_strings(ctx, grpc_mdstr_from_slice(ctx, key),
501 grpc_mdstr_from_slice(ctx, value));
502}
503
504grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_mdctx *ctx,
505 const char *key,
506 const gpr_uint8 *value,
507 size_t value_length) {
508 return grpc_mdelem_from_metadata_strings(
509 ctx, grpc_mdstr_from_string(ctx, key),
510 grpc_mdstr_from_buffer(ctx, value, value_length));
511}
512
/* Take an additional reference on an element the caller already holds.
   Lock-free: since the caller's ref keeps the count >= 1, no 0->1
   transition can occur and mdtab_free needs no adjustment. */
grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *gmd DEBUG_ARGS) {
  internal_metadata *md = (internal_metadata *)gmd;
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "ELM   REF:%p:%d->%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          gpr_atm_no_barrier_load(&md->refcnt) + 1,
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  /* we can assume the ref count is >= 1 as the application is calling
     this function - meaning that no adjustment to mdtab_free is necessary,
     simplifying the logic here to be just an atomic increment */
  /* use C assert to have this removed in opt builds */
  assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
  gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
  return gmd;
}
531
/* Drop a reference on an element.  The element is not freed immediately;
   a 1->0 transition marks it as a gc candidate (mdtab_free++), to be
   reclaimed later by gc_mdtab/discard_metadata. */
void grpc_mdelem_unref(grpc_mdelem *gmd DEBUG_ARGS) {
  internal_metadata *md = (internal_metadata *)gmd;
  grpc_mdctx *ctx = md->context;
  lock(ctx);
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "ELM UNREF:%p:%d->%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          gpr_atm_no_barrier_load(&md->refcnt) - 1,
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
  if (1 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
    ctx->mdtab_free++;
  }
  unlock(ctx);
}
550
/* View the string's bytes as a C string; safe because interning always
   appends a NUL terminator (see grpc_mdstr_from_buffer). */
const char *grpc_mdstr_as_c_string(grpc_mdstr *s) {
  return (const char *)GPR_SLICE_START_PTR(s->slice);
}
554
/* Take a reference on an interned string (public, locking entry point). */
grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *gs DEBUG_ARGS) {
  internal_string *s = (internal_string *)gs;
  grpc_mdctx *ctx = s->context;
  lock(ctx);
  internal_string_ref(s FWD_DEBUG_ARGS);
  unlock(ctx);
  return gs;
}
563
/* Drop a reference on an interned string (public, locking entry point);
   may destroy the string and, transitively, the orphaned context. */
void grpc_mdstr_unref(grpc_mdstr *gs DEBUG_ARGS) {
  internal_string *s = (internal_string *)gs;
  grpc_mdctx *ctx = s->context;
  lock(ctx);
  internal_string_unref(s FWD_DEBUG_ARGS);
  unlock(ctx);
}
571
/* Test-only accessor: current mdtab bucket count (read without the lock). */
size_t grpc_mdctx_get_mdtab_capacity_test_only(grpc_mdctx *ctx) {
  return ctx->mdtab_capacity;
}
575
/* Test-only accessor: number of elements in mdtab (read without the lock). */
size_t grpc_mdctx_get_mdtab_count_test_only(grpc_mdctx *ctx) {
  return ctx->mdtab_count;
}
579
/* Test-only accessor: number of zero-refcount elements awaiting gc. */
size_t grpc_mdctx_get_mdtab_free_test_only(grpc_mdctx *ctx) {
  return ctx->mdtab_free;
}
583
/* Return the user data previously attached to md, or NULL if none was set
   or it was attached with a different destroy function (the destroy pointer
   doubles as a type tag). */
void *grpc_mdelem_get_user_data(grpc_mdelem *md,
                                void (*if_destroy_func)(void *)) {
  internal_metadata *im = (internal_metadata *)md;
  void *result;
  gpr_mu_lock(&im->mu_user_data);
  result = im->destroy_user_data == if_destroy_func ? im->user_data : NULL;
  gpr_mu_unlock(&im->mu_user_data);
  return result;
}
593
/* Attach user data to md with a matching destroy function.  First caller
   wins: if user data was already set, the NEW value is destroyed immediately
   and the stored value is left untouched.  user_data and destroy_func must
   be both NULL or both non-NULL (asserted). */
void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
                               void *user_data) {
  internal_metadata *im = (internal_metadata *)md;
  GPR_ASSERT((user_data == NULL) == (destroy_func == NULL));
  gpr_mu_lock(&im->mu_user_data);
  if (im->destroy_user_data) {
    /* user data can only be set once */
    gpr_mu_unlock(&im->mu_user_data);
    if (destroy_func != NULL) {
      destroy_func(user_data);
    }
    return;
  }
  im->destroy_user_data = destroy_func;
  im->user_data = user_data;
  gpr_mu_unlock(&im->mu_user_data);
}
ctiller430c4992014-12-11 09:15:41 -0800611
/* Return the base64+huffman encoding of gs, computing and caching it on
   first use.  The returned slice is the cached copy and is handed back
   without an added reference; it stays valid as long as gs does. */
gpr_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *gs) {
  internal_string *s = (internal_string *)gs;
  gpr_slice slice;
  grpc_mdctx *ctx = s->context;
  lock(ctx);
  if (!s->has_base64_and_huffman_encoded) {
    s->base64_and_huffman =
        grpc_chttp2_base64_encode_and_huffman_compress(s->slice);
    s->has_base64_and_huffman_encoded = 1;
  }
  slice = s->base64_and_huffman;
  unlock(ctx);
  return slice;
}
Craig Tillerfe0104a2015-04-14 09:19:12 -0700626
/* Public entry to the context lock (pairs with grpc_mdctx_unlock). */
void grpc_mdctx_lock(grpc_mdctx *ctx) { lock(ctx); }
628
/* Batch variant of grpc_mdelem_unref for callers already holding the context
   lock via grpc_mdctx_lock; avoids a lock/unlock round trip per element.
   The element must belong to ctx (asserted). */
void grpc_mdctx_locked_mdelem_unref(grpc_mdctx *ctx,
                                    grpc_mdelem *gmd DEBUG_ARGS) {
  internal_metadata *md = (internal_metadata *)gmd;
  grpc_mdctx *elem_ctx = md->context;
  GPR_ASSERT(ctx == elem_ctx);
#ifdef GRPC_METADATA_REFCOUNT_DEBUG
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
          "ELM UNREF:%p:%d->%d: '%s' = '%s'", md,
          gpr_atm_no_barrier_load(&md->refcnt),
          gpr_atm_no_barrier_load(&md->refcnt) - 1,
          grpc_mdstr_as_c_string((grpc_mdstr *)md->key),
          grpc_mdstr_as_c_string((grpc_mdstr *)md->value));
#endif
  assert(gpr_atm_no_barrier_load(&md->refcnt) >= 1);
  if (1 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
    ctx->mdtab_free++;
  }
}
647
/* Public exit from the context lock; may gc/destroy an orphaned context. */
void grpc_mdctx_unlock(grpc_mdctx *ctx) { unlock(ctx); }
Craig Tillerb96d0012015-05-06 15:33:23 -0700649
650int grpc_mdstr_is_legal_header(grpc_mdstr *s) {
651 /* TODO(ctiller): consider caching this, or computing it on construction */
652 const gpr_uint8 *p = GPR_SLICE_START_PTR(s->slice);
653 const gpr_uint8 *e = GPR_SLICE_END_PTR(s->slice);
654 for (; p != e; p++) {
655 if (*p < 32 || *p > 126) return 0;
656 }
657 return 1;
658}
659
/* Return non-zero iff the header name carries the binary suffix recognized
   by grpc_is_binary_header (see chttp2/bin_encoder). */
int grpc_mdstr_is_bin_suffixed(grpc_mdstr *s) {
  /* TODO(ctiller): consider caching this */
  return grpc_is_binary_header((const char *)GPR_SLICE_START_PTR(s->slice),
                               GPR_SLICE_LENGTH(s->slice));
}