blob: e3de6ce45526e1829a2269500ff85424782e2b8e [file] [log] [blame]
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001/*
2 *
Craig Tiller06059952015-02-18 08:34:56 -08003 * Copyright 2015, Google Inc.
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
Nicolas "Pixel" Nobled5a99852015-01-24 01:27:48 -080034#include "src/core/iomgr/sockaddr.h"
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080035#include "src/core/transport/metadata.h"
36
Craig Tiller9fa41b92015-04-10 15:08:03 -070037#include <assert.h>
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080038#include <stddef.h>
39#include <string.h>
40
41#include <grpc/support/alloc.h>
Craig Tiller9fa41b92015-04-10 15:08:03 -070042#include <grpc/support/atm.h>
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080043#include <grpc/support/log.h>
44#include "src/core/support/murmur_hash.h"
ctiller430c4992014-12-11 09:15:41 -080045#include "src/core/transport/chttp2/bin_encoder.h"
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080046#include <grpc/support/time.h>
47
48#define INITIAL_STRTAB_CAPACITY 4
49#define INITIAL_MDTAB_CAPACITY 4
50
/* Interned string: the context-private representation behind grpc_mdstr. */
typedef struct internal_string {
  /* must be byte compatible with grpc_mdstr */
  gpr_slice slice;
  gpr_uint32 hash;

  /* private only data */
  /* reference count; protected by context->mu */
  gpr_uint32 refs;
  /* set once base64_and_huffman below has been populated */
  gpr_uint8 has_base64_and_huffman_encoded;
  /* refcount vtable handed out when the slice payload is out-of-line */
  gpr_slice_refcount refcount;

  /* lazily computed cache; valid only when has_base64_and_huffman_encoded */
  gpr_slice base64_and_huffman;

  /* owning metadata context */
  grpc_mdctx *context;

  /* next entry chained in the context's strtab bucket */
  struct internal_string *bucket_next;
} internal_string;
67
/* Interned key/value pair: the context-private representation behind
   grpc_mdelem. */
typedef struct internal_metadata {
  /* must be byte compatible with grpc_mdelem */
  internal_string *key;
  internal_string *value;

  /* atomic reference count; when it reaches zero the element becomes a
     garbage-collection candidate (context->mdtab_free is bumped) */
  gpr_atm refcnt;

  /* private only data */
  /* opaque per-element user data with its destructor; see
     grpc_mdelem_set_user_data */
  void *user_data;
  void (*destroy_user_data)(void *user_data);

  /* owning metadata context */
  grpc_mdctx *context;
  /* next entry chained in the context's mdtab bucket */
  struct internal_metadata *bucket_next;
} internal_metadata;
82
/* Metadata context: owns two chained hash tables that intern strings and
   key/value elements, all guarded by a single mutex. */
struct grpc_mdctx {
  /* murmur hash seed; randomized so peers cannot engineer collisions */
  gpr_uint32 hash_seed;
  /* context reference count; protected by mu */
  int refs;

  gpr_mu mu;

  /* chained hash table of interned strings */
  internal_string **strtab;
  size_t strtab_count;
  size_t strtab_capacity;

  /* chained hash table of interned metadata elements */
  internal_metadata **mdtab;
  size_t mdtab_count;
  /* number of mdtab entries whose refcnt has dropped to zero */
  size_t mdtab_free;
  size_t mdtab_capacity;
};
98
99static void internal_string_ref(internal_string *s);
100static void internal_string_unref(internal_string *s);
101static void discard_metadata(grpc_mdctx *ctx);
102static void gc_mdtab(grpc_mdctx *ctx);
Vijay Pai7d3d9ca2015-04-02 14:34:27 -0700103static void metadata_context_destroy_locked(grpc_mdctx *ctx);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800104
105static void lock(grpc_mdctx *ctx) { gpr_mu_lock(&ctx->mu); }
106
/* Release the context mutex; when the context is orphaned this also drives
   incremental teardown and may destroy the context entirely. */
static void unlock(grpc_mdctx *ctx) {
  /* If the context has been orphaned we'd like to delete it soon. We check
     conditions in unlock as it signals the end of mutations on a context.

     We need to ensure all grpc_mdelem and grpc_mdstr elements have been deleted
     first. This is equivalent to saying that both tables have zero counts,
     which is equivalent to saying that strtab_count is zero (as mdelem's MUST
     reference an mdstr for their key and value slots).

     To encourage that to happen, we start discarding zero reference count
     mdelems on every unlock (instead of the usual 'I'm too loaded' trigger
     case), since otherwise we can be stuck waiting for a garbage collection
     that will never happen. */
  if (ctx->refs == 0) {
    /* uncomment if you're having trouble diagnosing an mdelem leak to make
       things clearer (slows down destruction a lot, however) */
    /* gc_mdtab(ctx); */
    if (ctx->mdtab_count && ctx->mdtab_count == ctx->mdtab_free) {
      /* every remaining element is unreferenced: drop them all at once */
      discard_metadata(ctx);
    }
    if (ctx->strtab_count == 0) {
      /* nothing left alive: this call unlocks the mutex itself before
         destroying it, hence the early return */
      metadata_context_destroy_locked(ctx);
      return;
    }
  }
  gpr_mu_unlock(&ctx->mu);
}
134
/* Take a reference on md. Must be called with md->context->mu held: reviving
   an element whose count was zero removes it from the gc-candidate tally. */
static void ref_md_locked(internal_metadata *md) {
  if (0 == gpr_atm_no_barrier_fetch_add(&md->refcnt, 1)) {
    md->context->mdtab_free--;
  }
}
140
141grpc_mdctx *grpc_mdctx_create_with_seed(gpr_uint32 seed) {
142 grpc_mdctx *ctx = gpr_malloc(sizeof(grpc_mdctx));
143
Craig Tiller9be83ee2015-02-18 14:16:15 -0800144 ctx->refs = 1;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800145 ctx->hash_seed = seed;
146 gpr_mu_init(&ctx->mu);
147 ctx->strtab = gpr_malloc(sizeof(internal_string *) * INITIAL_STRTAB_CAPACITY);
148 memset(ctx->strtab, 0, sizeof(grpc_mdstr *) * INITIAL_STRTAB_CAPACITY);
149 ctx->strtab_count = 0;
150 ctx->strtab_capacity = INITIAL_STRTAB_CAPACITY;
151 ctx->mdtab = gpr_malloc(sizeof(internal_metadata *) * INITIAL_MDTAB_CAPACITY);
152 memset(ctx->mdtab, 0, sizeof(grpc_mdelem *) * INITIAL_MDTAB_CAPACITY);
153 ctx->mdtab_count = 0;
154 ctx->mdtab_capacity = INITIAL_MDTAB_CAPACITY;
155 ctx->mdtab_free = 0;
156
157 return ctx;
158}
159
/* Create a metadata context with a time-derived hash seed. */
grpc_mdctx *grpc_mdctx_create(void) {
  /* This seed is used to prevent remote connections from controlling hash table
   * collisions. It needs to be somewhat unpredictable to a remote connection.
   */
  return grpc_mdctx_create_with_seed(gpr_now().tv_nsec);
}
166
/* Free every element in mdtab. Precondition (asserted per-entry): all
   elements have a zero refcnt, i.e. mdtab_count == mdtab_free. Called with
   the context lock held. */
static void discard_metadata(grpc_mdctx *ctx) {
  size_t i;
  internal_metadata *next, *cur;

  for (i = 0; i < ctx->mdtab_capacity; i++) {
    cur = ctx->mdtab[i];
    while (cur) {
      GPR_ASSERT(gpr_atm_acq_load(&cur->refcnt) == 0);
      next = cur->bucket_next;
      /* drop the key/value string references held by this element */
      internal_string_unref(cur->key);
      internal_string_unref(cur->value);
      if (cur->user_data) {
        cur->destroy_user_data(cur->user_data);
      }
      gpr_free(cur);
      cur = next;
      ctx->mdtab_free--;
      ctx->mdtab_count--;
    }
    ctx->mdtab[i] = NULL;
  }
}
189
/* Final teardown of an empty context. Called with ctx->mu held; releases the
   mutex itself before destroying it, then frees the context. */
static void metadata_context_destroy_locked(grpc_mdctx *ctx) {
  GPR_ASSERT(ctx->strtab_count == 0);
  GPR_ASSERT(ctx->mdtab_count == 0);
  GPR_ASSERT(ctx->mdtab_free == 0);
  gpr_free(ctx->strtab);
  gpr_free(ctx->mdtab);
  /* must unlock before destroying the mutex */
  gpr_mu_unlock(&ctx->mu);
  gpr_mu_destroy(&ctx->mu);
  gpr_free(ctx);
}
200
Craig Tiller9be83ee2015-02-18 14:16:15 -0800201void grpc_mdctx_ref(grpc_mdctx *ctx) {
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800202 lock(ctx);
Craig Tiller9be83ee2015-02-18 14:16:15 -0800203 GPR_ASSERT(ctx->refs > 0);
204 ctx->refs++;
205 unlock(ctx);
206}
207
208void grpc_mdctx_unref(grpc_mdctx *ctx) {
209 lock(ctx);
210 GPR_ASSERT(ctx->refs > 0);
211 ctx->refs--;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800212 unlock(ctx);
213}
214
215static void grow_strtab(grpc_mdctx *ctx) {
216 size_t capacity = ctx->strtab_capacity * 2;
217 size_t i;
218 internal_string **strtab = gpr_malloc(sizeof(internal_string *) * capacity);
219 internal_string *s, *next;
220 memset(strtab, 0, sizeof(internal_string *) * capacity);
221
222 for (i = 0; i < ctx->strtab_capacity; i++) {
223 for (s = ctx->strtab[i]; s; s = next) {
224 next = s->bucket_next;
225 s->bucket_next = strtab[s->hash % capacity];
226 strtab[s->hash % capacity] = s;
227 }
228 }
229
230 gpr_free(ctx->strtab);
231 ctx->strtab = strtab;
232 ctx->strtab_capacity = capacity;
233}
234
235static void internal_destroy_string(internal_string *is) {
236 internal_string **prev_next;
237 internal_string *cur;
238 grpc_mdctx *ctx = is->context;
ctiller430c4992014-12-11 09:15:41 -0800239 if (is->has_base64_and_huffman_encoded) {
240 gpr_slice_unref(is->base64_and_huffman);
241 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800242 for (prev_next = &ctx->strtab[is->hash % ctx->strtab_capacity],
243 cur = *prev_next;
244 cur != is; prev_next = &cur->bucket_next, cur = cur->bucket_next)
245 ;
246 *prev_next = cur->bucket_next;
247 ctx->strtab_count--;
248 gpr_free(is);
249}
250
251static void internal_string_ref(internal_string *s) { ++s->refs; }
252
/* Drop a reference on s, destroying it at zero; requires the context lock. */
static void internal_string_unref(internal_string *s) {
  GPR_ASSERT(s->refs > 0);
  if (0 == --s->refs) {
    internal_destroy_string(s);
  }
}
259
260static void slice_ref(void *p) {
261 internal_string *is =
262 (internal_string *)((char *)p - offsetof(internal_string, refcount));
263 grpc_mdctx *ctx = is->context;
264 lock(ctx);
265 internal_string_ref(is);
266 unlock(ctx);
267}
268
269static void slice_unref(void *p) {
270 internal_string *is =
271 (internal_string *)((char *)p - offsetof(internal_string, refcount));
272 grpc_mdctx *ctx = is->context;
273 lock(ctx);
274 internal_string_unref(is);
275 unlock(ctx);
276}
277
278grpc_mdstr *grpc_mdstr_from_string(grpc_mdctx *ctx, const char *str) {
279 return grpc_mdstr_from_buffer(ctx, (const gpr_uint8 *)str, strlen(str));
280}
281
282grpc_mdstr *grpc_mdstr_from_slice(grpc_mdctx *ctx, gpr_slice slice) {
283 grpc_mdstr *result = grpc_mdstr_from_buffer(ctx, GPR_SLICE_START_PTR(slice),
284 GPR_SLICE_LENGTH(slice));
285 gpr_slice_unref(slice);
286 return result;
287}
288
/* Intern an arbitrary byte buffer as an mdstr. Returns an existing entry
   (with an extra ref) when the same bytes are already interned, otherwise
   creates one. The stored slice is always NUL-terminated for cheap C-string
   conversion. */
grpc_mdstr *grpc_mdstr_from_buffer(grpc_mdctx *ctx, const gpr_uint8 *buf,
                                   size_t length) {
  gpr_uint32 hash = gpr_murmur_hash3(buf, length, ctx->hash_seed);
  internal_string *s;

  lock(ctx);

  /* search for an existing string */
  for (s = ctx->strtab[hash % ctx->strtab_capacity]; s; s = s->bucket_next) {
    if (s->hash == hash && GPR_SLICE_LENGTH(s->slice) == length &&
        0 == memcmp(buf, GPR_SLICE_START_PTR(s->slice), length)) {
      internal_string_ref(s);
      unlock(ctx);
      return (grpc_mdstr *)s;
    }
  }

  /* not found: create a new string */
  if (length + 1 < GPR_SLICE_INLINED_SIZE) {
    /* string data goes directly into the slice */
    s = gpr_malloc(sizeof(internal_string));
    s->refs = 1;
    /* NULL refcount marks the slice as inlined */
    s->slice.refcount = NULL;
    memcpy(s->slice.data.inlined.bytes, buf, length);
    s->slice.data.inlined.bytes[length] = 0;
    s->slice.data.inlined.length = length;
  } else {
    /* string data goes after the internal_string header, and we +1 for null
       terminator */
    s = gpr_malloc(sizeof(internal_string) + length + 1);
    s->refs = 1;
    /* route slice ref/unref through this string's own refcount */
    s->refcount.ref = slice_ref;
    s->refcount.unref = slice_unref;
    s->slice.refcount = &s->refcount;
    s->slice.data.refcounted.bytes = (gpr_uint8 *)(s + 1);
    s->slice.data.refcounted.length = length;
    memcpy(s->slice.data.refcounted.bytes, buf, length);
    /* add a null terminator for cheap c string conversion when desired */
    s->slice.data.refcounted.bytes[length] = 0;
  }
  s->has_base64_and_huffman_encoded = 0;
  s->hash = hash;
  s->context = ctx;
  /* link into the head of the hash bucket */
  s->bucket_next = ctx->strtab[hash % ctx->strtab_capacity];
  ctx->strtab[hash % ctx->strtab_capacity] = s;

  ctx->strtab_count++;

  /* grow once average chain length exceeds 2 */
  if (ctx->strtab_count > ctx->strtab_capacity * 2) {
    grow_strtab(ctx);
  }

  unlock(ctx);

  return (grpc_mdstr *)s;
}
345
/* Garbage-collect every zero-refcount element from mdtab, unlinking them in
   place. Called with the context lock held; afterwards no gc candidates
   remain (asserted). */
static void gc_mdtab(grpc_mdctx *ctx) {
  size_t i;
  internal_metadata **prev_next;
  internal_metadata *md, *next;

  for (i = 0; i < ctx->mdtab_capacity; i++) {
    prev_next = &ctx->mdtab[i];
    for (md = ctx->mdtab[i]; md; md = next) {
      next = md->bucket_next;
      if (gpr_atm_acq_load(&md->refcnt) == 0) {
        /* dead element: release its string refs and user data, then unlink */
        internal_string_unref(md->key);
        internal_string_unref(md->value);
        if (md->user_data) {
          md->destroy_user_data(md->user_data);
        }
        gpr_free(md);
        *prev_next = next;
        ctx->mdtab_free--;
        ctx->mdtab_count--;
      } else {
        prev_next = &md->bucket_next;
      }
    }
  }

  GPR_ASSERT(ctx->mdtab_free == 0);
}
373
374static void grow_mdtab(grpc_mdctx *ctx) {
375 size_t capacity = ctx->mdtab_capacity * 2;
376 size_t i;
377 internal_metadata **mdtab =
378 gpr_malloc(sizeof(internal_metadata *) * capacity);
379 internal_metadata *md, *next;
380 gpr_uint32 hash;
381 memset(mdtab, 0, sizeof(internal_metadata *) * capacity);
382
383 for (i = 0; i < ctx->mdtab_capacity; i++) {
384 for (md = ctx->mdtab[i]; md; md = next) {
ctillerfb93d192014-12-15 10:40:05 -0800385 hash = GRPC_MDSTR_KV_HASH(md->key->hash, md->value->hash);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800386 next = md->bucket_next;
387 md->bucket_next = mdtab[hash % capacity];
388 mdtab[hash % capacity] = md;
389 }
390 }
391
392 gpr_free(ctx->mdtab);
393 ctx->mdtab = mdtab;
394 ctx->mdtab_capacity = capacity;
395}
396
397static void rehash_mdtab(grpc_mdctx *ctx) {
398 if (ctx->mdtab_free > ctx->mdtab_capacity / 4) {
399 gc_mdtab(ctx);
400 } else {
401 grow_mdtab(ctx);
402 }
403}
404
/* Intern a key/value pair. Takes ownership of the caller's references on
   mkey and mvalue: when an existing pair is found those refs are released,
   otherwise they are stored in the new element. Both strings must belong to
   ctx (asserted). */
grpc_mdelem *grpc_mdelem_from_metadata_strings(grpc_mdctx *ctx,
                                               grpc_mdstr *mkey,
                                               grpc_mdstr *mvalue) {
  internal_string *key = (internal_string *)mkey;
  internal_string *value = (internal_string *)mvalue;
  gpr_uint32 hash = GRPC_MDSTR_KV_HASH(mkey->hash, mvalue->hash);
  internal_metadata *md;

  GPR_ASSERT(key->context == ctx);
  GPR_ASSERT(value->context == ctx);

  lock(ctx);

  /* search for an existing pair */
  for (md = ctx->mdtab[hash % ctx->mdtab_capacity]; md; md = md->bucket_next) {
    if (md->key == key && md->value == value) {
      ref_md_locked(md);
      /* the existing element already holds its own key/value refs; drop the
         ones the caller passed in */
      internal_string_unref(key);
      internal_string_unref(value);
      unlock(ctx);
      return (grpc_mdelem *)md;
    }
  }

  /* not found: create a new pair */
  md = gpr_malloc(sizeof(internal_metadata));
  gpr_atm_rel_store(&md->refcnt, 1);
  md->context = ctx;
  /* adopt the caller's string references */
  md->key = key;
  md->value = value;
  md->user_data = NULL;
  md->destroy_user_data = NULL;
  md->bucket_next = ctx->mdtab[hash % ctx->mdtab_capacity];
  ctx->mdtab[hash % ctx->mdtab_capacity] = md;
  ctx->mdtab_count++;

  /* rehash (gc or grow) once average chain length exceeds 2 */
  if (ctx->mdtab_count > ctx->mdtab_capacity * 2) {
    rehash_mdtab(ctx);
  }

  unlock(ctx);

  return (grpc_mdelem *)md;
}
449
450grpc_mdelem *grpc_mdelem_from_strings(grpc_mdctx *ctx, const char *key,
451 const char *value) {
452 return grpc_mdelem_from_metadata_strings(ctx,
453 grpc_mdstr_from_string(ctx, key),
454 grpc_mdstr_from_string(ctx, value));
455}
456
457grpc_mdelem *grpc_mdelem_from_slices(grpc_mdctx *ctx, gpr_slice key,
458 gpr_slice value) {
459 return grpc_mdelem_from_metadata_strings(ctx, grpc_mdstr_from_slice(ctx, key),
460 grpc_mdstr_from_slice(ctx, value));
461}
462
463grpc_mdelem *grpc_mdelem_from_string_and_buffer(grpc_mdctx *ctx,
464 const char *key,
465 const gpr_uint8 *value,
466 size_t value_length) {
467 return grpc_mdelem_from_metadata_strings(
468 ctx, grpc_mdstr_from_string(ctx, key),
469 grpc_mdstr_from_buffer(ctx, value, value_length));
470}
471
/* Take an additional reference on gmd without locking the context. */
grpc_mdelem *grpc_mdelem_ref(grpc_mdelem *gmd) {
  internal_metadata *md = (internal_metadata *)gmd;
  /* we can assume the ref count is >= 1 as the application is calling
     this function - meaning that no adjustment to mdtab_free is necessary,
     simplifying the logic here to be just an atomic increment */
  /* use C assert to have this removed in opt builds */
  assert(gpr_atm_acq_load(&md->refcnt) >= 1);
  gpr_atm_no_barrier_fetch_add(&md->refcnt, 1);
  return gmd;
}
482
/* Drop a reference on gmd. The element is not freed here: when the count
   hits zero it is merely marked as a gc candidate (mdtab_free), and actual
   reclamation happens later in gc_mdtab/discard_metadata. */
void grpc_mdelem_unref(grpc_mdelem *gmd) {
  internal_metadata *md = (internal_metadata *)gmd;
  grpc_mdctx *ctx = md->context;
  lock(ctx);
  assert(gpr_atm_acq_load(&md->refcnt) >= 1);
  /* full barrier: we were the last reference iff the pre-decrement value
     was 1 */
  if (1 == gpr_atm_full_fetch_add(&md->refcnt, -1)) {
    ctx->mdtab_free++;
  }
  unlock(ctx);
}
493
/* View an mdstr as a C string: valid because interned slices are always
   NUL-terminated at creation (see grpc_mdstr_from_buffer). */
const char *grpc_mdstr_as_c_string(grpc_mdstr *s) {
  return (const char *)GPR_SLICE_START_PTR(s->slice);
}
497
498grpc_mdstr *grpc_mdstr_ref(grpc_mdstr *gs) {
499 internal_string *s = (internal_string *)gs;
500 grpc_mdctx *ctx = s->context;
501 lock(ctx);
502 internal_string_ref(s);
503 unlock(ctx);
504 return gs;
505}
506
507void grpc_mdstr_unref(grpc_mdstr *gs) {
508 internal_string *s = (internal_string *)gs;
509 grpc_mdctx *ctx = s->context;
510 lock(ctx);
511 internal_string_unref(s);
512 unlock(ctx);
513}
514
/* Test-only accessor: current mdtab capacity. Reads without the lock. */
size_t grpc_mdctx_get_mdtab_capacity_test_only(grpc_mdctx *ctx) {
  return ctx->mdtab_capacity;
}

/* Test-only accessor: number of interned mdelems. Reads without the lock. */
size_t grpc_mdctx_get_mdtab_count_test_only(grpc_mdctx *ctx) {
  return ctx->mdtab_count;
}

/* Test-only accessor: number of gc-candidate mdelems. Reads without the
   lock. */
size_t grpc_mdctx_get_mdtab_free_test_only(grpc_mdctx *ctx) {
  return ctx->mdtab_free;
}
526
/* Return the element's user data iff it was registered with the same
   destructor the caller names; otherwise NULL.
   NOTE(review): reads user_data/destroy_user_data without taking the context
   lock, while grpc_mdelem_set_user_data writes them — confirm callers
   serialize these externally. */
void *grpc_mdelem_get_user_data(grpc_mdelem *md,
                                void (*if_destroy_func)(void *)) {
  internal_metadata *im = (internal_metadata *)md;
  return im->destroy_user_data == if_destroy_func ? im->user_data : NULL;
}
532
/* Attach user data (with its destructor) to an element, destroying any
   previously attached value. user_data and destroy_func must be both set or
   both NULL (asserted).
   NOTE(review): mutates the element without taking the context lock —
   presumably callers guarantee exclusive access; verify. */
void grpc_mdelem_set_user_data(grpc_mdelem *md, void (*destroy_func)(void *),
                               void *user_data) {
  internal_metadata *im = (internal_metadata *)md;
  GPR_ASSERT((user_data == NULL) == (destroy_func == NULL));
  if (im->destroy_user_data) {
    im->destroy_user_data(im->user_data);
  }
  im->destroy_user_data = destroy_func;
  im->user_data = user_data;
}
ctiller430c4992014-12-11 09:15:41 -0800543
/* Return the base64+huffman encoding of gs, computing and caching it on
   first use. The returned slice is owned by the string (not a new ref);
   it lives as long as the mdstr does. */
gpr_slice grpc_mdstr_as_base64_encoded_and_huffman_compressed(grpc_mdstr *gs) {
  internal_string *s = (internal_string *)gs;
  gpr_slice slice;
  grpc_mdctx *ctx = s->context;
  lock(ctx);
  if (!s->has_base64_and_huffman_encoded) {
    /* fill the cache exactly once, under the context lock */
    s->base64_and_huffman =
        grpc_chttp2_base64_encode_and_huffman_compress(s->slice);
    s->has_base64_and_huffman_encoded = 1;
  }
  slice = s->base64_and_huffman;
  unlock(ctx);
  return slice;
}