blob: 563e659dd74045d49a8be91801cb3407570217bf [file] [log] [blame]
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08001/*
2 *
Craig Tiller8a9fd522016-03-25 17:09:29 -07003 * Copyright 2015-2016, Google Inc.
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
Craig Tiller59256032015-11-02 14:19:15 -080034#include <grpc/support/port_platform.h>
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080035#include <grpc/support/slice_buffer.h>
36
37#include <string.h>
38
39#include <grpc/support/alloc.h>
40#include <grpc/support/log.h>
Craig Tillerbae41c82015-04-28 13:22:25 -070041#include <grpc/support/useful.h>
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080042
/* grow a buffer; requires GRPC_SLICE_BUFFER_INLINE_ELEMENTS > 1 */
#define GROW(x) (3 * (x) / 2)

/* Ensure sb has room for at least one more slice.  When full, grow the
   backing array by 1.5x (GROW).  The first growth migrates slices out of the
   inlined storage into a fresh heap allocation; subsequent growths realloc.
   NOTE: GROW(x) > x only holds for x >= 2, hence the
   GRPC_SLICE_BUFFER_INLINE_ELEMENTS > 1 requirement above; the assert guards
   against a growth that failed to add capacity. */
static void maybe_embiggen(gpr_slice_buffer *sb) {
  if (sb->count == sb->capacity) {
    sb->capacity = GROW(sb->capacity);
    GPR_ASSERT(sb->capacity > sb->count);
    if (sb->slices == sb->inlined) {
      /* leaving inlined storage: allocate and copy the existing slices */
      sb->slices = gpr_malloc(sb->capacity * sizeof(gpr_slice));
      memcpy(sb->slices, sb->inlined, sb->count * sizeof(gpr_slice));
    } else {
      sb->slices = gpr_realloc(sb->slices, sb->capacity * sizeof(gpr_slice));
    }
  }
}
58
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080059void gpr_slice_buffer_init(gpr_slice_buffer *sb) {
60 sb->count = 0;
61 sb->length = 0;
Craig Tiller721f3622015-04-13 16:14:28 -070062 sb->capacity = GRPC_SLICE_BUFFER_INLINE_ELEMENTS;
63 sb->slices = sb->inlined;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080064}
65
66void gpr_slice_buffer_destroy(gpr_slice_buffer *sb) {
67 gpr_slice_buffer_reset_and_unref(sb);
Craig Tiller721f3622015-04-13 16:14:28 -070068 if (sb->slices != sb->inlined) {
69 gpr_free(sb->slices);
70 }
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080071}
72
Craig Tiller7536af02015-12-22 13:49:30 -080073uint8_t *gpr_slice_buffer_tiny_add(gpr_slice_buffer *sb, size_t n) {
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080074 gpr_slice *back;
Craig Tiller7536af02015-12-22 13:49:30 -080075 uint8_t *out;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080076
77 sb->length += n;
78
79 if (sb->count == 0) goto add_new;
80 back = &sb->slices[sb->count - 1];
81 if (back->refcount) goto add_new;
Nicolas "Pixel" Noble213ed912015-01-30 02:11:35 +010082 if ((back->data.inlined.length + n) > sizeof(back->data.inlined.bytes))
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080083 goto add_new;
84 out = back->data.inlined.bytes + back->data.inlined.length;
Craig Tiller7536af02015-12-22 13:49:30 -080085 back->data.inlined.length = (uint8_t)(back->data.inlined.length + n);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080086 return out;
87
88add_new:
Craig Tiller721f3622015-04-13 16:14:28 -070089 maybe_embiggen(sb);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080090 back = &sb->slices[sb->count];
91 sb->count++;
92 back->refcount = NULL;
Craig Tiller7536af02015-12-22 13:49:30 -080093 back->data.inlined.length = (uint8_t)n;
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -080094 return back->data.inlined.bytes;
95}
96
97size_t gpr_slice_buffer_add_indexed(gpr_slice_buffer *sb, gpr_slice s) {
98 size_t out = sb->count;
Craig Tiller721f3622015-04-13 16:14:28 -070099 maybe_embiggen(sb);
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800100 sb->slices[out] = s;
101 sb->length += GPR_SLICE_LENGTH(s);
102 sb->count = out + 1;
103 return out;
104}
105
/* Append s to sb, taking ownership of s.  Small inlined slices are merged
   into the previous slice when possible to avoid accumulating many tiny
   slices ahead of a write. */
void gpr_slice_buffer_add(gpr_slice_buffer *sb, gpr_slice s) {
  size_t n = sb->count;
  /* if both the last slice in the slice buffer and the slice being added
     are inlined (that is, that they carry their data inside the slice data
     structure), and the back slice is not full, then concatenate directly
     into the back slice, preventing many small slices being passed into
     writes */
  if (!s.refcount && n) {
    gpr_slice *back = &sb->slices[n - 1];
    if (!back->refcount && back->data.inlined.length < GPR_SLICE_INLINED_SIZE) {
      if (s.data.inlined.length + back->data.inlined.length <=
          GPR_SLICE_INLINED_SIZE) {
        /* s fits entirely in the back slice's spare room */
        memcpy(back->data.inlined.bytes + back->data.inlined.length,
               s.data.inlined.bytes, s.data.inlined.length);
        back->data.inlined.length =
            (uint8_t)(back->data.inlined.length + s.data.inlined.length);
      } else {
        /* s straddles: fill the back slice with the first cp1 bytes, then
           start a new inlined slice for the remainder */
        size_t cp1 = GPR_SLICE_INLINED_SIZE - back->data.inlined.length;
        memcpy(back->data.inlined.bytes + back->data.inlined.length,
               s.data.inlined.bytes, cp1);
        back->data.inlined.length = GPR_SLICE_INLINED_SIZE;
        maybe_embiggen(sb);
        back = &sb->slices[n];
        sb->count = n + 1;
        back->refcount = NULL;
        back->data.inlined.length = (uint8_t)(s.data.inlined.length - cp1);
        memcpy(back->data.inlined.bytes, s.data.inlined.bytes + cp1,
               s.data.inlined.length - cp1);
      }
      sb->length += s.data.inlined.length;
      return; /* early out */
    }
  }
  /* refcounted slice, empty buffer, or refcounted tail: append as-is */
  gpr_slice_buffer_add_indexed(sb, s);
}
141
142void gpr_slice_buffer_addn(gpr_slice_buffer *sb, gpr_slice *s, size_t n) {
143 size_t i;
144 for (i = 0; i < n; i++) {
145 gpr_slice_buffer_add(sb, s[i]);
146 }
147}
148
Chilledheartca767c02015-03-10 01:19:15 +0800149void gpr_slice_buffer_pop(gpr_slice_buffer *sb) {
150 if (sb->count != 0) {
151 size_t count = --sb->count;
152 sb->length -= GPR_SLICE_LENGTH(sb->slices[count]);
153 }
154}
155
Nicolas Nobleb7ebd3b2014-11-26 16:33:03 -0800156void gpr_slice_buffer_reset_and_unref(gpr_slice_buffer *sb) {
157 size_t i;
158
159 for (i = 0; i < sb->count; i++) {
160 gpr_slice_unref(sb->slices[i]);
161 }
162
163 sb->count = 0;
164 sb->length = 0;
Craig Tiller190d3602015-02-18 09:23:38 -0800165}
Craig Tiller721f3622015-04-13 16:14:28 -0700166
/* Exchange the entire contents of a and b, handling the fact that either
   buffer's slices pointer may refer to its own inlined storage (which cannot
   simply be swapped by pointer). */
void gpr_slice_buffer_swap(gpr_slice_buffer *a, gpr_slice_buffer *b) {
  /* scalars swap directly; after this point a->count/b->count already hold
     their POST-swap values, which the memcpys below rely on */
  GPR_SWAP(size_t, a->count, b->count);
  GPR_SWAP(size_t, a->capacity, b->capacity);
  GPR_SWAP(size_t, a->length, b->length);

  if (a->slices == a->inlined) {
    if (b->slices == b->inlined) {
      /* swap contents of inlined buffer */
      gpr_slice temp[GRPC_SLICE_BUFFER_INLINE_ELEMENTS];
      memcpy(temp, a->slices, b->count * sizeof(gpr_slice));
      memcpy(a->slices, b->slices, a->count * sizeof(gpr_slice));
      memcpy(b->slices, temp, b->count * sizeof(gpr_slice));
    } else {
      /* a is inlined, b is not - copy a inlined into b, fix pointers */
      a->slices = b->slices;
      b->slices = b->inlined;
      memcpy(b->slices, a->inlined, b->count * sizeof(gpr_slice));
    }
  } else if (b->slices == b->inlined) {
    /* b is inlined, a is not - copy b inlined int a, fix pointers */
    b->slices = a->slices;
    a->slices = a->inlined;
    memcpy(a->slices, b->inlined, a->count * sizeof(gpr_slice));
  } else {
    /* no inlining: easy swap */
    GPR_SWAP(gpr_slice *, a->slices, b->slices);
  }
}
Craig Tillerca9fb362015-06-16 07:43:08 -0700195
196void gpr_slice_buffer_move_into(gpr_slice_buffer *src, gpr_slice_buffer *dst) {
197 /* anything to move? */
198 if (src->count == 0) {
199 return;
200 }
201 /* anything in dst? */
202 if (dst->count == 0) {
203 gpr_slice_buffer_swap(src, dst);
204 return;
205 }
206 /* both buffers have data - copy, and reset src */
207 gpr_slice_buffer_addn(dst, src->slices, src->count);
208 src->count = 0;
209 src->length = 0;
210}
Craig Tillerb0298592015-08-27 07:38:01 -0700211
Craig Tiller59256032015-11-02 14:19:15 -0800212void gpr_slice_buffer_move_first(gpr_slice_buffer *src, size_t n,
213 gpr_slice_buffer *dst) {
214 size_t src_idx;
215 size_t output_len = dst->length + n;
216 size_t new_input_len = src->length - n;
217 GPR_ASSERT(src->length >= n);
218 if (src->length == n) {
219 gpr_slice_buffer_move_into(src, dst);
220 return;
221 }
222 src_idx = 0;
Craig Tiller6b80d492015-11-18 07:05:54 -0800223 while (src_idx < src->capacity) {
Craig Tiller59256032015-11-02 14:19:15 -0800224 gpr_slice slice = src->slices[src_idx];
225 size_t slice_len = GPR_SLICE_LENGTH(slice);
226 if (n > slice_len) {
227 gpr_slice_buffer_add(dst, slice);
228 n -= slice_len;
229 src_idx++;
230 } else if (n == slice_len) {
231 gpr_slice_buffer_add(dst, slice);
232 src_idx++;
233 break;
234 } else { /* n < slice_len */
235 src->slices[src_idx] = gpr_slice_split_tail(&slice, n);
236 GPR_ASSERT(GPR_SLICE_LENGTH(slice) == n);
237 GPR_ASSERT(GPR_SLICE_LENGTH(src->slices[src_idx]) == slice_len - n);
238 gpr_slice_buffer_add(dst, slice);
239 break;
240 }
241 }
242 GPR_ASSERT(dst->length == output_len);
243 memmove(src->slices, src->slices + src_idx,
244 sizeof(gpr_slice) * (src->count - src_idx));
245 src->count -= src_idx;
246 src->length = new_input_len;
247 GPR_ASSERT(src->count > 0);
248}
249
/* Remove the last n bytes from sb, appending the removed slices (or split-off
   tails) to garbage so the caller controls when they are unreffed.
   Requires n <= sb->length; the loop below relies on that precondition to
   terminate (idx would underflow otherwise). */
void gpr_slice_buffer_trim_end(gpr_slice_buffer *sb, size_t n,
                               gpr_slice_buffer *garbage) {
  GPR_ASSERT(n <= sb->length);
  sb->length -= n;
  for (;;) {
    size_t idx = sb->count - 1;
    gpr_slice slice = sb->slices[idx];
    size_t slice_len = GPR_SLICE_LENGTH(slice);
    if (slice_len > n) {
      /* boundary falls inside this slice: keep its head, discard its tail */
      sb->slices[idx] = gpr_slice_split_head(&slice, slice_len - n);
      gpr_slice_buffer_add_indexed(garbage, slice);
      return;
    } else if (slice_len == n) {
      /* boundary exactly at this slice's start: discard it whole and stop */
      gpr_slice_buffer_add_indexed(garbage, slice);
      sb->count = idx;
      return;
    } else {
      /* whole slice is trimmed; continue with the previous one */
      gpr_slice_buffer_add_indexed(garbage, slice);
      n -= slice_len;
      sb->count = idx;
    }
  }
}
Craig Tiller59256032015-11-02 14:19:15 -0800273
274gpr_slice gpr_slice_buffer_take_first(gpr_slice_buffer *sb) {
275 gpr_slice slice;
276 GPR_ASSERT(sb->count > 0);
277 slice = sb->slices[0];
278 memmove(&sb->slices[0], &sb->slices[1], (sb->count - 1) * sizeof(gpr_slice));
279 sb->count--;
280 sb->length -= GPR_SLICE_LENGTH(slice);
281 return slice;
282}