/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "includes.h"
RCSID("$OpenBSD: monitor_mm.c,v 1.6 2002/06/04 23:05:49 markus Exp $");

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#include "ssh.h"
#include "xmalloc.h"
#include "log.h"
#include "monitor_mm.h"

static int
mm_compare(struct mm_share *a, struct mm_share *b)
{
	/* Compute the difference in a long so it is not truncated to int. */
	long diff = (char *)a->address - (char *)b->address;

	if (diff == 0)
		return (0);
	return (diff < 0 ? -1 : 1);
}

RB_GENERATE(mmtree, mm_share, next, mm_compare)
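
/*
 * RB_GENERATE instantiates the red-black tree operations used below
 * (RB_INSERT, RB_REMOVE, RB_FIND, RB_FOREACH, ...) for struct mmtree,
 * keyed by start address via mm_compare.
 */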

static struct mm_share *
mm_make_entry(struct mm_master *mm, struct mmtree *head,
    void *address, size_t size)
{
	struct mm_share *tmp, *tmp2;

	if (mm->mmalloc == NULL)
		tmp = xmalloc(sizeof(struct mm_share));
	else
		tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
	tmp->address = address;
	tmp->size = size;

	tmp2 = RB_INSERT(mmtree, head, tmp);
	if (tmp2 != NULL)
		fatal("mm_make_entry(%p): double address %p->%p(%lu)",
		    mm, tmp2, address, (u_long)size);

	return (tmp);
}
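
/*
 * Note that when mm->mmalloc is set, the mm_share records themselves
 * are allocated from shared memory, so both processes work on the
 * same bookkeeping.
 */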

/* Creates a shared memory area of a certain size */

struct mm_master *
mm_create(struct mm_master *mmalloc, size_t size)
{
	void *address;
	struct mm_master *mm;

	if (mmalloc == NULL)
		mm = xmalloc(sizeof(struct mm_master));
	else
		mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));

	/*
	 * If this mm_master is itself allocated from an outer shared
	 * mm_master, all of its management state lives in shared memory
	 * too, so the area can be shared completely (including
	 * authentication state) between the monitor and the child.
	 */
	mm->mmalloc = mmalloc;

#ifdef HAVE_MMAP_ANON_SHARED
	address = mmap(NULL, size, PROT_WRITE|PROT_READ, MAP_ANON|MAP_SHARED,
	    -1, 0);
	if (address == MAP_FAILED)
		fatal("mmap(%lu): %s", (u_long)size, strerror(errno));
#else
	fatal("%s: UsePrivilegeSeparation=yes and Compression=yes not supported",
	    __func__);
#endif

	mm->address = address;
	mm->size = size;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_make_entry(mm, &mm->rb_free, address, size);

	return (mm);
}
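
/*
 * Typical lifecycle (a sketch for illustration, not from the original
 * sources; errors abort via fatal() inside the callees):
 *
 *	struct mm_master *mm = mm_create(NULL, 65536);
 *	void *p = mm_xmalloc(mm, 128);
 *		... p is visible to both processes after fork() ...
 *	mm_free(mm, p);
 *	mm_destroy(mm);
 */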

/* Frees either the allocated or the free list */

static void
mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
{
	struct mm_share *mms, *next;

	/* Walk in order from the smallest entry so every node is visited */
	for (mms = RB_MIN(mmtree, head); mms; mms = next) {
		/* Fetch the successor before removal detaches the node */
		next = RB_NEXT(mmtree, head, mms);
		RB_REMOVE(mmtree, head, mms);
		if (mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mmalloc, mms);
	}
}

/* Destroys a memory mapped area */

void
mm_destroy(struct mm_master *mm)
{
	mm_freelist(mm->mmalloc, &mm->rb_free);
	mm_freelist(mm->mmalloc, &mm->rb_allocated);

#ifdef HAVE_MMAP_ANON_SHARED
	if (munmap(mm->address, mm->size) == -1)
		fatal("munmap(%p, %lu): %s", mm->address, (u_long)mm->size,
		    strerror(errno));
#else
	fatal("%s: UsePrivilegeSeparation=yes and Compression=yes not supported",
	    __func__);
#endif
	if (mm->mmalloc == NULL)
		xfree(mm);
	else
		mm_free(mm->mmalloc, mm);
}

void *
mm_xmalloc(struct mm_master *mm, size_t size)
{
	void *address;

	address = mm_malloc(mm, size);
	if (address == NULL)
		fatal("%s: mm_malloc(%lu)", __func__, (u_long)size);
	return (address);
}

/* Allocates data from a memory mapped area */

void *
mm_malloc(struct mm_master *mm, size_t size)
{
	struct mm_share *mms, *tmp;

	if (size == 0)
		fatal("mm_malloc: tried to allocate 0 bytes");

	/* Round the request up to a multiple of MM_MINSIZE */
	size = ((size + MM_MINSIZE - 1) / MM_MINSIZE) * MM_MINSIZE;

	/* First fit: take the first free chunk that is large enough */
	RB_FOREACH(mms, mmtree, &mm->rb_free) {
		if (mms->size >= size)
			break;
	}

	if (mms == NULL)
		return (NULL);

	/* Debug: poison the memory so use of stale data shows up */
	memset(mms->address, 0xd0, size);

	tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);

	/*
	 * Shrink the free chunk in place; its start address only grows,
	 * so its position in the RB tree does not change.
	 */
	mms->size -= size;
	mms->address = (u_char *)mms->address + size;

	if (mms->size == 0) {
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	}

	return (tmp->address);
}
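
/*
 * Example of the rounding above: if MM_MINSIZE were 128, a 100-byte
 * request would be rounded up to 128, carved from the front of the
 * first sufficiently large free chunk, and any remainder would stay
 * on the free tree.
 */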

/* Frees memory in a memory mapped area */

void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): cannot find %p", mm, address);

	/* Debug: poison the freed memory so use-after-free shows up */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);

	/* Find the in-order predecessor of mms in the free tree */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		/* Rightmost node of the left subtree */
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		/* Closest ancestor that holds mms in its right subtree */
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}

	/* Sanity check: the predecessor must not overlap the freed range */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%lu) > %p",
		    prev->address, (u_long)prev->size, address);

	/* See if we can merge backwards */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;

	if (prev == NULL)
		return;

	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%lu)",
		    mms->address, prev->address, (u_long)prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	/* Merge with the following chunk */
	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		xfree(mms);
	else
		mm_free(mm->mmalloc, mms);
}

static void
mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
    struct mm_master *mm, struct mm_master *mmold)
{
	struct mm_master *mmalloc = mm->mmalloc;
	struct mm_share *mms, *new;

	/* Copy each entry of the old tree into the new one */
	RB_FOREACH(mms, mmtree, oldtree) {
		/* Validate the entry before copying it */
		mm_memvalid(mmold, mms, sizeof(struct mm_share));
		mm_memvalid(mm, mms->address, mms->size);

		new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
		memcpy(new, mms, sizeof(struct mm_share));
		RB_INSERT(mmtree, newtree, new);
	}
}

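/*
 * Rebuilds the share maps in a brand-new shared area so that the
 * bookkeeping itself (the mm_master and its mm_share records) ends up
 * in memory both processes can see; the old management area is
 * destroyed and the new masters are returned via pmm and pmmalloc.
 */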
void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	mm_memvalid(mmold, mm, sizeof(*mm));

	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}

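/* Checks that an address range lies fully within the memory map */
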
void
mm_memvalid(struct mm_master *mm, void *address, size_t size)
{
	void *end = (u_char *)address + size;

	if (address < mm->address)
		fatal("mm_memvalid: address too small: %p", address);
	if (end < address)
		fatal("mm_memvalid: end < address: %p < %p", end, address);
	if (end > (void *)((u_char *)mm->address + mm->size))
		fatal("mm_memvalid: address too large: %p", address);
}