/*--------------------------------------------------------------------*/
/*--- For when the client advises Valgrind about memory           ---*/
/*--- permissions.                                                ---*/
/*---                                              mc_clientreqs.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind skin for
   detecting memory errors.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "mc_include.h"

#include "memcheck.h"   /* for VG_USERREQ__* */


/*------------------------------------------------------------*/
/*--- General client block management.                     ---*/
/*------------------------------------------------------------*/

/* This is managed as an expanding array of client block descriptors.
   Indices of live descriptors are issued to the client, so it can ask
   to free them later.  Therefore we cannot slide live entries down
   over dead ones.  Instead we must use free/inuse flags and scan for
   an empty slot at allocation time.  This in turn means allocation is
   relatively expensive, so we hope this does not happen too often.
*/
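
/* Illustrative sketch only (nothing below compiles against it): the
   lifecycle of one descriptor slot, as implemented by the code that
   follows.  A MAKE_* client request grabs a slot and hands its index
   back to the client; a later DISCARD of that index merely flips the
   slot back to CG_NotInUse so a future allocation can reuse it.

      Int ix = vg_alloc_client_block();  // scan for a free slot, else grow
      vg_cgbs[ix].kind = CG_NoAccess;    // slot ix is now live
      ...                                // index ix is handed to the client
      vg_cgbs[ix].kind = CG_NotInUse;    // DISCARD: slot ix becomes reusable
*/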

typedef
   enum { CG_NotInUse, CG_NoAccess, CG_Writable, CG_Readable }
   CGenBlockKind;

typedef
   struct {
      Addr          start;
      UInt          size;
      ExeContext*   where;
      CGenBlockKind kind;
   }
   CGenBlock;

/* This subsystem is self-initialising. */
static UInt       vg_cgb_size = 0;
static UInt       vg_cgb_used = 0;
static CGenBlock* vg_cgbs     = NULL;

/* Stats for this subsystem. */
static UInt vg_cgb_used_MAX = 0;   /* Max in use. */
static UInt vg_cgb_allocs   = 0;   /* Number of allocs. */
static UInt vg_cgb_discards = 0;   /* Number of discards. */
static UInt vg_cgb_search   = 0;   /* Number of searches. */


static
Int vg_alloc_client_block ( void )
{
   Int        i, sz_new;
   CGenBlock* cgbs_new;

   vg_cgb_allocs++;

   for (i = 0; i < vg_cgb_used; i++) {
      vg_cgb_search++;
      if (vg_cgbs[i].kind == CG_NotInUse)
         return i;
   }

   /* Not found.  Try to allocate one at the end. */
   if (vg_cgb_used < vg_cgb_size) {
      vg_cgb_used++;
      return vg_cgb_used-1;
   }

   /* Ok, we have to allocate a new one. */
   sk_assert(vg_cgb_used == vg_cgb_size);
   sz_new = (vg_cgbs == NULL) ? 10 : (2 * vg_cgb_size);

   cgbs_new = VG_(malloc)( sz_new * sizeof(CGenBlock) );
   for (i = 0; i < vg_cgb_used; i++)
      cgbs_new[i] = vg_cgbs[i];

   if (vg_cgbs != NULL)
      VG_(free)( vg_cgbs );
   vg_cgbs = cgbs_new;

   vg_cgb_size = sz_new;
   vg_cgb_used++;
   if (vg_cgb_used > vg_cgb_used_MAX)
      vg_cgb_used_MAX = vg_cgb_used;
   return vg_cgb_used-1;
}


/*------------------------------------------------------------*/
/*--- Stack block management.                              ---*/
/*------------------------------------------------------------*/

/* This is managed as an expanding array of CStackBlocks.  They are
   packed up against the left-hand end of the array, with no holes.
   They are kept sorted by the start field, with the [0] having the
   highest value.  This means it's pretty cheap to put new blocks at
   the end, corresponding to stack pushes, since the additions put
   blocks on in what is presumably fairly close to strictly descending
   order.  If this assumption doesn't hold the performance
   consequences will be horrible.

   When the client's %ESP jumps back upwards as the result of a RET
   insn, we shrink the array backwards from the end, in a
   guaranteed-cheap linear scan.
*/
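
/* A small worked picture (the addresses are made up; only the ordering
   matters): after blocks are pushed with starts 0x8000, 0x7f00, 0x7e00,
   the array reads, highest first,

      vg_csbs[0].start = 0x8000
      vg_csbs[1].start = 0x7f00
      vg_csbs[2].start = 0x7e00    <- most recent push, lowest address

   and a RET which lifts %ESP above 0x7e00 + size simply does
   vg_csb_used-- from the right-hand end. */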

typedef
   struct {
      Addr        start;
      UInt        size;
      ExeContext* where;
   }
   CStackBlock;

/* This subsystem is self-initialising. */
static UInt         vg_csb_size = 0;
static UInt         vg_csb_used = 0;
static CStackBlock* vg_csbs     = NULL;

/* Stats for this subsystem. */
static UInt vg_csb_used_MAX = 0;   /* Max in use. */
static UInt vg_csb_allocs   = 0;   /* Number of allocs. */
static UInt vg_csb_discards = 0;   /* Number of discards. */
static UInt vg_csb_swaps    = 0;   /* Number of swaps. */

static
void vg_add_client_stack_block ( ThreadState* tst, Addr aa, UInt sz )
{
   UInt         i, sz_new;
   CStackBlock* csbs_new;
   vg_csb_allocs++;

   /* Ensure there is space for a new block. */

   if (vg_csb_used >= vg_csb_size) {

      /* No; we have to expand the array. */
      sk_assert(vg_csb_used == vg_csb_size);

      sz_new = (vg_csbs == NULL) ? 10 : (2 * vg_csb_size);

      csbs_new = VG_(malloc)( sz_new * sizeof(CStackBlock) );
      for (i = 0; i < vg_csb_used; i++)
         csbs_new[i] = vg_csbs[i];

      if (vg_csbs != NULL)
         VG_(free)( vg_csbs );
      vg_csbs = csbs_new;

      vg_csb_size = sz_new;
   }

   /* Ok, we can use [vg_csb_used]. */
   vg_csbs[vg_csb_used].start = aa;
   vg_csbs[vg_csb_used].size  = sz;
   /* Actually running a thread at this point. */
   vg_csbs[vg_csb_used].where = VG_(get_ExeContext) ( tst );
   vg_csb_used++;

   if (vg_csb_used > vg_csb_used_MAX)
      vg_csb_used_MAX = vg_csb_used;

   sk_assert(vg_csb_used <= vg_csb_size);

   /* VG_(printf)("acsb %p %d\n", aa, sz); */
   MC_(make_noaccess) ( aa, sz );

   /* And make sure that they are in descending order of address, by
      bubbling the new block (at index vg_csb_used-1) back towards the
      front until it is in place. */
   i = vg_csb_used - 1;
   while (i > 0 && vg_csbs[i-1].start < vg_csbs[i].start) {
      CStackBlock tmp = vg_csbs[i-1];
      vg_csbs[i-1] = vg_csbs[i];
      vg_csbs[i]   = tmp;
      i--;
      vg_csb_swaps++;
   }

#  if 1
   for (i = 1; i < vg_csb_used; i++)
      sk_assert(vg_csbs[i-1].start >= vg_csbs[i].start);
#  endif
}


/*------------------------------------------------------------*/
/*--- Externally visible functions.                        ---*/
/*------------------------------------------------------------*/

void MC_(show_client_block_stats) ( void )
{
   VG_(message)(Vg_DebugMsg,
      "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
      vg_cgb_allocs, vg_cgb_discards, vg_cgb_used_MAX, vg_cgb_search
   );
   VG_(message)(Vg_DebugMsg,
      "  stack CBs: %d allocs, %d discards, %d maxinuse, %d swap",
      vg_csb_allocs, vg_csb_discards, vg_csb_used_MAX, vg_csb_swaps
   );
}

Bool MC_(client_perm_maybe_describe)( Addr a, AddrInfo* ai )
{
   Int i;
   /* VG_(printf)("try to identify %d\n", a); */

   /* First see if it's a stack block.  We do two passes, one exact
      and one with a bit of slop, so as to try and get the most
      accurate fix. */
   for (i = 0; i < vg_csb_used; i++) {
      if (vg_csbs[i].start <= a
          && a < vg_csbs[i].start + vg_csbs[i].size) {
         ai->akind      = UserS;
         ai->blksize    = vg_csbs[i].size;
         ai->rwoffset   = (Int)(a) - (Int)(vg_csbs[i].start);
         ai->lastchange = vg_csbs[i].where;
         return True;
      }
   }

   /* No exact match on the stack.  Re-do the stack scan with a bit of
      slop. */
   for (i = 0; i < vg_csb_used; i++) {
      if (vg_csbs[i].start - 8 <= a
          && a < vg_csbs[i].start + vg_csbs[i].size + 8) {
         ai->akind      = UserS;
         ai->blksize    = vg_csbs[i].size;
         ai->rwoffset   = (Int)(a) - (Int)(vg_csbs[i].start);
         ai->lastchange = vg_csbs[i].where;
         return True;
      }
   }

   /* No match on the stack.  Perhaps it's a general block ? */
   for (i = 0; i < vg_cgb_used; i++) {
      if (vg_cgbs[i].kind == CG_NotInUse)
         continue;
      if (VG_(addr_is_in_block)(a, vg_cgbs[i].start, vg_cgbs[i].size)) {
         ai->akind      = UserG;
         ai->blksize    = vg_cgbs[i].size;
         ai->rwoffset   = (Int)(a) - (Int)(vg_cgbs[i].start);
         ai->lastchange = vg_cgbs[i].where;
         return True;
      }
   }
   return False;
}

static __attribute__ ((unused))
void delete_client_stack_blocks_following_ESP_change ( void )
{
   Addr newESP = VG_(get_stack_pointer)();

   while (vg_csb_used > 0
          && vg_csbs[vg_csb_used-1].start + vg_csbs[vg_csb_used-1].size
             <= newESP) {
      vg_csb_used--;
      vg_csb_discards++;
      if (VG_(clo_verbosity) > 2)
         VG_(printf)("discarding stack block %p for %d\n",
                     (void*)vg_csbs[vg_csb_used].start,
                     vg_csbs[vg_csb_used].size);
   }
}


Bool SK_(handle_client_request) ( ThreadState* tst, UInt* arg_block, UInt* ret )
{
   Int   i;
   Bool  ok;
   Addr  bad_addr;
   UInt* arg = arg_block;

   if (!VG_IS_SKIN_USERREQ('M','C',arg[0]))
      return False;

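   /* Request layout, as used by the cases below: arg[0] is the
      'M','C'-encoded request code, arg[1] is a start address and
      arg[2] a length (or, for DISCARD, the block index returned by an
      earlier MAKE_* request).  *ret carries the result back: a block
      index for the MAKE_* requests, the first offending address for a
      failed CHECK_* request, and 0 where there is nothing useful to
      return. */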
   switch (arg[0]) {
      case VG_USERREQ__CHECK_WRITABLE: /* check writable */
         ok = MC_(check_writable) ( arg[1], arg[2], &bad_addr );
         if (!ok)
            MC_(record_user_error) ( tst, bad_addr, True );
         *ret = ok ? (UInt)NULL : bad_addr;
         break;

      case VG_USERREQ__CHECK_READABLE: /* check readable */
         ok = MC_(check_readable) ( arg[1], arg[2], &bad_addr );
         if (!ok)
            MC_(record_user_error) ( tst, bad_addr, False );
         *ret = ok ? (UInt)NULL : bad_addr;
         break;

      case VG_USERREQ__DO_LEAK_CHECK:
         MC_(detect_memory_leaks)();
         *ret = 0; /* return value is meaningless */
         break;

      case VG_USERREQ__MAKE_NOACCESS: /* make no access */
         i = vg_alloc_client_block();
         /* VG_(printf)("allocated %d %p\n", i, vg_cgbs); */
         vg_cgbs[i].kind  = CG_NoAccess;
         vg_cgbs[i].start = arg[1];
         vg_cgbs[i].size  = arg[2];
         vg_cgbs[i].where = VG_(get_ExeContext) ( tst );
         MC_(make_noaccess) ( arg[1], arg[2] );
         *ret = i;
         break;

      case VG_USERREQ__MAKE_WRITABLE: /* make writable */
         i = vg_alloc_client_block();
         vg_cgbs[i].kind  = CG_Writable;
         vg_cgbs[i].start = arg[1];
         vg_cgbs[i].size  = arg[2];
         vg_cgbs[i].where = VG_(get_ExeContext) ( tst );
         MC_(make_writable) ( arg[1], arg[2] );
         *ret = i;
         break;

      case VG_USERREQ__MAKE_READABLE: /* make readable */
         i = vg_alloc_client_block();
         vg_cgbs[i].kind  = CG_Readable;
         vg_cgbs[i].start = arg[1];
         vg_cgbs[i].size  = arg[2];
         vg_cgbs[i].where = VG_(get_ExeContext) ( tst );
         MC_(make_readable) ( arg[1], arg[2] );
         *ret = i;
         break;

      case VG_USERREQ__DISCARD: /* discard */
         if (vg_cgbs == NULL
             || arg[2] >= vg_cgb_used || vg_cgbs[arg[2]].kind == CG_NotInUse)
            return 1;
         sk_assert(arg[2] >= 0 && arg[2] < vg_cgb_used);
         vg_cgbs[arg[2]].kind = CG_NotInUse;
         vg_cgb_discards++;
         *ret = 0;
         break;

      case VG_USERREQ__MAKE_NOACCESS_STACK: /* make noaccess stack block */
         vg_add_client_stack_block ( tst, arg[1], arg[2] );
         *ret = 0;
         break;

      default:
         VG_(message)(Vg_UserMsg,
                      "Warning: unknown memcheck client request code %d",
                      arg[0]);
         return False;
   }
   return True;
}
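
/* Client-side view (a sketch only, not part of this file's build): the
   requests handled above are normally issued from the program under
   test via the wrapper macros in memcheck.h.  The macro names below
   are the ones that header provided around this time; treat them as an
   assumption and check memcheck.h itself for the exact spelling.

      #include "memcheck.h"

      char buf[64];
      int  blk;

      blk = VALGRIND_MAKE_NOACCESS(buf, sizeof(buf)); // -> MAKE_NOACCESS,
                                                      //    *ret is the block index
      ...
      VALGRIND_MAKE_READABLE(buf, sizeof(buf));       // -> MAKE_READABLE
      VALGRIND_CHECK_READABLE(buf, sizeof(buf));      // -> CHECK_READABLE,
                                                      //    0 if ok, else bad address
      VALGRIND_DISCARD(blk);                          // -> DISCARD the block index
      VALGRIND_DO_LEAK_CHECK;                         // -> DO_LEAK_CHECK
*/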

/*--------------------------------------------------------------------*/
/*--- end                                          mc_clientreqs.c ---*/
/*--------------------------------------------------------------------*/