/*--------------------------------------------------------------------*/
/*--- For when the client advises Valgrind about permissions.      ---*/
/*---                                             vg_clientperms.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an x86 protected-mode emulator
   designed for debugging and profiling binaries on x86-Unixes.

   Copyright (C) 2000-2002 Julian Seward
      jseward@acm.org
      Julian_Seward@muraroa.demon.co.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file LICENSE.
*/

#include "vg_include.h"
#include "vg_constants.h"

/*------------------------------------------------------------*/
/*--- General client block management.                     ---*/
/*------------------------------------------------------------*/

/* This is managed as an expanding array of client block descriptors.
   Indices of live descriptors are issued to the client, so it can ask
   to free them later.  Therefore we cannot slide live entries down
   over dead ones.  Instead we must use free/inuse flags and scan for
   an empty slot at allocation time.  This in turn means allocation is
   relatively expensive, so we hope this does not happen too often.
*/

typedef
   enum { CG_NotInUse, CG_NoAccess, CG_Writable, CG_Readable }
   CGenBlockKind;

typedef
   struct {
      Addr          start;
      UInt          size;
      ExeContext*   where;
      CGenBlockKind kind;
   }
   CGenBlock;

/* This subsystem is self-initialising. */
static UInt       vg_cgb_size = 0;
static UInt       vg_cgb_used = 0;
static CGenBlock* vg_cgbs     = NULL;

/* Stats for this subsystem. */
static UInt vg_cgb_used_MAX = 0;   /* Max in use. */
static UInt vg_cgb_allocs   = 0;   /* Number of allocs. */
static UInt vg_cgb_discards = 0;   /* Number of discards. */
static UInt vg_cgb_search   = 0;   /* Number of searches. */

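/* Return the index of a free descriptor slot: scan the existing
   array first, then take the slot just past the end if there is room,
   and otherwise grow the array (ten entries initially, doubling
   thereafter).  The caller must fill in the descriptor, in particular
   setting .kind to something other than CG_NotInUse. */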
static
Int vg_alloc_client_block ( void )
{
   Int        i, sz_new;
   CGenBlock* cgbs_new;

   vg_cgb_allocs++;

   for (i = 0; i < vg_cgb_used; i++) {
      vg_cgb_search++;
      if (vg_cgbs[i].kind == CG_NotInUse)
         return i;
   }

   /* Not found.  Try to allocate one at the end. */
   if (vg_cgb_used < vg_cgb_size) {
      vg_cgb_used++;
      return vg_cgb_used-1;
   }

   /* Ok, we have to allocate a new one. */
   vg_assert(vg_cgb_used == vg_cgb_size);
   sz_new = (vg_cgbs == NULL) ? 10 : (2 * vg_cgb_size);

   cgbs_new = VG_(malloc)( VG_AR_PRIVATE, sz_new * sizeof(CGenBlock) );
   for (i = 0; i < vg_cgb_used; i++)
      cgbs_new[i] = vg_cgbs[i];

   if (vg_cgbs != NULL)
      VG_(free)( VG_AR_PRIVATE, vg_cgbs );
   vg_cgbs = cgbs_new;

   vg_cgb_size = sz_new;
   vg_cgb_used++;
   if (vg_cgb_used > vg_cgb_used_MAX)
      vg_cgb_used_MAX = vg_cgb_used;
   return vg_cgb_used-1;
}

/*------------------------------------------------------------*/
/*--- Stack block management.                              ---*/
/*------------------------------------------------------------*/

/* This is managed as an expanding array of CStackBlocks.  They are
   packed up against the left-hand end of the array, with no holes.
   They are kept sorted by the start field, with entry [0] having the
   highest start address.  This means it's pretty cheap to put new
   blocks at the end, corresponding to stack pushes, since the
   additions arrive in what is presumably fairly close to strictly
   descending address order.  If this assumption doesn't hold, the
   performance consequences will be horrible.

   When the client's %ESP jumps back upwards as the result of a RET
   insn, we shrink the array backwards from the end, in a
   guaranteed-cheap linear scan.
*/

typedef
   struct {
      Addr        start;
      UInt        size;
      ExeContext* where;
   }
   CStackBlock;

/* This subsystem is self-initialising. */
static UInt         vg_csb_size = 0;
static UInt         vg_csb_used = 0;
static CStackBlock* vg_csbs     = NULL;

/* Stats for this subsystem. */
static UInt vg_csb_used_MAX = 0;   /* Max in use. */
static UInt vg_csb_allocs   = 0;   /* Number of allocs. */
static UInt vg_csb_discards = 0;   /* Number of discards. */
static UInt vg_csb_swaps    = 0;   /* Number of swaps. */

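/* Record the stack block [aa, aa+sz), mark it no-access, and bubble
   it backwards through the array so that the descending-start-address
   invariant described above is preserved. */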
static
void vg_add_client_stack_block ( Addr aa, UInt sz )
{
   UInt         i, sz_new;
   CStackBlock* csbs_new;
   vg_csb_allocs++;

   /* Ensure there is space for a new block. */

   if (vg_csb_used >= vg_csb_size) {

      /* No; we have to expand the array. */
      vg_assert(vg_csb_used == vg_csb_size);

      sz_new = (vg_csbs == NULL) ? 10 : (2 * vg_csb_size);

      csbs_new = VG_(malloc)( VG_AR_PRIVATE, sz_new * sizeof(CStackBlock) );
      for (i = 0; i < vg_csb_used; i++)
         csbs_new[i] = vg_csbs[i];

      if (vg_csbs != NULL)
         VG_(free)( VG_AR_PRIVATE, vg_csbs );
      vg_csbs = csbs_new;

      vg_csb_size = sz_new;
   }

   /* Ok, we can use [vg_csb_used]. */
   vg_csbs[vg_csb_used].start = aa;
   vg_csbs[vg_csb_used].size  = sz;
   vg_csbs[vg_csb_used].where = VG_(get_ExeContext) ( False );
   vg_csb_used++;

   if (vg_csb_used > vg_csb_used_MAX)
      vg_csb_used_MAX = vg_csb_used;

   vg_assert(vg_csb_used <= vg_csb_size);

   /* VG_(printf)("acsb %p %d\n", aa, sz); */
   VGM_(make_noaccess) ( aa, sz );

   /* And make sure that they are in descending order of address. */
   i = vg_csb_used-1;
   while (i > 0 && vg_csbs[i-1].start < vg_csbs[i].start) {
      CStackBlock tmp = vg_csbs[i-1];
      vg_csbs[i-1] = vg_csbs[i];
      vg_csbs[i]   = tmp;
      i--;
      vg_csb_swaps++;
   }

# if 1
   for (i = 1; i < vg_csb_used; i++)
      vg_assert(vg_csbs[i-1].start >= vg_csbs[i].start);
# endif
}


/*------------------------------------------------------------*/
/*--- Externally visible functions.                        ---*/
/*------------------------------------------------------------*/

void VG_(show_client_block_stats) ( void )
{
   VG_(message)(Vg_DebugMsg,
      "general CBs: %d allocs, %d discards, %d maxinuse, %d search",
      vg_cgb_allocs, vg_cgb_discards, vg_cgb_used_MAX, vg_cgb_search
   );
   VG_(message)(Vg_DebugMsg,
      "  stack CBs: %d allocs, %d discards, %d maxinuse, %d swap",
      vg_csb_allocs, vg_csb_discards, vg_csb_used_MAX, vg_csb_swaps
   );
}

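/* Try to describe the address A as a client block: if it lies inside
   (or, for stack blocks, within 8 bytes of; for general blocks,
   within one redzone of) a block recorded above, fill in *ai and
   return True, otherwise return False. */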
Bool VG_(client_perm_maybe_describe)( Addr a, AddrInfo* ai )
{
   Int i;
   /* VG_(printf)("try to identify %d\n", a); */

   /* First see if it's a stack block.  We do two passes, one exact
      and one with a bit of slop, so as to try and get the most
      accurate fix. */
   for (i = 0; i < vg_csb_used; i++) {
      if (vg_csbs[i].start <= a
          && a < vg_csbs[i].start + vg_csbs[i].size) {
         ai->akind      = UserS;
         ai->blksize    = vg_csbs[i].size;
         ai->rwoffset   = (Int)(a) - (Int)(vg_csbs[i].start);
         ai->lastchange = vg_csbs[i].where;
         return True;
      }
   }

   /* No exact match on the stack.  Re-do the stack scan with a bit of
      slop. */
   for (i = 0; i < vg_csb_used; i++) {
      if (vg_csbs[i].start - 8 <= a
          && a < vg_csbs[i].start + vg_csbs[i].size + 8) {
         ai->akind      = UserS;
         ai->blksize    = vg_csbs[i].size;
         ai->rwoffset   = (Int)(a) - (Int)(vg_csbs[i].start);
         ai->lastchange = vg_csbs[i].where;
         return True;
      }
   }

   /* No match on the stack.  Perhaps it's a general block ? */
   for (i = 0; i < vg_cgb_used; i++) {
      if (vg_cgbs[i].kind == CG_NotInUse)
         continue;
      if (vg_cgbs[i].start - VG_AR_CLIENT_REDZONE_SZB <= a
          && a < vg_cgbs[i].start + vg_cgbs[i].size + VG_AR_CLIENT_REDZONE_SZB) {
         ai->akind      = UserG;
         ai->blksize    = vg_cgbs[i].size;
         ai->rwoffset   = (Int)(a) - (Int)(vg_cgbs[i].start);
         ai->lastchange = vg_cgbs[i].where;
         return True;
      }
   }
   return False;
}

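/* Discard stack blocks which now lie entirely below the client's new
   %ESP.  Because the array is kept in descending order of start
   address, dead blocks are always at the end, so this is the cheap
   backwards scan promised above. */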
void VG_(delete_client_stack_blocks_following_ESP_change) ( void )
{
   Addr newESP;
   if (!VG_(clo_client_perms)) return;
   newESP = VG_(baseBlock)[VGOFF_(m_esp)];
   while (vg_csb_used > 0 &&
          vg_csbs[vg_csb_used-1].start + vg_csbs[vg_csb_used-1].size <= newESP) {
      vg_csb_used--;
      vg_csb_discards++;
      if (VG_(clo_verbosity) > 2)
         VG_(printf)("discarding stack block %p for %d\n",
                     vg_csbs[vg_csb_used].start, vg_csbs[vg_csb_used].size);
   }
}

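/* Dispatch a client request.  Codes 1001/1002/1003 register a new
   general block at AA of size NN with no-access, writable or readable
   permissions respectively, and return its descriptor index.  Codes
   2002/2003 check that [AA, AA+NN) is writable or readable, returning
   0 on success and the first offending address otherwise.  Code 2004
   discards the descriptor with index NN (returning 0 on success, 1 if
   it was not in use), and 3001 registers a no-access stack block.
   Unknown codes provoke a warning and return 1. */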
UInt VG_(handle_client_request) ( UInt code, Addr aa, UInt nn )
{
   Int  i;
   Bool ok;
   Addr bad_addr;

   if (VG_(clo_verbosity) > 2)
      VG_(printf)("client request: code %d, addr %p, len %d\n",
                  code, aa, nn );

   vg_assert(VG_(clo_client_perms));
   vg_assert(VG_(clo_instrument));

   switch (code) {
      case 1001: /* make no access */
         i = vg_alloc_client_block();
         /* VG_(printf)("allocated %d %p\n", i, vg_cgbs); */
         vg_cgbs[i].kind  = CG_NoAccess;
         vg_cgbs[i].start = aa;
         vg_cgbs[i].size  = nn;
         vg_cgbs[i].where = VG_(get_ExeContext) ( False );
         VGM_(make_noaccess) ( aa, nn );
         return i;
      case 1002: /* make writable */
         i = vg_alloc_client_block();
         vg_cgbs[i].kind  = CG_Writable;
         vg_cgbs[i].start = aa;
         vg_cgbs[i].size  = nn;
         vg_cgbs[i].where = VG_(get_ExeContext) ( False );
         VGM_(make_writable) ( aa, nn );
         return i;
      case 1003: /* make readable */
         i = vg_alloc_client_block();
         vg_cgbs[i].kind  = CG_Readable;
         vg_cgbs[i].start = aa;
         vg_cgbs[i].size  = nn;
         vg_cgbs[i].where = VG_(get_ExeContext) ( False );
         VGM_(make_readable) ( aa, nn );
         return i;

      case 2002: /* check writable */
         ok = VGM_(check_writable) ( aa, nn, &bad_addr );
         if (!ok)
            VG_(record_user_err) ( bad_addr, True );
         return ok ? (UInt)NULL : bad_addr;
      case 2003: /* check readable */
         ok = VGM_(check_readable) ( aa, nn, &bad_addr );
         if (!ok)
            VG_(record_user_err) ( bad_addr, False );
         return ok ? (UInt)NULL : bad_addr;

      case 2004: /* discard */
         if (vg_cgbs == NULL
             || nn >= vg_cgb_used || vg_cgbs[nn].kind == CG_NotInUse)
            return 1;
         vg_assert(nn >= 0 && nn < vg_cgb_used);
         vg_cgbs[nn].kind = CG_NotInUse;
         vg_cgb_discards++;
         return 0;

      case 3001: /* make noaccess stack block */
         vg_add_client_stack_block ( aa, nn );
         return 0;

      default:
         VG_(message)(Vg_UserMsg,
                      "Warning: unknown client request code %d", code);
         return 1;
   }
}

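/* For reference, a minimal sketch of how client code might drive
   these requests.  It assumes the wrapper macros supplied by the
   client-side header valgrind.h (VALGRIND_MAKE_NOACCESS,
   VALGRIND_CHECK_WRITABLE, VALGRIND_DISCARD) expand to the request
   codes handled above; the macro names and the buffer below are
   illustrative only and should be checked against that header.

      #include "valgrind.h"

      char buf[64];

      // Register buf as a no-access block (code 1001); keep the
      // returned descriptor index so it can be discarded later.
      unsigned int blk = VALGRIND_MAKE_NOACCESS(buf, sizeof buf);

      // Ask whether the range is writable (code 2002); a nonzero
      // result is the first offending address, and the access error
      // has already been recorded by Valgrind.
      unsigned int bad = VALGRIND_CHECK_WRITABLE(buf, sizeof buf);

      // Release the descriptor allocated above (code 2004).
      VALGRIND_DISCARD(blk);
*/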

/*--------------------------------------------------------------------*/
/*--- end                                          vg_clientperms.c ---*/
/*--------------------------------------------------------------------*/