//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"

namespace __sanitizer {
struct StackTrace;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

#if defined(__powerpc__) || defined(__powerpc64__)
const uptr kCacheLineSize = 128;
#else
const uptr kCacheLineSize = 64;
#endif

const uptr kMaxPathLength = 512;

extern const char *SanitizerToolName;  // Can be changed by the tool.

uptr GetPageSize();
uptr GetPageSizeCached();
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
// Threads
uptr GetTid();
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type);
void UnmapOrDie(void *addr, uptr size);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *Mprotect(uptr fixed_addr, uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
void FlushUnneededShadowMemory(uptr addr, uptr size);

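// Example (illustrative sketch, not part of the interface): a tool that
// needs a temporary scratch region could pair MmapOrDie/UnmapOrDie:
//   uptr size = RoundUpTo(n_bytes, GetPageSizeCached());
//   void *scratch = MmapOrDie(size, "scratch region");  // dies on failure
//   // ... use scratch ...
//   UnmapOrDie(scratch, size);
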
// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
// FIXME: use InternalAlloc instead of MmapOrDie once
// InternalAlloc is made libc-free.
template<typename T>
class InternalScopedBuffer {
 public:
  explicit InternalScopedBuffer(uptr cnt) {
    cnt_ = cnt;
    ptr_ = (T*)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
  }
  ~InternalScopedBuffer() {
    UnmapOrDie(ptr_, cnt_ * sizeof(T));
  }
  T &operator[](uptr i) { return ptr_[i]; }
  T *data() { return ptr_; }
  uptr size() { return cnt_ * sizeof(T); }

 private:
  T *ptr_;
  uptr cnt_;
  // Disallow evil constructors.
  InternalScopedBuffer(const InternalScopedBuffer&);
  void operator=(const InternalScopedBuffer&);
};

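// Example (illustrative sketch): a mmap-backed scratch buffer that is
// released automatically when it goes out of scope. Note that size()
// returns the size in bytes, not the element count:
//   InternalScopedBuffer<char> filename(kMaxPathLength);
//   // 'path' is a hypothetical NUL-terminated string.
//   internal_strncpy(filename.data(), path, filename.size());
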
// Simple low-level (mmap-based) allocator for internal use. Doesn't have a
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);
 private:
  char *allocated_end_;
  char *allocated_current_;
};
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

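// Example (illustrative sketch): a linker-initialized allocator used for
// internal metadata, with a hypothetical callback that notifies the tool
// about each newly allocated chunk:
//   static LowLevelAllocator g_allocator;  // no constructor; zero-initialized
//   static void OnLowLevelAllocate(uptr ptr, uptr size) { /* e.g. tag it */ }
//   SetLowLevelAllocateCallback(OnLowLevelAllocate);
//   void *mem = g_allocator.Allocate(4096);  // caller holds its own lock
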
// IO
void RawWrite(const char *buffer);
bool PrintsToTty();
// Caching version of PrintsToTty(). Not thread-safe.
bool PrintsToTtyCached();
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
// Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex;
void MaybeOpenReportFile();
extern fd_t report_fd;
extern bool log_to_file;
extern char report_path_prefix[4096];
extern uptr report_fd_pid;

uptr OpenFile(const char *filename, bool write);
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// Returns the number of bytes read, or 0 if the file could not be opened.
uptr ReadFileToBuffer(const char *file_name, char **buff,
                      uptr *buff_size, uptr max_len);
// Maps the given file to virtual memory, and returns a pointer to it
// (or NULL if the mapping fails). Stores the size of the mmaped region
// in '*buff_size'.
void *MapFileToMemory(const char *file_name, uptr *buff_size);

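// Example (illustrative sketch): reading /proc/self/maps into an
// mmap-backed buffer; the path and size cap are arbitrary illustration
// values, and the buffer is unmapped once it is no longer needed:
//   char *buff = 0;
//   uptr buff_size = 0;
//   uptr len = ReadFileToBuffer("/proc/self/maps", &buff, &buff_size, 1 << 26);
//   if (len > 0) {
//     // parse buff[0..len) ...
//     UnmapOrDie(buff, buff_size);
//   }
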
// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
void PrintSourceLocation(const char *file, int line, int column);
void PrintModuleAndOffset(const char *module, uptr offset);


// OS
void DisableCoreDumper();
void DumpProcessMap();
bool FileExists(const char *filename);
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
const char *GetPwd();
char *FindPathToBinary(const char *name);
u32 GetUid();
void ReExec();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
void PrepareForSandboxing();

void InitTlsSize();
uptr GetTlsSize();

// Other
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);

// Set the name of the current thread to 'name', return true on success.
// The name may be truncated to a system-dependent limit.
bool SanitizerSetThreadName(const char *name);
// Get the name of the current thread (no more than max_len bytes),
// return true on success. 'name' should have space for at least max_len+1
// bytes.
bool SanitizerGetThreadName(char *name, int max_len);

// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do a tool-specific job.
typedef void (*DieCallbackType)(void);
void SetDieCallback(DieCallbackType);
DieCallbackType GetDieCallback();
typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                        u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

// We don't want the error summary to be too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
void ReportErrorSummary(const char *error_message);
// Same as above, but construct error_message as:
//   error_type: file:line function
void ReportErrorSummary(const char *error_type, const char *file,
                        int line, const char *function);
void ReportErrorSummary(const char *error_type, StackTrace *trace);

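// Example (illustrative sketch): a tool reporting a hypothetical error kind
// would produce a one-line summary along the lines of
//   SUMMARY: AddressSanitizer: heap-buffer-overflow: foo.cc:12 main
// via:
//   ReportErrorSummary("heap-buffer-overflow", "foo.cc", 12, "main");
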
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif

INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

INLINE bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}

INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK(size < (1ULL << (up + 1)));
  CHECK(size > (1ULL << up));
  return 1UL << (up + 1);
}

INLINE uptr RoundUpTo(uptr size, uptr boundary) {
  CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

INLINE uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

INLINE bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
  return __builtin_ctzl(x);
#elif defined(_WIN64)
  unsigned long ret;  // NOLINT
  _BitScanForward64(&ret, x);
  return ret;
#else
  unsigned long ret;  // NOLINT
  _BitScanForward(&ret, x);
  return ret;
#endif
}

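// Worked example (illustrative, following the definitions above):
//   MostSignificantSetBitIndex(40) == 5    (40 == 0b101000)
//   RoundUpToPowerOfTwo(40)        == 64
//   RoundUpTo(40, 16)              == 48
//   RoundDownTo(40, 16)            == 32
//   IsAligned(48, 16)              == true
//   Log2(64)                       == 6
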
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
INLINE bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
INLINE bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
INLINE int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

#if SANITIZER_WORDSIZE == 64
# define FIRST_32_SECOND_64(a, b) (b)
#else
# define FIRST_32_SECOND_64(a, b) (a)
#endif

// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVector {
 public:
  explicit InternalMmapVector(uptr initial_capacity) {
    CHECK_GT(initial_capacity, 0);
    capacity_ = initial_capacity;
    size_ = 0;
    data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVector");
  }
  ~InternalMmapVector() {
    UnmapOrDie(data_, capacity_ * sizeof(T));
  }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity_);
    if (size_ == capacity_) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Resize(new_capacity);
    }
    data_[size_++] = element;
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  uptr capacity() const {
    return capacity_;
  }

 private:
  void Resize(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T),
                                 "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    T *old_data = data_;
    data_ = new_data;
    UnmapOrDie(old_data, capacity_ * sizeof(T));
    capacity_ = new_capacity;
  }
  // Disallow evil constructors.
  InternalMmapVector(const InternalMmapVector&);
  void operator=(const InternalMmapVector&);

  T *data_;
  uptr capacity_;
  uptr size_;
};

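// Example (illustrative sketch): collecting values without touching the libc
// heap; the backing memory is unmapped when the vector is destroyed:
//   InternalMmapVector<uptr> addresses(/*initial_capacity*/ 16);
//   addresses.push_back(0x1000);
//   addresses.push_back(0x2000);
//   for (uptr i = 0; i < addresses.size(); i++) { /* use addresses[i] */ }
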
// HeapSort for arrays and InternalMmapVector.
template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) {
  if (size < 2)
    return;
  // Stage 1: insert elements into the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp((*v)[p], (*v)[j]))
        Swap((*v)[j], (*v)[p]);
      else
        break;
    }
  }
  // Stage 2: swap the largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap((*v)[0], (*v)[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp((*v)[max_ind], (*v)[left]))
        max_ind = left;
      if (right < i && comp((*v)[max_ind], (*v)[right]))
        max_ind = right;
      if (max_ind != j)
        Swap((*v)[j], (*v)[max_ind]);
      else
        break;
    }
  }
}

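// Example (illustrative sketch): sorting a vector like the one in the
// example above in ascending order, using a hypothetical comparator:
//   static bool UptrLess(const uptr &a, const uptr &b) { return a < b; }
//   InternalSort(&addresses, addresses.size(), UptrLess);
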
template<class Container, class Value, class Compare>
uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
                          const Value &val, Compare comp) {
  uptr not_found = last + 1;
  while (last >= first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else if (comp(val, v[mid]))
      last = mid - 1;
    else
      return mid;
  }
  return not_found;
}

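// Example (illustrative sketch): looking up a value in the sorted vector
// from the examples above; a return value greater than 'last' means
// "not found":
//   uptr idx = InternalBinarySearch(addresses, 0, addresses.size() - 1,
//                                   (uptr)0x2000, UptrLess);
//   bool found = (idx < addresses.size());
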
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule(const char *module_name, uptr base_address);
  void addAddressRange(uptr beg, uptr end);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }

 private:
  struct AddressRange {
    uptr beg;
    uptr end;
  };
  char *full_name_;
  uptr base_address_;
  static const uptr kMaxNumberOfAddressRanges = 6;
  AddressRange ranges_[kMaxNumberOfAddressRanges];
  uptr n_ranges_;
};

// OS-dependent function that fills the array with descriptions of at most
// "max_modules" currently loaded modules. Returns the number of
// initialized modules. If filter is nonzero, ignores modules for which
// filter(full_name) is false.
typedef bool (*string_predicate_t)(const char *);
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
                      string_predicate_t filter);

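// Example (illustrative sketch): enumerating loaded modules and checking
// whether a hypothetical program counter 'pc' falls inside one of them;
// the capacity value is arbitrary:
//   uptr kMaxModules = 128;
//   LoadedModule *modules = (LoadedModule *)MmapOrDie(
//       kMaxModules * sizeof(LoadedModule), "modules");
//   uptr n = GetListOfModules(modules, kMaxModules, /*filter*/ 0);
//   for (uptr i = 0; i < n; i++)
//     if (modules[i].containsAddress(pc)) { /* pc is inside this module */ }
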
#if SANITIZER_POSIX
const uptr kPthreadDestructorIterations = 4;
#else
// Unused on Windows.
const uptr kPthreadDestructorIterations = 0;
#endif

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

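// Example (illustrative sketch): constructing an object in memory obtained
// from a LowLevelAllocator via the placement operator new defined above;
// the type and arguments are hypothetical:
//   static __sanitizer::LowLevelAllocator symbolizer_allocator;
//   MyType *obj = new(symbolizer_allocator) MyType(args);
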
#endif  // SANITIZER_COMMON_H