//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine holds a specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue exceeds a specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//
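
// A rough usage sketch (illustrative only; ToolCallback, ToolNode, and
// thread_cache are hypothetical placeholders, not defined in this header):
//
//   static Quarantine<ToolCallback, ToolNode> quarantine(LINKER_INITIALIZED);
//   quarantine.Init(/*size=*/1 << 24, /*cache_size=*/1 << 18);
//   // On deallocation, stash the block instead of freeing it immediately:
//   quarantine.Put(&thread_cache, ToolCallback(), node, node_size);
//   // Once the global queue exceeds `size`, the oldest memory is recycled
//   // via ToolCallback::Recycle().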

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Callback> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8KB.

// The callback interface is:
//   void Callback::Recycle(Node *ptr);
//   void *Callback::Allocate(uptr size);
//   void Callback::Deallocate(void *ptr);
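//
// A minimal sketch of such a callback (hypothetical; real tools such as
// ASan route these into their own allocators — FinallyFreeChunk, MetaAlloc,
// and MetaFree below are invented placeholder functions):
//
//   struct ExampleCallback {
//     // Called when a quarantined block's time is up: actually free it.
//     void Recycle(Node *ptr) { FinallyFreeChunk(ptr); }
//     // Allocate/free storage for the QuarantineBatch metadata itself.
//     void *Allocate(uptr size) { return MetaAlloc(size); }
//     void Deallocate(void *ptr) { MetaFree(ptr); }
//   };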
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    atomic_store(&max_size_, size, memory_order_release);
    atomic_store(&min_size_, size / 10 * 9,
                 memory_order_release);  // 90% of max size.
    max_cache_size_ = cache_size;
  }

  uptr GetSize() const {
    return atomic_load(&max_size_, memory_order_acquire);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    // Recycle() is entered with recycle_mutex_ held and releases it itself.
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  // Moves batches exceeding min_size_ out of the global cache and recycles
  // them. Called with recycle_mutex_ held; unlocks it before the
  // (potentially slow) recycling pass.
  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    uptr min_size = atomic_load(&min_size_, memory_order_acquire);
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      // Prefetch only within the populated part of the batch; entries past
      // b->count are uninitialized.
      for (uptr i = 0; i < kPrefetch && i < b->count; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0; i < b->count; i++) {
        if (i + kPrefetch < b->count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    CHECK(b);
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return 0;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

  // size_ is only modified by the owning thread (or under the Quarantine's
  // cache_mutex_), so a plain read-modify-write suffices here; the atomic
  // type merely makes concurrent reads via Size() well-defined.
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    CHECK(b);
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};
}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_QUARANTINE_H