//===-- sanitizer_ring_buffer.h ---------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Simple ring buffer.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_RING_BUFFER_H
#define SANITIZER_RING_BUFFER_H

#include "sanitizer_common.h"

namespace __sanitizer {
// RingBuffer<T>: fixed-size ring buffer optimized for speed of push().
// T should be a POD type and sizeof(T) should be divisible by sizeof(void*).
// At creation, all elements are zero.
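//
// A minimal usage sketch (illustrative only, not part of the original header):
// allocate a buffer, push a value, read it back newest-first, then unmap.
// RingBuffer<uptr> satisfies the sizeof(T) % sizeof(void *) check below.
//
//   RingBuffer<uptr> *RB = RingBuffer<uptr>::New(/*Size=*/128);
//   RB->push(0x42);            // Once full, push overwrites the oldest entry.
//   uptr Latest = (*RB)[0];    // operator[](0) is the most recent push.
//   RB->Delete();              // Unmaps the storage; RB is invalid afterwards.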
template<class T>
class RingBuffer {
 public:
  COMPILER_CHECK(sizeof(T) % sizeof(void *) == 0);
  static RingBuffer *New(uptr Size) {
    void *Ptr = MmapOrDie(SizeInBytes(Size), "RingBuffer");
    RingBuffer *RB = reinterpret_cast<RingBuffer*>(Ptr);
    uptr End = reinterpret_cast<uptr>(Ptr) + SizeInBytes(Size);
    RB->last_ = RB->next_ = reinterpret_cast<T*>(End - sizeof(T));
    return RB;
  }
  void Delete() {
    UnmapOrDie(this, SizeInBytes(size()));
  }
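  // Capacity in elements: the distance from the first data element (which
  // starts right after the two header pointers, last_ and next_) up to and
  // including last_.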
  uptr size() const {
    return last_ + 1 -
           reinterpret_cast<T *>(reinterpret_cast<uptr>(this) +
                                 2 * sizeof(T *));
  }
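  // Writes t into the slot next_ points at, then steps next_ one element
  // towards the front of the mapping; when next_ would leave the data area it
  // wraps back to last_, so the oldest entries are overwritten first.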
  void push(T t) {
    *next_ = t;
    next_--;
    // The condition below works only if sizeof(T) is divisible by sizeof(T*).
    if (next_ <= reinterpret_cast<T*>(&next_))
      next_ = last_;
  }

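  // Returns the Idx-th most recently pushed element: operator[](0) is the
  // latest push; slots that have not been written yet read back as zero.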
  T operator[](uptr Idx) const {
    CHECK_LT(Idx, size());
    sptr IdxNext = Idx + 1;
    if (IdxNext > last_ - next_)
      IdxNext -= size();
    return next_[IdxNext];
  }

 private:
  RingBuffer() {}
  ~RingBuffer() {}
  RingBuffer(const RingBuffer&) = delete;

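  // Total mapping size: two header pointers followed by Size elements.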
  static uptr SizeInBytes(uptr Size) {
    return Size * sizeof(T) + 2 * sizeof(T*);
  }

  // Data layout:
  // LNDDDDDDDD
  // D: data elements.
  // L: last_, always points to the last data element.
  // N: next_, initially equal to last_, is decremented on every push and
  //    wraps around once it is less than or equal to its own address.

  T *last_;
  T *next_;
  T data_[1];  // flexible array.
};

}  // namespace __sanitizer

#endif  // SANITIZER_RING_BUFFER_H