author | android-build-team Robot <android-build-team-robot@google.com> | 2019-11-05 12:50:11 +0000
committer | android-build-team Robot <android-build-team-robot@google.com> | 2019-11-05 12:50:11 +0000
commit | d46af06422e0993e35928161be67e6d2b5831d6f (patch)
tree | d028ad6220d5bcd7743c701ec1ff9f7edeb1d658
parent | 40acb7f22295b3b3fdd887a210a87169ea2baf33 (diff)
parent | e556f254a92665c160289075a2cf8ffcd40f9a2d (diff)
download | scudo-android10-mainline-networking-release.tar.gz
Snap for 5988121 from e556f254a92665c160289075a2cf8ffcd40f9a2d to qt-aml-networking-release
Tags: android-mainline-10.0.0_r6, android10-mainline-networking-release
Change-Id: Id3b29e61e00ceedd3eb3d992a7a39457a379ace4
-rw-r--r-- | Android.bp | 1
-rw-r--r-- | standalone/allocator_config.h | 5
-rw-r--r-- | standalone/combined.h | 8
-rw-r--r-- | standalone/list.h | 236
-rw-r--r-- | standalone/primary32.h | 4
-rw-r--r-- | standalone/primary64.h | 4
-rw-r--r-- | standalone/quarantine.h | 2
-rw-r--r-- | standalone/release.h | 14
-rw-r--r-- | standalone/secondary.cpp | 135
-rw-r--r-- | standalone/secondary.h | 168
-rw-r--r-- | standalone/stats.h | 32
-rw-r--r-- | standalone/tests/combined_test.cpp | 15
-rw-r--r-- | standalone/tests/list_test.cpp | 116
-rw-r--r-- | standalone/tests/release_test.cpp | 4
-rw-r--r-- | standalone/tests/secondary_test.cpp | 26
15 files changed, 457 insertions, 313 deletions
diff --git a/Android.bp b/Android.bp
index fa53990c981..5bb999e2039 100644
--- a/Android.bp
+++ b/Android.bp
@@ -56,7 +56,6 @@ cc_library_static {
         "standalone/flags_parser.cpp",
         "standalone/linux.cpp",
         "standalone/report.cpp",
-        "standalone/secondary.cpp",
         "standalone/string_utils.cpp",
         "standalone/wrappers_c_bionic.cpp"
     ],
diff --git a/standalone/allocator_config.h b/standalone/allocator_config.h
index 62c6f287510..166e19e2b8f 100644
--- a/standalone/allocator_config.h
+++ b/standalone/allocator_config.h
@@ -14,6 +14,7 @@
 #include "flags.h"
 #include "primary32.h"
 #include "primary64.h"
+#include "secondary.h"
 #include "size_class_map.h"
 #include "tsd_exclusive.h"
 #include "tsd_shared.h"
@@ -31,6 +32,7 @@ struct DefaultConfig {
   // 512KB regions
   typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
 #endif
+  typedef MapAllocator<> Secondary;
   template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
 };
 
@@ -43,6 +45,7 @@ struct AndroidConfig {
   // 512KB regions
   typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
 #endif
+  typedef MapAllocator<> Secondary;
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs.
 };
@@ -56,6 +59,7 @@ struct AndroidSvelteConfig {
   // 64KB regions
   typedef SizeClassAllocator32<SizeClassMap, 16U> Primary;
 #endif
+  typedef MapAllocator<0U> Secondary;
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
 };
@@ -63,6 +67,7 @@ struct AndroidSvelteConfig {
 struct FuchsiaConfig {
   // 1GB Regions
   typedef SizeClassAllocator64<DefaultSizeClassMap, 30U> Primary;
+  typedef MapAllocator<> Secondary;
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 8U>; // Shared, max 8 TSDs.
 };
diff --git a/standalone/combined.h b/standalone/combined.h
index 60be1dd20d3..2f8d82b58db 100644
--- a/standalone/combined.h
+++ b/standalone/combined.h
@@ -161,6 +161,7 @@ public:
                          uptr Alignment = MinAlignment,
                          bool ZeroContents = false) {
     initThreadMaybe();
+    ZeroContents = ZeroContents || Options.ZeroContents;
 
     if (UNLIKELY(Alignment > MaxAlignment)) {
       if (Options.MayReturnNull)
@@ -200,7 +201,8 @@ public:
       TSD->unlock();
     } else {
       ClassId = 0;
-      Block = Secondary.allocate(NeededSize, Alignment, &BlockEnd);
+      Block =
+          Secondary.allocate(NeededSize, Alignment, &BlockEnd, ZeroContents);
     }
 
     if (UNLIKELY(!Block)) {
@@ -212,7 +214,7 @@ public:
     // We only need to zero the contents for Primary backed allocations. This
     // condition is not necessarily unlikely, but since memset is costly, we
     // might as well mark it as such.
-    if (UNLIKELY((ZeroContents || Options.ZeroContents) && ClassId))
+    if (UNLIKELY(ZeroContents && ClassId))
       memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
 
     Chunk::UnpackedHeader Header = {};
@@ -449,7 +451,7 @@ public:
   }
 
 private:
-  typedef MapAllocator SecondaryT;
+  using SecondaryT = typename Params::Secondary;
   typedef typename PrimaryT::SizeClassMap SizeClassMap;
 
   static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
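With the combined.h change above, every config has to name its Secondary next to its Primary, since the combined allocator now resolves it through `Params::Secondary`. A minimal sketch of what a downstream config could look like — `MyConfig` and its parameter choices are hypothetical, and the scudo standalone headers are assumed to be on the include path:

```cpp
// Hypothetical downstream config, modeled on DefaultConfig above.
#include "allocator_config.h"

namespace scudo {

struct MyConfig {
  // 1GB regions, as in the 64-bit DefaultConfig.
  typedef SizeClassAllocator64<DefaultSizeClassMap, 30U> Primary;
  // Cache up to 16 freed Secondary blocks; 0U would disable the free list,
  // as AndroidSvelteConfig does.
  typedef MapAllocator<16U> Secondary;
  template <class A>
  using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs.
};

} // namespace scudo
```

A config that omits the `Secondary` typedef no longer compiles, which is how the patch forces every platform config to make the free-list choice explicit.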
diff --git a/standalone/list.h b/standalone/list.h
index 6a7b9bd747a..c3b898a328c 100644
--- a/standalone/list.h
+++ b/standalone/list.h
@@ -13,43 +13,93 @@
 
 namespace scudo {
 
-// Intrusive POD singly-linked list.
+// Intrusive POD singly and doubly linked list.
 // An object with all zero fields should represent a valid empty list. clear()
 // should be called on all non-zero-initialized objects before using.
-template <class Item> struct IntrusiveList {
-  friend class Iterator;
+
+template <class T> class IteratorBase {
+public:
+  explicit IteratorBase(T *CurrentT) : Current(CurrentT) {}
+  IteratorBase &operator++() {
+    Current = Current->Next;
+    return *this;
+  }
+  bool operator!=(IteratorBase Other) const { return Current != Other.Current; }
+  T &operator*() { return *Current; }
+
+private:
+  T *Current;
+};
+
+template <class T> struct IntrusiveList {
+  bool empty() const { return Size == 0; }
+  uptr size() const { return Size; }
+
+  T *front() { return First; }
+  const T *front() const { return First; }
+  T *back() { return Last; }
+  const T *back() const { return Last; }
 
   void clear() {
     First = Last = nullptr;
     Size = 0;
   }
 
-  bool empty() const { return Size == 0; }
-  uptr size() const { return Size; }
+  typedef IteratorBase<T> Iterator;
+  typedef IteratorBase<const T> ConstIterator;
 
-  void push_back(Item *X) {
-    if (empty()) {
-      X->Next = nullptr;
-      First = Last = X;
-      Size = 1;
-    } else {
-      X->Next = nullptr;
-      Last->Next = X;
-      Last = X;
-      Size++;
+  Iterator begin() { return Iterator(First); }
+  Iterator end() { return Iterator(nullptr); }
+
+  ConstIterator begin() const { return ConstIterator(First); }
+  ConstIterator end() const { return ConstIterator(nullptr); }
+
+  void checkConsistency() const;
+
+protected:
+  uptr Size;
+  T *First;
+  T *Last;
+};
+
+template <class T> void IntrusiveList<T>::checkConsistency() const {
+  if (Size == 0) {
+    CHECK_EQ(First, nullptr);
+    CHECK_EQ(Last, nullptr);
+  } else {
+    uptr Count = 0;
+    for (T *I = First;; I = I->Next) {
+      Count++;
+      if (I == Last)
+        break;
     }
+    CHECK_EQ(this->size(), Count);
+    CHECK_EQ(Last->Next, nullptr);
   }
+}
 
-  void push_front(Item *X) {
-    if (empty()) {
-      X->Next = nullptr;
-      First = Last = X;
-      Size = 1;
-    } else {
-      X->Next = First;
+template <class T> struct SinglyLinkedList : public IntrusiveList<T> {
+  using IntrusiveList<T>::First;
+  using IntrusiveList<T>::Last;
+  using IntrusiveList<T>::Size;
+  using IntrusiveList<T>::empty;
+
+  void push_back(T *X) {
+    X->Next = nullptr;
+    if (empty())
       First = X;
-      Size++;
-    }
+    else
+      Last->Next = X;
+    Last = X;
+    Size++;
+  }
+
+  void push_front(T *X) {
+    if (empty())
+      Last = X;
+    X->Next = First;
+    First = X;
+    Size++;
   }
 
   void pop_front() {
@@ -60,7 +110,7 @@ template <class Item> struct IntrusiveList {
     Size--;
   }
 
-  void extract(Item *Prev, Item *X) {
+  void extract(T *Prev, T *X) {
     DCHECK(!empty());
     DCHECK_NE(Prev, nullptr);
     DCHECK_NE(X, nullptr);
@@ -71,84 +121,106 @@ template <class Item> struct IntrusiveList {
     Size--;
   }
 
-  Item *front() { return First; }
-  const Item *front() const { return First; }
-  Item *back() { return Last; }
-  const Item *back() const { return Last; }
-
-  void append_front(IntrusiveList<Item> *L) {
+  void append_back(SinglyLinkedList<T> *L) {
     DCHECK_NE(this, L);
     if (L->empty())
       return;
     if (empty()) {
       *this = *L;
-    } else if (!L->empty()) {
-      L->Last->Next = First;
-      First = L->First;
+    } else {
+      Last->Next = L->First;
+      Last = L->Last;
       Size += L->size();
     }
     L->clear();
   }
+};
 
-  void append_back(IntrusiveList<Item> *L) {
-    DCHECK_NE(this, L);
-    if (L->empty())
-      return;
+template <class T> struct DoublyLinkedList : IntrusiveList<T> {
+  using IntrusiveList<T>::First;
+  using IntrusiveList<T>::Last;
+  using IntrusiveList<T>::Size;
+  using IntrusiveList<T>::empty;
+
+  void push_front(T *X) {
+    X->Prev = nullptr;
     if (empty()) {
-      *this = *L;
+      Last = X;
     } else {
-      Last->Next = L->First;
-      Last = L->Last;
-      Size += L->size();
+      DCHECK_EQ(First->Prev, nullptr);
+      First->Prev = X;
     }
-    L->clear();
+    X->Next = First;
+    First = X;
+    Size++;
+  }
+
+  // Inserts X before Y.
+  void insert(T *X, T *Y) {
+    if (Y == First)
+      return push_front(X);
+    T *Prev = Y->Prev;
+    // This is a hard CHECK to ensure consistency in the event of an
+    // intentional corruption of Y->Prev, to prevent a potential write-{4,8}.
+    CHECK_EQ(Prev->Next, Y);
+    Prev->Next = X;
+    X->Prev = Prev;
+    X->Next = Y;
+    Y->Prev = X;
+    Size++;
   }
 
-  void checkConsistency() {
-    if (Size == 0) {
-      CHECK_EQ(First, nullptr);
-      CHECK_EQ(Last, nullptr);
+  void push_back(T *X) {
+    X->Next = nullptr;
+    if (empty()) {
+      First = X;
     } else {
-      uptr Count = 0;
-      for (Item *I = First;; I = I->Next) {
-        Count++;
-        if (I == Last)
-          break;
-      }
-      CHECK_EQ(size(), Count);
-      CHECK_EQ(Last->Next, nullptr);
+      DCHECK_EQ(Last->Next, nullptr);
+      Last->Next = X;
     }
+    X->Prev = Last;
+    Last = X;
+    Size++;
   }
 
-  template <class ItemT> class IteratorBase {
-  public:
-    explicit IteratorBase(ItemT *CurrentItem) : Current(CurrentItem) {}
-    IteratorBase &operator++() {
-      Current = Current->Next;
-      return *this;
+  void pop_front() {
+    DCHECK(!empty());
+    First = First->Next;
+    if (!First)
+      Last = nullptr;
+    else
+      First->Prev = nullptr;
+    Size--;
+  }
+
+  // The consistency of the adjacent links is aggressively checked in order to
+  // catch potential corruption attempts, that could yield a mirrored
+  // write-{4,8} primitive. nullptr checks are deemed less vital.
+  void remove(T *X) {
+    T *Prev = X->Prev;
+    T *Next = X->Next;
+    if (Prev) {
+      CHECK_EQ(Prev->Next, X);
+      Prev->Next = Next;
     }
-    bool operator!=(IteratorBase Other) const {
-      return Current != Other.Current;
+    if (Next) {
+      CHECK_EQ(Next->Prev, X);
+      Next->Prev = Prev;
     }
-    ItemT &operator*() { return *Current; }
-
-  private:
-    ItemT *Current;
-  };
-
-  typedef IteratorBase<Item> Iterator;
-  typedef IteratorBase<const Item> ConstIterator;
-
-  Iterator begin() { return Iterator(First); }
-  Iterator end() { return Iterator(nullptr); }
-
-  ConstIterator begin() const { return ConstIterator(First); }
-  ConstIterator end() const { return ConstIterator(nullptr); }
-
-private:
-  uptr Size;
-  Item *First;
-  Item *Last;
+    if (First == X) {
+      DCHECK_EQ(Prev, nullptr);
+      First = Next;
+    } else {
+      DCHECK_NE(Prev, nullptr);
+    }
+    if (Last == X) {
+      DCHECK_EQ(Next, nullptr);
+      Last = Prev;
+    } else {
+      DCHECK_NE(Next, nullptr);
+    }
+    Size--;
+  }
 };
 
 } // namespace scudo
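The rewrite splits the old all-in-one `IntrusiveList` into a common base (storage, iteration, consistency checking) plus `SinglyLinkedList` and `DoublyLinkedList`, which differ only in the link fields they maintain. A small usage sketch, not part of the patch, assuming the scudo standalone headers are available:

```cpp
#include "list.h"

// Intrusive POD item: Next is all SinglyLinkedList needs; DoublyLinkedList
// maintains Prev as well.
struct Node {
  Node *Next;
  Node *Prev;
};

void listExample() {
  static Node N[3];

  scudo::SinglyLinkedList<Node> S;
  S.clear(); // Required before use: the lists are POD, nothing is constructed.
  S.push_back(&N[0]);
  S.push_front(&N[1]); // List is now N[1], N[0].

  scudo::DoublyLinkedList<Node> D;
  D.clear();
  D.push_back(&N[0]);
  D.insert(&N[1], &N[0]); // Inserts N[1] before N[0].
  D.remove(&N[0]);        // O(1); hard-CHECKs the adjacent links first.
  D.checkConsistency();
}
```

The doubly linked flavor is what lets the Secondary below remove an arbitrary block from its in-use list in constant time, with the `CHECK_EQ` on neighboring links standing in for the old hand-rolled checks in secondary.cpp.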
diff --git a/standalone/primary32.h b/standalone/primary32.h
index 9123d07b49b..453b06ee554 100644
--- a/standalone/primary32.h
+++ b/standalone/primary32.h
@@ -197,7 +197,7 @@ private:
 
   struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
     HybridMutex Mutex;
-    IntrusiveList<TransferBatch> FreeList;
+    SinglyLinkedList<TransferBatch> FreeList;
     SizeClassStats Stats;
     bool CanRelease;
     u32 RandState;
@@ -376,7 +376,7 @@ private:
     for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
       if (PossibleRegions[I] == ClassId) {
         ReleaseRecorder Recorder(I * RegionSize);
-        releaseFreeMemoryToOS(&Sci->FreeList, I * RegionSize,
+        releaseFreeMemoryToOS(Sci->FreeList, I * RegionSize,
                               RegionSize / PageSize, BlockSize, &Recorder);
         if (Recorder.getReleasedRangesCount() > 0) {
           Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
diff --git a/standalone/primary64.h b/standalone/primary64.h
index 8f443ea7fa3..409472c8777 100644
--- a/standalone/primary64.h
+++ b/standalone/primary64.h
@@ -202,7 +202,7 @@ private:
 
   struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo {
     HybridMutex Mutex;
-    IntrusiveList<TransferBatch> FreeList;
+    SinglyLinkedList<TransferBatch> FreeList;
     RegionStats Stats;
     bool CanRelease;
     bool Exhausted;
@@ -372,7 +372,7 @@ private:
     }
 
     ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
-    releaseFreeMemoryToOS(&Region->FreeList, Region->RegionBeg,
+    releaseFreeMemoryToOS(Region->FreeList, Region->RegionBeg,
                           roundUpTo(Region->AllocatedUser, PageSize) / PageSize,
                           BlockSize, &Recorder);
diff --git a/standalone/quarantine.h b/standalone/quarantine.h
index 35fd0bc197e..4b3f368ad96 100644
--- a/standalone/quarantine.h
+++ b/standalone/quarantine.h
@@ -160,7 +160,7 @@ public:
   }
 
 private:
-  IntrusiveList<QuarantineBatch> List;
+  SinglyLinkedList<QuarantineBatch> List;
   atomic_uptr Size;
 
   void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
diff --git a/standalone/release.h b/standalone/release.h
index 4fe29fde4bd..4b5c56ce7c1 100644
--- a/standalone/release.h
+++ b/standalone/release.h
@@ -149,7 +149,7 @@ private:
 
 template <class TransferBatchT, class ReleaseRecorderT>
 NOINLINE void
-releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> *FreeList, uptr Base,
+releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
                       uptr AllocatedPagesCount, uptr BlockSize,
                       ReleaseRecorderT *Recorder) {
   const uptr PageSize = getPageSizeCached();
@@ -199,18 +199,18 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> *FreeList, uptr Base,
   // allocated page.
   if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
     // Each chunk affects one page only.
-    for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
-      for (u32 I = 0; I < (*It).getCount(); I++) {
-        const uptr P = reinterpret_cast<uptr>((*It).get(I));
+    for (const auto &It : FreeList) {
+      for (u32 I = 0; I < It.getCount(); I++) {
+        const uptr P = reinterpret_cast<uptr>(It.get(I));
         if (P >= Base && P < End)
           Counters.inc((P - Base) >> PageSizeLog);
       }
     }
   } else {
     // In all other cases chunks might affect more than one page.
-    for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
-      for (u32 I = 0; I < (*It).getCount(); I++) {
-        const uptr P = reinterpret_cast<uptr>((*It).get(I));
+    for (const auto &It : FreeList) {
+      for (u32 I = 0; I < It.getCount(); I++) {
+        const uptr P = reinterpret_cast<uptr>(It.get(I));
         if (P >= Base && P < End)
           Counters.incRange((P - Base) >> PageSizeLog,
                             (P - Base + BlockSize - 1) >> PageSizeLog);
diff --git a/standalone/secondary.cpp b/standalone/secondary.cpp
deleted file mode 100644
index db7361d7134..00000000000
--- a/standalone/secondary.cpp
+++ /dev/null
@@ -1,135 +0,0 @@
-//===-- secondary.cpp -------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "secondary.h"
-
-#include "string_utils.h"
-
-namespace scudo {
-
-// As with the Primary, the size passed to this function includes any desired
-// alignment, so that the frontend can align the user allocation. The hint
-// parameter allows us to unmap spurious memory when dealing with larger
-// (greater than a page) alignments on 32-bit platforms.
-// Due to the sparsity of address space available on those platforms, requesting
-// an allocation from the Secondary with a large alignment would end up wasting
-// VA space (even though we are not committing the whole thing), hence the need
-// to trim off some of the reserved space.
-// For allocations requested with an alignment greater than or equal to a page,
-// the committed memory will amount to something close to Size - AlignmentHint
-// (pending rounding and headers).
-void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
-  DCHECK_GT(Size, AlignmentHint);
-  const uptr PageSize = getPageSizeCached();
-  const uptr MapSize =
-      roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize) + 2 * PageSize;
-  MapPlatformData Data = {};
-  uptr MapBase =
-      reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
-                                 MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
-  if (UNLIKELY(!MapBase))
-    return nullptr;
-  uptr CommitBase = MapBase + PageSize;
-  uptr MapEnd = MapBase + MapSize;
-
-  // In the unlikely event of alignments larger than a page, adjust the amount
-  // of memory we want to commit, and trim the extra memory.
-  if (UNLIKELY(AlignmentHint >= PageSize)) {
-    // For alignments greater than or equal to a page, the user pointer (eg: the
-    // pointer that is returned by the C or C++ allocation APIs) ends up on a
-    // page boundary , and our headers will live in the preceding page.
-    CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
-    const uptr NewMapBase = CommitBase - PageSize;
-    DCHECK_GE(NewMapBase, MapBase);
-    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
-    // are less constrained memory wise, and that saves us two syscalls.
-    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
-      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
-      MapBase = NewMapBase;
-    }
-    const uptr NewMapEnd = CommitBase + PageSize +
-                           roundUpTo((Size - AlignmentHint), PageSize) +
-                           PageSize;
-    DCHECK_LE(NewMapEnd, MapEnd);
-    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
-      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
-      MapEnd = NewMapEnd;
-    }
-  }
-
-  const uptr CommitSize = MapEnd - PageSize - CommitBase;
-  const uptr Ptr =
-      reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
-                                 CommitSize, "scudo:secondary", 0, &Data));
-  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
-  H->MapBase = MapBase;
-  H->MapSize = MapEnd - MapBase;
-  H->BlockEnd = CommitBase + CommitSize;
-  H->Data = Data;
-  {
-    ScopedLock L(Mutex);
-    if (LIKELY(Tail)) {
-      Tail->Next = H;
-      H->Prev = Tail;
-    }
-    Tail = H;
-    AllocatedBytes += CommitSize;
-    if (LargestSize < CommitSize)
-      LargestSize = CommitSize;
-    NumberOfAllocs++;
-    Stats.add(StatAllocated, CommitSize);
-    Stats.add(StatMapped, H->MapSize);
-  }
-  if (BlockEnd)
-    *BlockEnd = CommitBase + CommitSize;
-  return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
-}
-
-void MapAllocator::deallocate(void *Ptr) {
-  LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
-  {
-    ScopedLock L(Mutex);
-    LargeBlock::Header *Prev = H->Prev;
-    LargeBlock::Header *Next = H->Next;
-    if (Prev) {
-      CHECK_EQ(Prev->Next, H);
-      Prev->Next = Next;
-    }
-    if (Next) {
-      CHECK_EQ(Next->Prev, H);
-      Next->Prev = Prev;
-    }
-    if (UNLIKELY(Tail == H)) {
-      CHECK(!Next);
-      Tail = Prev;
-    } else {
-      CHECK(Next);
-    }
-    const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
-    FreedBytes += CommitSize;
-    NumberOfFrees++;
-    Stats.sub(StatAllocated, CommitSize);
-    Stats.sub(StatMapped, H->MapSize);
-  }
-  void *Addr = reinterpret_cast<void *>(H->MapBase);
-  const uptr Size = H->MapSize;
-  MapPlatformData Data;
-  Data = H->Data;
-  unmap(Addr, Size, UNMAP_ALL, &Data);
-}
-
-void MapAllocator::getStats(ScopedString *Str) const {
-  Str->append(
-      "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
-      "(%zuK), remains %zu (%zuK) max %zuM\n",
-      NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
-      NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
-      LargestSize >> 20);
-}
-
-} // namespace scudo
diff --git a/standalone/secondary.h b/standalone/secondary.h
index 9d074a57c77..bca783a3b60 100644
--- a/standalone/secondary.h
+++ b/standalone/secondary.h
@@ -10,6 +10,7 @@
 #define SCUDO_SECONDARY_H_
 
 #include "common.h"
+#include "list.h"
 #include "mutex.h"
 #include "stats.h"
 #include "string_utils.h"
@@ -47,7 +48,7 @@ static Header *getHeader(const void *Ptr) {
 
 } // namespace LargeBlock
 
-class MapAllocator {
+template <uptr MaxFreeListSize = 32U> class MapAllocator {
 public:
   void initLinkerInitialized(GlobalStats *S) {
     Stats.initLinkerInitialized();
@@ -59,7 +60,8 @@ public:
     initLinkerInitialized(S);
   }
 
-  void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr);
+  void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr,
+                 bool ZeroContents = false);
 
   void deallocate(void *Ptr);
 
@@ -78,13 +80,17 @@ public:
   void enable() { Mutex.unlock(); }
 
   template <typename F> void iterateOverBlocks(F Callback) const {
-    for (LargeBlock::Header *H = Tail; H != nullptr; H = H->Prev)
-      Callback(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize());
+    for (const auto &H : InUseBlocks)
+      Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize());
   }
 
+  static uptr getMaxFreeListSize(void) { return MaxFreeListSize; }
+
 private:
   HybridMutex Mutex;
-  LargeBlock::Header *Tail;
+  DoublyLinkedList<LargeBlock::Header> InUseBlocks;
+  // The free list is sorted based on the committed size of blocks.
+  DoublyLinkedList<LargeBlock::Header> FreeBlocks;
   uptr AllocatedBytes;
   uptr FreedBytes;
   uptr LargestSize;
@@ -93,6 +99,158 @@ private:
   LocalStats Stats;
 };
 
+// As with the Primary, the size passed to this function includes any desired
+// alignment, so that the frontend can align the user allocation. The hint
+// parameter allows us to unmap spurious memory when dealing with larger
+// (greater than a page) alignments on 32-bit platforms.
+// Due to the sparsity of address space available on those platforms, requesting
+// an allocation from the Secondary with a large alignment would end up wasting
+// VA space (even though we are not committing the whole thing), hence the need
+// to trim off some of the reserved space.
+// For allocations requested with an alignment greater than or equal to a page,
+// the committed memory will amount to something close to Size - AlignmentHint
+// (pending rounding and headers).
+template <uptr MaxFreeListSize>
+void *MapAllocator<MaxFreeListSize>::allocate(uptr Size, uptr AlignmentHint,
+                                              uptr *BlockEnd,
+                                              bool ZeroContents) {
+  DCHECK_GT(Size, AlignmentHint);
+  const uptr PageSize = getPageSizeCached();
+  const uptr RoundedSize =
+      roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize);
+
+  if (MaxFreeListSize && AlignmentHint < PageSize) {
+    ScopedLock L(Mutex);
+    for (auto &H : FreeBlocks) {
+      const uptr FreeBlockSize = H.BlockEnd - reinterpret_cast<uptr>(&H);
+      if (FreeBlockSize < RoundedSize)
+        continue;
+      // Candidate free block should only be at most 4 pages larger.
+      if (FreeBlockSize > RoundedSize + 4 * PageSize)
+        break;
+      FreeBlocks.remove(&H);
+      InUseBlocks.push_back(&H);
+      AllocatedBytes += FreeBlockSize;
+      NumberOfAllocs++;
+      Stats.add(StatAllocated, FreeBlockSize);
+      if (BlockEnd)
+        *BlockEnd = H.BlockEnd;
+      void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(&H) +
+                                           LargeBlock::getHeaderSize());
+      if (ZeroContents)
+        memset(Ptr, 0, H.BlockEnd - reinterpret_cast<uptr>(Ptr));
+      return Ptr;
+    }
+  }
+
+  MapPlatformData Data = {};
+  const uptr MapSize = RoundedSize + 2 * PageSize;
+  uptr MapBase =
+      reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
+                                 MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+  if (UNLIKELY(!MapBase))
+    return nullptr;
+  uptr CommitBase = MapBase + PageSize;
+  uptr MapEnd = MapBase + MapSize;
+
+  // In the unlikely event of alignments larger than a page, adjust the amount
+  // of memory we want to commit, and trim the extra memory.
+  if (UNLIKELY(AlignmentHint >= PageSize)) {
+    // For alignments greater than or equal to a page, the user pointer (eg: the
+    // pointer that is returned by the C or C++ allocation APIs) ends up on a
+    // page boundary , and our headers will live in the preceding page.
+    CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
+    const uptr NewMapBase = CommitBase - PageSize;
+    DCHECK_GE(NewMapBase, MapBase);
+    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
+    // are less constrained memory wise, and that saves us two syscalls.
+    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
+      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+      MapBase = NewMapBase;
+    }
+    const uptr NewMapEnd = CommitBase + PageSize +
+                           roundUpTo((Size - AlignmentHint), PageSize) +
+                           PageSize;
+    DCHECK_LE(NewMapEnd, MapEnd);
+    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
+      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+      MapEnd = NewMapEnd;
+    }
+  }
+
+  const uptr CommitSize = MapEnd - PageSize - CommitBase;
+  const uptr Ptr =
+      reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
+                                 CommitSize, "scudo:secondary", 0, &Data));
+  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
+  H->MapBase = MapBase;
+  H->MapSize = MapEnd - MapBase;
+  H->BlockEnd = CommitBase + CommitSize;
+  H->Data = Data;
+  {
+    ScopedLock L(Mutex);
+    InUseBlocks.push_back(H);
+    AllocatedBytes += CommitSize;
+    if (LargestSize < CommitSize)
+      LargestSize = CommitSize;
+    NumberOfAllocs++;
+    Stats.add(StatAllocated, CommitSize);
+    Stats.add(StatMapped, H->MapSize);
+  }
+  if (BlockEnd)
+    *BlockEnd = CommitBase + CommitSize;
+  return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
+}
+
+template <uptr MaxFreeListSize>
+void MapAllocator<MaxFreeListSize>::deallocate(void *Ptr) {
+  LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
+  {
+    ScopedLock L(Mutex);
+    InUseBlocks.remove(H);
+    const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
+    FreedBytes += CommitSize;
+    NumberOfFrees++;
+    Stats.sub(StatAllocated, CommitSize);
+    if (MaxFreeListSize && FreeBlocks.size() < MaxFreeListSize) {
+      bool Inserted = false;
+      for (auto &F : FreeBlocks) {
+        const uptr FreeBlockSize = F.BlockEnd - reinterpret_cast<uptr>(&F);
+        if (FreeBlockSize >= CommitSize) {
+          FreeBlocks.insert(H, &F);
+          Inserted = true;
+          break;
+        }
+      }
+      if (!Inserted)
+        FreeBlocks.push_back(H);
+      const uptr RoundedAllocationStart =
+          roundUpTo(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize(),
+                    getPageSizeCached());
+      MapPlatformData Data = H->Data;
+      // TODO(kostyak): use release_to_os_interval_ms
+      releasePagesToOS(H->MapBase, RoundedAllocationStart - H->MapBase,
+                       H->BlockEnd - RoundedAllocationStart, &Data);
+      return;
+    }
+    Stats.sub(StatMapped, H->MapSize);
+  }
+  void *Addr = reinterpret_cast<void *>(H->MapBase);
+  const uptr Size = H->MapSize;
+  MapPlatformData Data = H->Data;
+  unmap(Addr, Size, UNMAP_ALL, &Data);
+}
+
+template <uptr MaxFreeListSize>
+void MapAllocator<MaxFreeListSize>::getStats(ScopedString *Str) const {
+  Str->append(
+      "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
+      "(%zuK), remains %zu (%zuK) max %zuM\n",
+      NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
+      NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
+      LargestSize >> 20);
+}
+
 } // namespace scudo
 
 #endif // SCUDO_SECONDARY_H_
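The behavioral change worth calling out: with MaxFreeListSize > 0, deallocate() keeps up to that many blocks on FreeBlocks, still mapped but with their pages released to the OS, and allocate() may hand one back when the rounded sizes are within 4 pages of each other. A test-style sketch of that reuse path, not part of the patch; getting the exact same pointer back is an implementation detail observed here only because the sizes match, not an API guarantee:

```cpp
// Sketch in the style of secondary_test.cpp; assumes gtest and the scudo
// standalone headers are available.
#include "gtest/gtest.h"
#include "secondary.h"

TEST(ScudoSecondaryTest, FreeListReuseSketch) {
  scudo::MapAllocator<32U> *L = new scudo::MapAllocator<32U>;
  L->init(nullptr);
  const scudo::uptr Size = 1U << 16;
  void *P = L->allocate(Size);
  L->deallocate(P); // Cached on FreeBlocks: pages released, block kept mapped.
  // Same rounded size, alignment below a page: eligible for the cached block.
  void *Q = L->allocate(Size);
  // The single cached block fits the request (well within 4 pages), so the
  // free-list path should return it rather than mapping a new one.
  EXPECT_EQ(P, Q);
  L->deallocate(Q);
}
```

This is also why the old `EXPECT_DEATH` on touching freed Secondary memory had to become conditional in secondary_test.cpp below: a cached block is no longer unmapped on free.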
diff --git a/standalone/stats.h b/standalone/stats.h
index 16ef5b89b85..294b891d7bb 100644
--- a/standalone/stats.h
+++ b/standalone/stats.h
@@ -10,6 +10,7 @@
 #define SCUDO_STATS_H_
 
 #include "atomic_helpers.h"
+#include "list.h"
 #include "mutex.h"
 
 #include <string.h>
@@ -45,20 +46,17 @@ public:
 
   uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }
 
-private:
-  friend class GlobalStats;
-
-  atomic_uptr StatsArray[StatCount];
   LocalStats *Next;
   LocalStats *Prev;
+
+private:
+  atomic_uptr StatsArray[StatCount];
 };
 
 // Global stats, used for aggregation and querying.
 class GlobalStats : public LocalStats {
 public:
-  void initLinkerInitialized() {
-    Next = this;
-    Prev = this;
-  }
+  void initLinkerInitialized() {}
   void init() {
     memset(this, 0, sizeof(*this));
     initLinkerInitialized();
@@ -66,30 +64,23 @@ public:
 
   void link(LocalStats *S) {
     ScopedLock L(Mutex);
-    S->Next = Next;
-    S->Prev = this;
-    Next->Prev = S;
-    Next = S;
+    StatsList.push_back(S);
   }
 
   void unlink(LocalStats *S) {
     ScopedLock L(Mutex);
-    S->Prev->Next = S->Next;
-    S->Next->Prev = S->Prev;
+    StatsList.remove(S);
     for (uptr I = 0; I < StatCount; I++)
       add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
   }
 
   void get(uptr *S) const {
-    memset(S, 0, StatCount * sizeof(uptr));
     ScopedLock L(Mutex);
-    const LocalStats *Stats = this;
-    for (;;) {
+    for (uptr I = 0; I < StatCount; I++)
+      S[I] = LocalStats::get(static_cast<StatType>(I));
+    for (const auto &Stats : StatsList) {
       for (uptr I = 0; I < StatCount; I++)
-        S[I] += Stats->get(static_cast<StatType>(I));
-      Stats = Stats->Next;
-      if (Stats == this)
-        break;
+        S[I] += Stats.get(static_cast<StatType>(I));
     }
     // All stats must be non-negative.
     for (uptr I = 0; I < StatCount; I++)
@@ -98,6 +89,7 @@ public:
 
 private:
   mutable HybridMutex Mutex;
+  DoublyLinkedList<LocalStats> StatsList;
 };
 
 } // namespace scudo
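GlobalStats now owns a `DoublyLinkedList<LocalStats>` instead of hand-rolled circular links, and no longer links itself: get() reads its own LocalStats base first, then walks the list. A sketch of the link/unlink protocol, not part of the patch, assuming the scudo standalone headers and the LocalStats init/add interface shown above:

```cpp
#include "stats.h"

void statsProtocolExample() {
  scudo::GlobalStats S;
  S.init();

  scudo::LocalStats Cache;
  Cache.init();
  S.link(&Cache); // Pushes &Cache onto StatsList.

  Cache.add(scudo::StatAllocated, 256U);

  scudo::uptr Totals[scudo::StatCount];
  S.get(Totals); // Global (aggregated) stats plus every linked LocalStats.

  S.unlink(&Cache); // Folds Cache's counters into the global ones.
}
```

Note that unlink() must fold the departing LocalStats into the global counters, since get() only sees stats that are either global or still linked.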
diff --git a/standalone/tests/combined_test.cpp b/standalone/tests/combined_test.cpp
index d74c07e8458..d32ea89e0ea 100644
--- a/standalone/tests/combined_test.cpp
+++ b/standalone/tests/combined_test.cpp
@@ -65,6 +65,20 @@ template <class Config> static void testAllocator() {
   }
   Allocator->releaseToOS();
 
+  // Ensure that specifying ZeroContents returns a zero'd out block.
+  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
+    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
+      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
+      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
+      EXPECT_NE(P, nullptr);
+      for (scudo::uptr I = 0; I < Size; I++)
+        EXPECT_EQ((reinterpret_cast<char *>(P))[I], 0);
+      memset(P, 0xaa, Size);
+      Allocator->deallocate(P, Origin, Size);
+    }
+  }
+  Allocator->releaseToOS();
+
   // Verify that a chunk will end up being reused, at some point.
   const scudo::uptr NeedleSize = 1024U;
   void *NeedleP = Allocator->allocate(NeedleSize, Origin);
@@ -223,6 +237,7 @@ struct DeathConfig {
   // Tiny allocator, its Primary only serves chunks of 1024 bytes.
   using DeathSizeClassMap = scudo::SizeClassMap<1U, 10U, 10U, 10U, 1U, 10U>;
   typedef scudo::SizeClassAllocator32<DeathSizeClassMap, 18U> Primary;
+  typedef scudo::MapAllocator<0U> Secondary;
   template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U>;
 };
diff --git a/standalone/tests/list_test.cpp b/standalone/tests/list_test.cpp
index ce6cadde123..0a0c050c98c 100644
--- a/standalone/tests/list_test.cpp
+++ b/standalone/tests/list_test.cpp
@@ -11,24 +11,34 @@
 
 struct ListItem {
   ListItem *Next;
+  ListItem *Prev;
 };
 
-typedef scudo::IntrusiveList<ListItem> List;
+static ListItem Items[6];
+static ListItem *X = &Items[0];
+static ListItem *Y = &Items[1];
+static ListItem *Z = &Items[2];
+static ListItem *A = &Items[3];
+static ListItem *B = &Items[4];
+static ListItem *C = &Items[5];
 
-static List StaticList;
+typedef scudo::SinglyLinkedList<ListItem> SLList;
+typedef scudo::DoublyLinkedList<ListItem> DLList;
 
-static void setList(List *L, ListItem *X = nullptr, ListItem *Y = nullptr,
-                    ListItem *Z = nullptr) {
+template <typename ListT>
+static void setList(ListT *L, ListItem *I1 = nullptr, ListItem *I2 = nullptr,
+                    ListItem *I3 = nullptr) {
   L->clear();
-  if (X)
-    L->push_back(X);
-  if (Y)
-    L->push_back(Y);
-  if (Z)
-    L->push_back(Z);
+  if (I1)
+    L->push_back(I1);
+  if (I2)
+    L->push_back(I2);
+  if (I3)
+    L->push_back(I3);
 }
 
-static void checkList(List *L, ListItem *I1, ListItem *I2 = nullptr,
+template <typename ListT>
+static void checkList(ListT *L, ListItem *I1, ListItem *I2 = nullptr,
                       ListItem *I3 = nullptr, ListItem *I4 = nullptr,
                       ListItem *I5 = nullptr, ListItem *I6 = nullptr) {
   if (I1) {
@@ -58,20 +68,10 @@ static void checkList(List *L, ListItem *I1, ListItem *I2 = nullptr,
   EXPECT_TRUE(L->empty());
 }
 
-TEST(ScudoListTest, IntrusiveList) {
-  ListItem Items[6];
-  EXPECT_EQ(StaticList.size(), 0U);
-
-  List L;
+template <typename ListT> static void testListCommon(void) {
+  ListT L;
   L.clear();
 
-  ListItem *X = &Items[0];
-  ListItem *Y = &Items[1];
-  ListItem *Z = &Items[2];
-  ListItem *A = &Items[3];
-  ListItem *B = &Items[4];
-  ListItem *C = &Items[5];
-
   EXPECT_EQ(L.size(), 0U);
   L.push_back(X);
   EXPECT_EQ(L.size(), 1U);
@@ -122,6 +122,16 @@
   L.pop_front();
   EXPECT_TRUE(L.empty());
   L.checkConsistency();
+}
+
+TEST(ScudoListTest, LinkedListCommon) {
+  testListCommon<SLList>();
+  testListCommon<DLList>();
+}
+
+TEST(ScudoListTest, SinglyLinkedList) {
+  SLList L;
+  L.clear();
 
   L.push_back(X);
   L.push_back(Y);
@@ -139,14 +149,10 @@
   L.pop_front();
   EXPECT_TRUE(L.empty());
 
-  List L1, L2;
+  SLList L1, L2;
   L1.clear();
   L2.clear();
 
-  L1.append_front(&L2);
-  EXPECT_TRUE(L1.empty());
-  EXPECT_TRUE(L2.empty());
-
   L1.append_back(&L2);
   EXPECT_TRUE(L1.empty());
   EXPECT_TRUE(L2.empty());
@@ -160,26 +166,46 @@
   checkList(&L1, X, Y, Z, A, B, C);
   EXPECT_TRUE(L2.empty());
 
-  setList(&L1, X, Y);
-  setList(&L2);
-  L1.append_front(&L2);
-  checkList(&L1, X, Y);
-  EXPECT_TRUE(L2.empty());
+  L1.clear();
+  L2.clear();
+  L1.push_back(X);
+  L1.append_back(&L2);
+  EXPECT_EQ(L1.back(), X);
+  EXPECT_EQ(L1.front(), X);
+  EXPECT_EQ(L1.size(), 1U);
 }
 
-TEST(ScudoListTest, IntrusiveListAppendEmpty) {
-  ListItem I;
-  List L;
+TEST(ScudoListTest, DoublyLinkedList) {
+  DLList L;
   L.clear();
-  L.push_back(&I);
-  List L2;
-  L2.clear();
-  L.append_back(&L2);
-  EXPECT_EQ(L.back(), &I);
-  EXPECT_EQ(L.front(), &I);
+
+  L.push_back(X);
+  L.push_back(Y);
+  L.push_back(Z);
+  L.remove(Y);
+  EXPECT_EQ(L.size(), 2U);
+  EXPECT_EQ(L.front(), X);
+  EXPECT_EQ(L.back(), Z);
+  L.checkConsistency();
+  L.remove(Z);
   EXPECT_EQ(L.size(), 1U);
-  L.append_front(&L2);
-  EXPECT_EQ(L.back(), &I);
-  EXPECT_EQ(L.front(), &I);
+  EXPECT_EQ(L.front(), X);
+  EXPECT_EQ(L.back(), X);
+  L.checkConsistency();
+  L.pop_front();
+  EXPECT_TRUE(L.empty());
+
+  L.push_back(X);
+  L.insert(Y, X);
+  EXPECT_EQ(L.size(), 2U);
+  EXPECT_EQ(L.front(), Y);
+  EXPECT_EQ(L.back(), X);
+  L.checkConsistency();
+  L.remove(Y);
+  EXPECT_EQ(L.size(), 1U);
+  EXPECT_EQ(L.front(), X);
+  EXPECT_EQ(L.back(), X);
+  L.checkConsistency();
+  L.pop_front();
+  EXPECT_TRUE(L.empty());
 }
diff --git a/standalone/tests/release_test.cpp b/standalone/tests/release_test.cpp
index 1de7fed1c4c..3776768e9a8 100644
--- a/standalone/tests/release_test.cpp
+++ b/standalone/tests/release_test.cpp
@@ -173,7 +173,7 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
   std::shuffle(FreeArray.begin(), FreeArray.end(), R);
 
   // Build the FreeList from the FreeArray.
-  scudo::IntrusiveList<Batch> FreeList;
+  scudo::SinglyLinkedList<Batch> FreeList;
   FreeList.clear();
   Batch *CurrentBatch = nullptr;
   for (auto const &Block : FreeArray) {
@@ -189,7 +189,7 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
 
   // Release the memory.
   ReleasedPagesRecorder Recorder;
-  releaseFreeMemoryToOS(&FreeList, 0, AllocatedPagesCount, BlockSize,
+  releaseFreeMemoryToOS(FreeList, 0, AllocatedPagesCount, BlockSize,
                         &Recorder);
 
   // Verify that there are no released pages touched by used chunks and all
diff --git a/standalone/tests/secondary_test.cpp b/standalone/tests/secondary_test.cpp
index b602b8d63e3..be620a6937c 100644
--- a/standalone/tests/secondary_test.cpp
+++ b/standalone/tests/secondary_test.cpp
@@ -16,18 +16,20 @@
 #include <mutex>
 #include <thread>
 
-TEST(ScudoSecondaryTest, SecondaryBasic) {
+template <class SecondaryT> static void testSecondaryBasic(void) {
   scudo::GlobalStats S;
   S.init();
-  scudo::MapAllocator *L = new scudo::MapAllocator;
+  SecondaryT *L = new SecondaryT;
   L->init(&S);
   const scudo::uptr Size = 1U << 16;
   void *P = L->allocate(Size);
   EXPECT_NE(P, nullptr);
   memset(P, 'A', Size);
-  EXPECT_GE(scudo::MapAllocator::getBlockSize(P), Size);
+  EXPECT_GE(SecondaryT::getBlockSize(P), Size);
   L->deallocate(P);
-  EXPECT_DEATH(memset(P, 'A', Size), "");
+  // If we are not using a free list, blocks are unmapped on deallocation.
+  if (SecondaryT::getMaxFreeListSize() == 0U)
+    EXPECT_DEATH(memset(P, 'A', Size), "");
 
   const scudo::uptr Align = 1U << 16;
   P = L->allocate(Size + Align, Align);
@@ -50,13 +52,21 @@
   Str.output();
 }
 
+TEST(ScudoSecondaryTest, SecondaryBasic) {
+  testSecondaryBasic<scudo::MapAllocator<>>();
+  testSecondaryBasic<scudo::MapAllocator<0U>>();
+  testSecondaryBasic<scudo::MapAllocator<64U>>();
+}
+
+using LargeAllocator = scudo::MapAllocator<>;
+
 // This exercises a variety of combinations of size and alignment for the
 // MapAllocator. The size computation done here mimic the ones done by the
 // combined allocator.
 TEST(ScudoSecondaryTest, SecondaryCombinations) {
   constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
   constexpr scudo::uptr HeaderSize = scudo::roundUpTo(8, MinAlign);
-  scudo::MapAllocator *L = new scudo::MapAllocator;
+  LargeAllocator *L = new LargeAllocator;
   L->init(nullptr);
   for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
     for (scudo::uptr AlignLog = FIRST_32_SECOND_64(3, 4); AlignLog <= 16;
@@ -84,7 +94,7 @@ TEST(ScudoSecondaryTest, SecondaryCombinations) {
 }
 
 TEST(ScudoSecondaryTest, SecondaryIterate) {
-  scudo::MapAllocator *L = new scudo::MapAllocator;
+  LargeAllocator *L = new LargeAllocator;
   L->init(nullptr);
   std::vector<void *> V;
   const scudo::uptr PageSize = scudo::getPageSizeCached();
@@ -110,7 +120,7 @@ static std::mutex Mutex;
 static std::condition_variable Cv;
 static bool Ready = false;
 
-static void performAllocations(scudo::MapAllocator *L) {
+static void performAllocations(LargeAllocator *L) {
   std::vector<void *> V;
   const scudo::uptr PageSize = scudo::getPageSizeCached();
   {
@@ -127,7 +137,7 @@ static void performAllocations(scudo::MapAllocator *L) {
 }
 
 TEST(ScudoSecondaryTest, SecondaryThreadsRace) {
-  scudo::MapAllocator *L = new scudo::MapAllocator;
+  LargeAllocator *L = new LargeAllocator;
   L->init(nullptr);
   std::thread Threads[10];
   for (scudo::uptr I = 0; I < 10U; I++)
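As a worked instance of the size computation the secondary.h comment describes ("the committed memory will amount to something close to Size - AlignmentHint"), here is the arithmetic for a frontend-style request. Numbers are illustrative only, assuming a 4 KB page size:

```cpp
// Illustrative walk-through of MapAllocator<>::allocate's arithmetic.
const scudo::uptr PageSize = 4096U;
const scudo::uptr Align = 1U << 16;          // 64 KB alignment hint.
const scudo::uptr Size = (1U << 20) + Align; // Frontend adds the alignment.

// Reserve the rounded size plus two guard pages (MAP_NOACCESS initially).
const scudo::uptr RoundedSize =
    scudo::roundUpTo(Size + scudo::LargeBlock::getHeaderSize(), PageSize);
const scudo::uptr MapSize = RoundedSize + 2 * PageSize;

// Since Align >= PageSize, CommitBase gets realigned and, on 32-bit
// platforms, both ends of the mapping are trimmed. The commit then works
// out to:
//   CommitSize = PageSize + roundUpTo(Size - Align, PageSize)
//              = 4 KB + 1 MB
// i.e. close to Size - AlignmentHint, as the comment promises; on 64-bit
// the trim (and its two syscalls) is skipped and the extra VA is left alone.
```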