author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2022-10-19 03:41:58 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2022-10-19 03:41:58 +0000
commit     6d144995e9cbfd3e96897dfc0a74305a3964abd5 (patch)
tree       282febf4b655b7a72247e0ac83e88fcb5183baed
parent     97d77d57721f6773a300022b631e40fec8852599 (diff)
parent     f548c9d070ec2dad79c1819389802823d65ba008 (diff)
Snap for 9192524 from f548c9d070ec2dad79c1819389802823d65ba008 to gki13-boot-release
Change-Id: Iaa0744d9af072f6084e899458b722517d70363bf
-rw-r--r--  OWNERS                                            2
-rw-r--r--  standalone/local_cache.h                         28
-rw-r--r--  standalone/primary32.h                            7
-rw-r--r--  standalone/primary64.h                            7
-rw-r--r--  standalone/release.cpp                            4
-rw-r--r--  standalone/release.h                            303
-rw-r--r--  standalone/size_class_map.h                      29
-rw-r--r--  standalone/tests/combined_test.cpp                4
-rw-r--r--  standalone/tests/primary_test.cpp                 4
-rw-r--r--  standalone/tests/release_test.cpp                55
-rw-r--r--  standalone/tests/size_class_map_test.cpp          4
-rw-r--r--  standalone/tools/compute_size_class_config.cpp    2
12 files changed, 272 insertions, 177 deletions
diff --git a/OWNERS b/OWNERS
index 45e67e71368..4f31bdec2b6 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,3 +1,3 @@
cferris@google.com
enh@google.com
-kostyak@google.com
+chiahungduan@google.com
diff --git a/standalone/local_cache.h b/standalone/local_cache.h
index f46645f9bad..b36ec8fab98 100644
--- a/standalone/local_cache.h
+++ b/standalone/local_cache.h
@@ -10,6 +10,7 @@
#define SCUDO_LOCAL_CACHE_H_
#include "internal_defs.h"
+#include "platform.h"
#include "report.h"
#include "stats.h"
@@ -20,8 +21,8 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
typedef typename SizeClassAllocator::CompactPtrT CompactPtrT;
struct TransferBatch {
- static const u32 MaxNumCached = SizeClassMap::MaxNumCachedHint;
- void setFromArray(CompactPtrT *Array, u32 N) {
+ static const u16 MaxNumCached = SizeClassMap::MaxNumCachedHint;
+ void setFromArray(CompactPtrT *Array, u16 N) {
DCHECK_LE(N, MaxNumCached);
Count = N;
memcpy(Batch, Array, sizeof(Batch[0]) * Count);
@@ -34,19 +35,19 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
void copyToArray(CompactPtrT *Array) const {
memcpy(Array, Batch, sizeof(Batch[0]) * Count);
}
- u32 getCount() const { return Count; }
- CompactPtrT get(u32 I) const {
+ u16 getCount() const { return Count; }
+ CompactPtrT get(u16 I) const {
DCHECK_LE(I, Count);
return Batch[I];
}
- static u32 getMaxCached(uptr Size) {
+ static u16 getMaxCached(uptr Size) {
return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
}
TransferBatch *Next;
private:
- u32 Count;
CompactPtrT Batch[MaxNumCached];
+ u16 Count;
};
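
The commit does not say why `Count` moved behind the `Batch` array, here and in the test's `FreeBatch`. One observable effect is shown in this standalone sketch, which assumes an 8-byte CompactPtrT (illustrative sizes, not scudo's layout guarantees):

#include <cstdint>
#include <cstdio>

struct CountFirst {
  uint16_t Count;    // 6 bytes of padding follow before the array
  uint64_t Batch[4];
};
struct CountLast {
  uint64_t Batch[4]; // array stays at offset 0
  uint16_t Count;    // rides in the struct's tail padding
};

int main() {
  // Both are 40 bytes on typical ABIs; only the padding location differs.
  printf("CountFirst=%zu CountLast=%zu\n", sizeof(CountFirst),
         sizeof(CountLast));
}
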
void init(GlobalStats *S, SizeClassAllocator *A) {
@@ -128,9 +129,9 @@ template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
private:
static const uptr NumClasses = SizeClassMap::NumClasses;
static const uptr BatchClassId = SizeClassMap::BatchClassId;
- struct PerClass {
- u32 Count;
- u32 MaxCount;
+ struct alignas(SCUDO_CACHE_LINE_SIZE) PerClass {
+ u16 Count;
+ u16 MaxCount;
// Note: ClassSize is zero for the transfer batch.
uptr ClassSize;
CompactPtrT Chunks[2 * TransferBatch::MaxNumCached];
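
The new `alignas(SCUDO_CACHE_LINE_SIZE)` makes each `PerClass` entry start on its own cache line; the constant comes from the platform.h include added above. A minimal sketch of the effect, assuming a 64-byte cache line (the common case, not guaranteed on every target):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// 64 here is an assumption standing in for SCUDO_CACHE_LINE_SIZE.
constexpr std::size_t kCacheLine = 64;

struct Plain {
  uint16_t Count, MaxCount;
  uintptr_t ClassSize;
};
struct alignas(kCacheLine) Padded {
  uint16_t Count, MaxCount;
  uintptr_t ClassSize;
};

int main() {
  // alignas rounds the size up to the alignment, so consecutive entries of
  // a Padded[] array begin on distinct cache lines.
  printf("Plain=%zu Padded=%zu align=%zu\n", sizeof(Plain), sizeof(Padded),
         alignof(Padded));
}
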
@@ -150,7 +151,7 @@ private:
for (uptr I = 0; I < NumClasses; I++) {
PerClass *P = &PerClassArray[I];
const uptr Size = SizeClassAllocator::getSizeByClassId(I);
- P->MaxCount = 2 * TransferBatch::getMaxCached(Size);
+ P->MaxCount = static_cast<u16>(2 * TransferBatch::getMaxCached(Size));
if (I != BatchClassId) {
P->ClassSize = Size;
} else {
@@ -180,14 +181,15 @@ private:
}
NOINLINE void drain(PerClass *C, uptr ClassId) {
- const u32 Count = Min(C->MaxCount / 2, C->Count);
+ const u16 Count = Min(static_cast<u16>(C->MaxCount / 2), C->Count);
TransferBatch *B =
createBatch(ClassId, Allocator->decompactPtr(ClassId, C->Chunks[0]));
if (UNLIKELY(!B))
reportOutOfMemory(SizeClassAllocator::getSizeByClassId(BatchClassId));
B->setFromArray(&C->Chunks[0], Count);
- C->Count -= Count;
- for (uptr I = 0; I < C->Count; I++)
+ // u16 operands are promoted to int, so the result must be cast back to u16.
+ C->Count = static_cast<u16>(C->Count - Count);
+ for (u16 I = 0; I < C->Count; I++)
C->Chunks[I] = C->Chunks[I + Count];
Allocator->pushBatch(ClassId, B);
}
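
The casts added in drain() deal with C++ integral promotion: u16 operands of `/` and `-` are promoted to int, so assigning the result back to a u16 narrows implicitly and trips conversion warnings. A compilable sketch of the same arithmetic (`u16` is a plain typedef here, mirroring scudo's alias):

#include <cassert>
#include <cstdint>

typedef uint16_t u16; // mirrors scudo's integer alias

u16 remainingAfterDrain(u16 MaxCount, u16 Count) {
  // Both operands of `/` and `-` are promoted to int, so each result must
  // be narrowed back to u16 explicitly, exactly as drain() now does.
  const u16 Drain =
      static_cast<u16>(Count < MaxCount / 2 ? Count : MaxCount / 2);
  return static_cast<u16>(Count - Drain);
}

int main() {
  assert(remainingAfterDrain(/*MaxCount=*/28, /*Count=*/20) == 6);
}
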
diff --git a/standalone/primary32.h b/standalone/primary32.h
index 326c10a32a8..d9905a05717 100644
--- a/standalone/primary32.h
+++ b/standalone/primary32.h
@@ -351,7 +351,7 @@ private:
}
const uptr Size = getSizeByClassId(ClassId);
- const u32 MaxCount = TransferBatch::getMaxCached(Size);
+ const u16 MaxCount = TransferBatch::getMaxCached(Size);
DCHECK_GT(MaxCount, 0U);
// The maximum number of blocks we should carve in the region is dictated
// by the maximum number of batches we want to fill, and the amount of
@@ -382,7 +382,8 @@ private:
C->createBatch(ClassId, reinterpret_cast<void *>(ShuffleArray[I]));
if (UNLIKELY(!B))
return nullptr;
- const u32 N = Min(MaxCount, NumberOfBlocks - I);
+ // `MaxCount` is u16 so the result will also fit in u16.
+ const u16 N = static_cast<u16>(Min<u32>(MaxCount, NumberOfBlocks - I));
B->setFromArray(&ShuffleArray[I], N);
Sci->FreeList.push_back(B);
I += N;
@@ -477,7 +478,7 @@ private:
return reinterpret_cast<uptr>(CompactPtr);
};
releaseFreeMemoryToOS(Sci->FreeList, RegionSize, NumberOfRegions, BlockSize,
- &Recorder, DecompactPtr, SkipRegion);
+ Recorder, DecompactPtr, SkipRegion);
if (Recorder.getReleasedRangesCount() > 0) {
Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
diff --git a/standalone/primary64.h b/standalone/primary64.h
index 14784ee8f37..d52a1d9a6c4 100644
--- a/standalone/primary64.h
+++ b/standalone/primary64.h
@@ -333,7 +333,7 @@ private:
NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
RegionInfo *Region) {
const uptr Size = getSizeByClassId(ClassId);
- const u32 MaxCount = TransferBatch::getMaxCached(Size);
+ const u16 MaxCount = TransferBatch::getMaxCached(Size);
const uptr RegionBeg = Region->RegionBeg;
const uptr MappedUser = Region->MappedUser;
@@ -392,7 +392,8 @@ private:
CompactPtrBase, ShuffleArray[I])));
if (UNLIKELY(!B))
return nullptr;
- const u32 N = Min(MaxCount, NumberOfBlocks - I);
+ // `MaxCount` is u16 so the result will also fit in u16.
+ const u16 N = static_cast<u16>(Min<u32>(MaxCount, NumberOfBlocks - I));
B->setFromArray(&ShuffleArray[I], N);
Region->FreeList.push_back(B);
I += N;
@@ -473,7 +474,7 @@ private:
};
auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
releaseFreeMemoryToOS(Region->FreeList, Region->AllocatedUser, 1U,
- BlockSize, &Recorder, DecompactPtr, SkipRegion);
+ BlockSize, Recorder, DecompactPtr, SkipRegion);
if (Recorder.getReleasedRangesCount() > 0) {
Region->ReleaseInfo.PushedBlocksAtLastRelease =
diff --git a/standalone/release.cpp b/standalone/release.cpp
index 5d7c6c5fc11..3f40dbec6d7 100644
--- a/standalone/release.cpp
+++ b/standalone/release.cpp
@@ -10,7 +10,7 @@
namespace scudo {
-HybridMutex PackedCounterArray::Mutex = {};
-uptr PackedCounterArray::StaticBuffer[PackedCounterArray::StaticBufferCount];
+HybridMutex RegionPageMap::Mutex = {};
+uptr RegionPageMap::StaticBuffer[RegionPageMap::StaticBufferCount];
} // namespace scudo
diff --git a/standalone/release.h b/standalone/release.h
index 49cc6ae618a..12cc8d2c717 100644
--- a/standalone/release.h
+++ b/standalone/release.h
@@ -41,22 +41,49 @@ private:
MapPlatformData *Data = nullptr;
};
-// A packed array of Counters. Each counter occupies 2^N bits, enough to store
-// counter's MaxValue. Ctor will try to use a static buffer first, and if that
-// fails (the buffer is too small or already locked), will allocate the
+// A region page map records the usage of pages in the regions. It implements
+// a packed array of Counters. Each counter occupies 2^N bits, enough to store
+// the counter's MaxValue. The ctor will try to use a static buffer first, and
+// if that fails (the buffer is too small or already locked), will allocate the
// required Buffer via map(). The caller is expected to check whether the
// initialization was successful by checking isAllocated() result. For the
// sake of performance, none of the accessors check the validity of the
// arguments; it is assumed that Index is always in the [0, N) range and the
// value is not incremented past MaxValue.
-class PackedCounterArray {
+class RegionPageMap {
public:
- PackedCounterArray(uptr NumberOfRegions, uptr CountersPerRegion,
- uptr MaxValue)
- : Regions(NumberOfRegions), NumCounters(CountersPerRegion) {
- DCHECK_GT(Regions, 0);
- DCHECK_GT(NumCounters, 0);
+ RegionPageMap()
+ : Regions(0),
+ NumCounters(0),
+ CounterSizeBitsLog(0),
+ CounterMask(0),
+ PackingRatioLog(0),
+ BitOffsetMask(0),
+ SizePerRegion(0),
+ BufferSize(0),
+ Buffer(nullptr) {}
+ RegionPageMap(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
+ reset(NumberOfRegions, CountersPerRegion, MaxValue);
+ }
+ ~RegionPageMap() {
+ if (!isAllocated())
+ return;
+ if (Buffer == &StaticBuffer[0])
+ Mutex.unlock();
+ else
+ unmap(reinterpret_cast<void *>(Buffer),
+ roundUpTo(BufferSize, getPageSizeCached()));
+ Buffer = nullptr;
+ }
+
+ void reset(uptr NumberOfRegions, uptr CountersPerRegion, uptr MaxValue) {
+ DCHECK_GT(NumberOfRegions, 0);
+ DCHECK_GT(CountersPerRegion, 0);
DCHECK_GT(MaxValue, 0);
+
+ Regions = NumberOfRegions;
+ NumCounters = CountersPerRegion;
+
constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
// Rounding counter storage size up to the power of two allows for using
// bit shifts calculating particular counter's Index and offset.
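
A standalone reconstruction of the layout math described in the comment above: the counter width is the bit width of MaxValue rounded up to a power of two, so a counter's word index and in-word offset reduce to shifts and masks (the helper is illustrative, not scudo's):

#include <cassert>
#include <cstdint>

typedef uintptr_t uptr; // mirrors scudo's alias

static uptr log2Floor(uptr X) {
  uptr L = 0;
  while (X >>= 1)
    L++;
  return L;
}

int main() {
  const uptr MaxValue = 5; // e.g. a FullPagesBlockCountMax of 5
  const uptr MaxCounterBits = sizeof(uptr) * 8UL;
  // Width of MaxValue in bits, rounded up to a power of two: 5 -> 4 bits.
  uptr CounterBits = 1;
  while (CounterBits < MaxCounterBits &&
         ((uptr(1) << CounterBits) - 1) < MaxValue)
    CounterBits *= 2;
  const uptr CounterSizeBitsLog = log2Floor(CounterBits);
  const uptr PackingRatio = MaxCounterBits / CounterBits;
  const uptr PackingRatioLog = log2Floor(PackingRatio);
  const uptr BitOffsetMask = PackingRatio - 1;
  // Counter I lives in word (I >> PackingRatioLog) at bit offset
  // ((I & BitOffsetMask) << CounterSizeBitsLog) -- shifts and masks only.
  const uptr I = 21;
  assert((I >> PackingRatioLog) == I / PackingRatio);
  assert(((I & BitOffsetMask) << CounterSizeBitsLog) ==
         (I % PackingRatio) * CounterBits);
}
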
@@ -85,15 +112,6 @@ public:
"scudo:counters", MAP_ALLOWNOMEM, &MapData));
}
}
- ~PackedCounterArray() {
- if (!isAllocated())
- return;
- if (Buffer == &StaticBuffer[0])
- Mutex.unlock();
- else
- unmap(reinterpret_cast<void *>(Buffer),
- roundUpTo(BufferSize, getPageSizeCached()), 0, &MapData);
- }
bool isAllocated() const { return !!Buffer; }
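
The buffer strategy described above, try-locking a shared static buffer and falling back to mapping, in a minimal sketch using standard primitives (std::mutex and operator new stand in for scudo's HybridMutex and map()):

#include <cstddef>
#include <mutex>
#include <new>

constexpr std::size_t kStaticBufferCount = 2048; // mirrors StaticBufferCount
static std::mutex StaticBufferMutex;             // stands in for HybridMutex
static std::size_t StaticBuffer[kStaticBufferCount];

std::size_t *acquireBuffer(std::size_t Count) {
  // Fast path: borrow the shared static buffer if it is free and big enough.
  if (Count <= kStaticBufferCount && StaticBufferMutex.try_lock())
    return StaticBuffer;
  // Slow path: allocate. Scudo maps pages with MAP_ALLOWNOMEM here instead.
  return new (std::nothrow) std::size_t[Count];
}

void releaseBuffer(std::size_t *Buffer) {
  if (Buffer == StaticBuffer)
    StaticBufferMutex.unlock(); // same shape as the destructor above
  else
    delete[] Buffer;
}

int main() {
  std::size_t *B = acquireBuffer(128);
  releaseBuffer(B);
}
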
@@ -112,6 +130,7 @@ public:
const uptr Index = I >> PackingRatioLog;
const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+ DCHECK_EQ(isAllCounted(Region, I), false);
Buffer[Region * SizePerRegion + Index] += static_cast<uptr>(1U)
<< BitOffset;
}
@@ -123,13 +142,28 @@ public:
inc(Region, I);
}
+ // Set the counter to the max value. Note that the maximum number of blocks
+ // in a page may vary, so saturating every fully counted page to the same
+ // max value gives a uniform way to tell that all of a page's blocks have
+ // been counted (the all-counted status).
+ void setAsAllCounted(uptr Region, uptr I) const {
+ DCHECK_LE(get(Region, I), CounterMask);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+ Buffer[Region * SizePerRegion + Index] |= CounterMask << BitOffset;
+ }
+ bool isAllCounted(uptr Region, uptr I) const {
+ return get(Region, I) == CounterMask;
+ }
+
uptr getBufferSize() const { return BufferSize; }
static const uptr StaticBufferCount = 2048U;
private:
- const uptr Regions;
- const uptr NumCounters;
+ uptr Regions;
+ uptr NumCounters;
uptr CounterSizeBitsLog;
uptr CounterMask;
uptr PackingRatioLog;
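
A toy model of the all-counted marker: with 4-bit counters (CounterMask == 15) and, say, at most 5 blocks per page, counting alone never produces 15, so saturating to it gives every fully free page one recognizable value regardless of its block count:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t CounterMask = 0xF;    // 4-bit counters
  const uint64_t BlocksOnThisPage = 5; // this page's full count
  uint64_t Counter = 0;
  for (uint64_t I = 0; I < BlocksOnThisPage; I++)
    Counter += 1;                  // inc(): one free block found
  assert(Counter <= CounterMask);
  if (Counter == BlocksOnThisPage) // every block on the page is free
    Counter |= CounterMask;        // setAsAllCounted(): saturate
  assert(Counter == CounterMask);  // isAllCounted()
}
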
@@ -146,11 +180,11 @@ private:
template <class ReleaseRecorderT> class FreePagesRangeTracker {
public:
- explicit FreePagesRangeTracker(ReleaseRecorderT *Recorder)
+ explicit FreePagesRangeTracker(ReleaseRecorderT &Recorder)
: Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}
- void processNextPage(bool Freed) {
- if (Freed) {
+ void processNextPage(bool Released) {
+ if (Released) {
if (!InRange) {
CurrentRangeStatePage = CurrentPage;
InRange = true;
@@ -171,113 +205,138 @@ public:
private:
void closeOpenedRange() {
if (InRange) {
- Recorder->releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
- (CurrentPage << PageSizeLog));
+ Recorder.releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
+ (CurrentPage << PageSizeLog));
InRange = false;
}
}
- ReleaseRecorderT *const Recorder;
+ ReleaseRecorderT &Recorder;
const uptr PageSizeLog;
bool InRange = false;
uptr CurrentPage = 0;
uptr CurrentRangeStatePage = 0;
};
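
A compilable reduction of FreePagesRangeTracker, showing how runs of releasable pages are coalesced into [From, To) byte ranges before the recorder is invoked (PrintingRecorder is illustrative; only releasePageRangeToOS mirrors the interface the tracker expects):

#include <cstdint>
#include <cstdio>
#include <initializer_list>

struct PrintingRecorder {
  void releasePageRangeToOS(uintptr_t From, uintptr_t To) {
    printf("release [%#llx, %#llx)\n", (unsigned long long)From,
           (unsigned long long)To);
  }
};

template <class ReleaseRecorderT> class RangeTracker {
public:
  RangeTracker(ReleaseRecorderT &Recorder, uintptr_t PageSizeLog)
      : Recorder(Recorder), PageSizeLog(PageSizeLog) {}
  void processNextPage(bool Released) {
    if (Released) {
      if (!InRange) {
        RangeStart = Current;
        InRange = true;
      }
    } else if (InRange) {
      close();
    }
    Current++;
  }
  void finish() {
    if (InRange)
      close();
  }

private:
  void close() {
    Recorder.releasePageRangeToOS(RangeStart << PageSizeLog,
                                  Current << PageSizeLog);
    InRange = false;
  }
  ReleaseRecorderT &Recorder;
  const uintptr_t PageSizeLog;
  bool InRange = false;
  uintptr_t Current = 0, RangeStart = 0;
};

int main() {
  PrintingRecorder Recorder;
  RangeTracker<PrintingRecorder> Tracker(Recorder, /*PageSizeLog=*/12);
  // Pages 0 and 1 coalesce into one range; page 3 is released on its own.
  for (bool Released : {true, true, false, true})
    Tracker.processNextPage(Released);
  Tracker.finish(); // prints [0, 0x2000) then [0x3000, 0x4000)
}
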
-template <class TransferBatchT, class ReleaseRecorderT, typename DecompactPtrT,
- typename SkipRegionT>
-NOINLINE void
-releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
- uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
- ReleaseRecorderT *Recorder, DecompactPtrT DecompactPtr,
- SkipRegionT SkipRegion) {
- const uptr PageSize = getPageSizeCached();
-
- // Figure out the number of chunks per page and whether we can take a fast
- // path (the number of chunks per page is the same for all pages).
- uptr FullPagesBlockCountMax;
- bool SameBlockCountPerPage;
- if (BlockSize <= PageSize) {
- if (PageSize % BlockSize == 0) {
- // Same number of chunks per page, no cross overs.
- FullPagesBlockCountMax = PageSize / BlockSize;
- SameBlockCountPerPage = true;
- } else if (BlockSize % (PageSize % BlockSize) == 0) {
- // Some chunks are crossing page boundaries, which means that the page
- // contains one or two partial chunks, but all pages contain the same
- // number of chunks.
- FullPagesBlockCountMax = PageSize / BlockSize + 1;
- SameBlockCountPerPage = true;
- } else {
- // Some chunks are crossing page boundaries, which means that the page
- // contains one or two partial chunks.
- FullPagesBlockCountMax = PageSize / BlockSize + 2;
- SameBlockCountPerPage = false;
- }
- } else {
- if (BlockSize % PageSize == 0) {
- // One chunk covers multiple pages, no cross overs.
- FullPagesBlockCountMax = 1;
- SameBlockCountPerPage = true;
+struct PageReleaseContext {
+ PageReleaseContext(uptr BlockSize, uptr RegionSize, uptr NumberOfRegions) :
+ BlockSize(BlockSize),
+ RegionSize(RegionSize),
+ NumberOfRegions(NumberOfRegions) {
+ PageSize = getPageSizeCached();
+ if (BlockSize <= PageSize) {
+ if (PageSize % BlockSize == 0) {
+ // Same number of chunks per page, no cross overs.
+ FullPagesBlockCountMax = PageSize / BlockSize;
+ SameBlockCountPerPage = true;
+ } else if (BlockSize % (PageSize % BlockSize) == 0) {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks, but all pages contain the same
+ // number of chunks.
+ FullPagesBlockCountMax = PageSize / BlockSize + 1;
+ SameBlockCountPerPage = true;
+ } else {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks.
+ FullPagesBlockCountMax = PageSize / BlockSize + 2;
+ SameBlockCountPerPage = false;
+ }
} else {
- // One chunk covers multiple pages, Some chunks are crossing page
- // boundaries. Some pages contain one chunk, some contain two.
- FullPagesBlockCountMax = 2;
- SameBlockCountPerPage = false;
+ if (BlockSize % PageSize == 0) {
+ // One chunk covers multiple pages, no cross overs.
+ FullPagesBlockCountMax = 1;
+ SameBlockCountPerPage = true;
+ } else {
+ // One chunk covers multiple pages. Some chunks cross page
+ // boundaries; some pages contain one chunk, some contain two.
+ FullPagesBlockCountMax = 2;
+ SameBlockCountPerPage = false;
+ }
}
+
+ PagesCount = roundUpTo(RegionSize, PageSize) / PageSize;
+ PageSizeLog = getLog2(PageSize);
+ RoundedRegionSize = PagesCount << PageSizeLog;
+ RoundedSize = NumberOfRegions * RoundedRegionSize;
+
+ PageMap.reset(NumberOfRegions, PagesCount, FullPagesBlockCountMax);
+ DCHECK(PageMap.isAllocated());
}
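
Worked values for the case analysis in the constructor above, assuming a 4096-byte page:

#include <cassert>

int main() {
  // BlockSize 64 divides the page: exactly 64 blocks per page (fast path).
  assert(4096 % 64 == 0 && 4096 / 64 == 64);
  // BlockSize 48: 4096 % 48 == 16 and 48 % 16 == 0, so every page holds the
  // same count of (possibly partial) blocks: 4096 / 48 + 1 == 86 (fast path).
  assert(4096 % 48 == 16 && 48 % 16 == 0 && 4096 / 48 + 1 == 86);
  // BlockSize 96: 96 % (4096 % 96) != 0, so per-page block counts differ;
  // the bound is 4096 / 96 + 2 == 44 and the slow path is taken.
  assert(4096 % 96 == 64 && 96 % 64 != 0 && 4096 / 96 + 2 == 44);
  // BlockSize 8192 covers whole pages exactly: one block per page, fast path.
  assert(8192 % 4096 == 0);
}
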
- const uptr PagesCount = roundUpTo(RegionSize, PageSize) / PageSize;
- PackedCounterArray Counters(NumberOfRegions, PagesCount,
- FullPagesBlockCountMax);
- if (!Counters.isAllocated())
- return;
-
- const uptr PageSizeLog = getLog2(PageSize);
- const uptr RoundedRegionSize = PagesCount << PageSizeLog;
- const uptr RoundedSize = NumberOfRegions * RoundedRegionSize;
-
- // Iterate over free chunks and count how many free chunks affect each
- // allocated page.
- if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
- // Each chunk affects one page only.
- for (const auto &It : FreeList) {
- for (u32 I = 0; I < It.getCount(); I++) {
- const uptr P = DecompactPtr(It.get(I)) - Recorder->getBase();
- if (P >= RoundedSize)
- continue;
- const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
- const uptr PInRegion = P - RegionIndex * RegionSize;
- Counters.inc(RegionIndex, PInRegion >> PageSizeLog);
+ template<class TransferBatchT, typename DecompactPtrT>
+ void markFreeBlocks(const IntrusiveList<TransferBatchT> &FreeList,
+ DecompactPtrT DecompactPtr, uptr Base) {
+ // Iterate over free chunks and count how many free chunks affect each
+ // allocated page.
+ if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
+ // Each chunk affects one page only.
+ for (const auto &It : FreeList) {
+ for (u16 I = 0; I < It.getCount(); I++) {
+ const uptr P = DecompactPtr(It.get(I)) - Base;
+ if (P >= RoundedSize)
+ continue;
+ const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
+ const uptr PInRegion = P - RegionIndex * RegionSize;
+ PageMap.inc(RegionIndex, PInRegion >> PageSizeLog);
+ }
}
- }
- } else {
- // In all other cases chunks might affect more than one page.
- DCHECK_GE(RegionSize, BlockSize);
- const uptr LastBlockInRegion = ((RegionSize / BlockSize) - 1U) * BlockSize;
- for (const auto &It : FreeList) {
- for (u32 I = 0; I < It.getCount(); I++) {
- const uptr P = DecompactPtr(It.get(I)) - Recorder->getBase();
- if (P >= RoundedSize)
- continue;
- const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
- uptr PInRegion = P - RegionIndex * RegionSize;
- Counters.incRange(RegionIndex, PInRegion >> PageSizeLog,
- (PInRegion + BlockSize - 1) >> PageSizeLog);
- // The last block in a region might straddle a page, so if it's
- // free, we mark the following "pretend" memory block(s) as free.
- if (PInRegion == LastBlockInRegion) {
- PInRegion += BlockSize;
- while (PInRegion < RoundedRegionSize) {
- Counters.incRange(RegionIndex, PInRegion >> PageSizeLog,
- (PInRegion + BlockSize - 1) >> PageSizeLog);
+ } else {
+ // In all other cases chunks might affect more than one page.
+ DCHECK_GE(RegionSize, BlockSize);
+ const uptr LastBlockInRegion =
+ ((RegionSize / BlockSize) - 1U) * BlockSize;
+ for (const auto &It : FreeList) {
+ for (u16 I = 0; I < It.getCount(); I++) {
+ const uptr P = DecompactPtr(It.get(I)) - Base;
+ if (P >= RoundedSize)
+ continue;
+ const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
+ uptr PInRegion = P - RegionIndex * RegionSize;
+ PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
+ (PInRegion + BlockSize - 1) >> PageSizeLog);
+ // The last block in a region might straddle a page, so if it's
+ // free, we mark the following "pretend" memory block(s) as free.
+ if (PInRegion == LastBlockInRegion) {
PInRegion += BlockSize;
+ while (PInRegion < RoundedRegionSize) {
+ PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
+ (PInRegion + BlockSize - 1) >> PageSizeLog);
+ PInRegion += BlockSize;
+ }
}
}
}
}
}
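
A worked example of the incRange() span used in markFreeBlocks() above: a free block bumps the counter of every page it touches, and, per the loop above, the last block of a region is followed by "pretend" blocks so the rounded-up tail can also be released. Page and block sizes here are chosen purely for illustration:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t PageSizeLog = 12; // 4096-byte pages
  const uint64_t BlockSize = 6144; // each block spans one and a half pages
  const uint64_t PInRegion = 4096; // a free block starting on page 1
  // incRange() bumps every page from the block's first byte to its last:
  assert((PInRegion >> PageSizeLog) == 1);                   // first page
  assert(((PInRegion + BlockSize - 1) >> PageSizeLog) == 2); // last page
}
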
+ uptr BlockSize;
+ uptr RegionSize;
+ uptr NumberOfRegions;
+ uptr PageSize;
+ uptr PagesCount;
+ uptr PageSizeLog;
+ uptr RoundedRegionSize;
+ uptr RoundedSize;
+ uptr FullPagesBlockCountMax;
+ bool SameBlockCountPerPage;
+ RegionPageMap PageMap;
+};
+
+// Try to release the pages which don't have any in-use blocks, i.e., all of
+// their blocks are free. The `PageMap` records the number of free blocks in
+// each page.
+template <class ReleaseRecorderT, typename SkipRegionT>
+NOINLINE void
+releaseFreeMemoryToOS(PageReleaseContext &Context,
+ ReleaseRecorderT &Recorder, SkipRegionT SkipRegion) {
+ const uptr PageSize = Context.PageSize;
+ const uptr BlockSize = Context.BlockSize;
+ const uptr PagesCount = Context.PagesCount;
+ const uptr NumberOfRegions = Context.NumberOfRegions;
+ const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
+ const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
+ RegionPageMap &PageMap = Context.PageMap;
+
// Iterate over pages detecting ranges of pages with chunk Counters equal
// to the expected number of chunks for the particular page.
FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
@@ -288,9 +347,12 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
RangeTracker.skipPages(PagesCount);
continue;
}
- for (uptr J = 0; J < PagesCount; J++)
- RangeTracker.processNextPage(Counters.get(I, J) ==
- FullPagesBlockCountMax);
+ for (uptr J = 0; J < PagesCount; J++) {
+ const bool CanRelease = PageMap.get(I, J) == FullPagesBlockCountMax;
+ if (CanRelease)
+ PageMap.setAsAllCounted(I, J);
+ RangeTracker.processNextPage(CanRelease);
+ }
}
} else {
// Slow path, go through the pages keeping count how many chunks affect
@@ -322,13 +384,30 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
}
}
PrevPageBoundary = PageBoundary;
- RangeTracker.processNextPage(Counters.get(I, J) == BlocksPerPage);
+ const bool CanRelease = PageMap.get(I, J) == BlocksPerPage;
+ if (CanRelease)
+ PageMap.setAsAllCounted(I, J);
+ RangeTracker.processNextPage(CanRelease);
}
}
}
RangeTracker.finish();
}
+// An overload of releaseFreeMemoryToOS which doesn't require the page usage
+// information after releasing.
+template <class TransferBatchT, class ReleaseRecorderT, typename DecompactPtrT,
+ typename SkipRegionT>
+NOINLINE void
+releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList,
+ uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
+ ReleaseRecorderT &Recorder, DecompactPtrT DecompactPtr,
+ SkipRegionT SkipRegion) {
+ PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
+ Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+}
+
} // namespace scudo
#endif // SCUDO_RELEASE_H_
diff --git a/standalone/size_class_map.h b/standalone/size_class_map.h
index 8d5a560dc5b..cc369ff5797 100644
--- a/standalone/size_class_map.h
+++ b/standalone/size_class_map.h
@@ -23,7 +23,7 @@ inline uptr scaledLog2(uptr Size, uptr ZeroLog, uptr LogBits) {
}
template <typename Config> struct SizeClassMapBase {
- static u32 getMaxCachedHint(uptr Size) {
+ static u16 getMaxCachedHint(uptr Size) {
DCHECK_NE(Size, 0);
u32 N;
// Force a 32-bit division if the template parameters allow for it.
@@ -31,7 +31,10 @@ template <typename Config> struct SizeClassMapBase {
N = static_cast<u32>((1UL << Config::MaxBytesCachedLog) / Size);
else
N = (1U << Config::MaxBytesCachedLog) / static_cast<u32>(Size);
- return Max(1U, Min(Config::MaxNumCachedHint, N));
+
+ // Note that Config::MaxNumCachedHint is u16 so the result is guaranteed to
+ // fit in u16.
+ return static_cast<u16>(Max(1U, Min<u32>(Config::MaxNumCachedHint, N)));
}
};
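
Worked values for getMaxCachedHint() with DefaultSizeClassConfig-like parameters (MaxBytesCachedLog = 10, MaxNumCachedHint = 14); the final cast is safe because the clamp bound is itself a u16:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Budget = 1u << 10;     // MaxBytesCachedLog == 10 -> 1 KiB
  const uint32_t MaxNumCachedHint = 14; // a u16 in the real config
  auto Hint = [&](uint32_t Size) {
    const uint32_t N = Budget / Size;
    const uint32_t Clamped = N < MaxNumCachedHint ? N : MaxNumCachedHint;
    return static_cast<uint16_t>(Clamped < 1 ? 1 : Clamped); // Max(1U, ...)
  };
  assert(Hint(32) == 14);  // 1024 / 32 == 32, clamped down to the hint cap
  assert(Hint(256) == 4);  // 1024 / 256 == 4
  assert(Hint(4096) == 1); // 1024 / 4096 == 0, raised to the floor of 1
}
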
@@ -65,7 +68,7 @@ class FixedSizeClassMap : public SizeClassMapBase<Config> {
static const uptr M = (1UL << S) - 1;
public:
- static const u32 MaxNumCachedHint = Config::MaxNumCachedHint;
+ static const u16 MaxNumCachedHint = Config::MaxNumCachedHint;
static const uptr MaxSize = (1UL << Config::MaxSizeLog) + Config::SizeDelta;
static const uptr NumClasses =
@@ -99,7 +102,7 @@ public:
return MidClass + 1 + scaledLog2(Size - 1, Config::MidSizeLog, S);
}
- static u32 getMaxCachedHint(uptr Size) {
+ static u16 getMaxCachedHint(uptr Size) {
DCHECK_LE(Size, MaxSize);
return Base::getMaxCachedHint(Size);
}
@@ -178,7 +181,7 @@ class TableSizeClassMap : public SizeClassMapBase<Config> {
static constexpr LSBTable LTable = {};
public:
- static const u32 MaxNumCachedHint = Config::MaxNumCachedHint;
+ static const u16 MaxNumCachedHint = Config::MaxNumCachedHint;
static const uptr NumClasses = ClassesSize + 1;
static_assert(NumClasses < 256, "");
@@ -212,7 +215,7 @@ public:
return SzTable.Tab[scaledLog2(Size - 1, Config::MidSizeLog, S)];
}
- static u32 getMaxCachedHint(uptr Size) {
+ static u16 getMaxCachedHint(uptr Size) {
DCHECK_LE(Size, MaxSize);
return Base::getMaxCachedHint(Size);
}
@@ -223,7 +226,7 @@ struct DefaultSizeClassConfig {
static const uptr MinSizeLog = 5;
static const uptr MidSizeLog = 8;
static const uptr MaxSizeLog = 17;
- static const u32 MaxNumCachedHint = 14;
+ static const u16 MaxNumCachedHint = 14;
static const uptr MaxBytesCachedLog = 10;
static const uptr SizeDelta = 0;
};
@@ -235,7 +238,7 @@ struct FuchsiaSizeClassConfig {
static const uptr MinSizeLog = 5;
static const uptr MidSizeLog = 8;
static const uptr MaxSizeLog = 17;
- static const u32 MaxNumCachedHint = 12;
+ static const u16 MaxNumCachedHint = 12;
static const uptr MaxBytesCachedLog = 10;
static const uptr SizeDelta = Chunk::getHeaderSize();
};
@@ -248,7 +251,7 @@ struct AndroidSizeClassConfig {
static const uptr MinSizeLog = 4;
static const uptr MidSizeLog = 6;
static const uptr MaxSizeLog = 16;
- static const u32 MaxNumCachedHint = 13;
+ static const u16 MaxNumCachedHint = 13;
static const uptr MaxBytesCachedLog = 13;
static constexpr u32 Classes[] = {
@@ -263,7 +266,7 @@ struct AndroidSizeClassConfig {
static const uptr MinSizeLog = 4;
static const uptr MidSizeLog = 7;
static const uptr MaxSizeLog = 16;
- static const u32 MaxNumCachedHint = 14;
+ static const u16 MaxNumCachedHint = 14;
static const uptr MaxBytesCachedLog = 13;
static constexpr u32 Classes[] = {
@@ -292,7 +295,7 @@ struct SvelteSizeClassConfig {
static const uptr MinSizeLog = 4;
static const uptr MidSizeLog = 8;
static const uptr MaxSizeLog = 14;
- static const u32 MaxNumCachedHint = 13;
+ static const u16 MaxNumCachedHint = 13;
static const uptr MaxBytesCachedLog = 10;
static const uptr SizeDelta = Chunk::getHeaderSize();
#else
@@ -300,7 +303,7 @@ struct SvelteSizeClassConfig {
static const uptr MinSizeLog = 3;
static const uptr MidSizeLog = 7;
static const uptr MaxSizeLog = 14;
- static const u32 MaxNumCachedHint = 14;
+ static const u16 MaxNumCachedHint = 14;
static const uptr MaxBytesCachedLog = 10;
static const uptr SizeDelta = Chunk::getHeaderSize();
#endif
@@ -315,7 +318,7 @@ struct TrustySizeClassConfig {
static const uptr MinSizeLog = 7;
static const uptr MidSizeLog = 7;
static const uptr MaxSizeLog = 7;
- static const u32 MaxNumCachedHint = 8;
+ static const u16 MaxNumCachedHint = 8;
static const uptr MaxBytesCachedLog = 10;
static const uptr SizeDelta = 0;
};
diff --git a/standalone/tests/combined_test.cpp b/standalone/tests/combined_test.cpp
index 94d97df8167..53c0562c152 100644
--- a/standalone/tests/combined_test.cpp
+++ b/standalone/tests/combined_test.cpp
@@ -506,12 +506,12 @@ struct DeathSizeClassConfig {
static const scudo::uptr MinSizeLog = 10;
static const scudo::uptr MidSizeLog = 10;
static const scudo::uptr MaxSizeLog = 13;
- static const scudo::u32 MaxNumCachedHint = 4;
+ static const scudo::u16 MaxNumCachedHint = 4;
static const scudo::uptr MaxBytesCachedLog = 12;
static const scudo::uptr SizeDelta = 0;
};
-static const scudo::uptr DeathRegionSizeLog = 20U;
+static const scudo::uptr DeathRegionSizeLog = 21U;
struct DeathConfig {
static const bool MaySupportMemoryTagging = false;
diff --git a/standalone/tests/primary_test.cpp b/standalone/tests/primary_test.cpp
index 283e2973c1e..b338542bae8 100644
--- a/standalone/tests/primary_test.cpp
+++ b/standalone/tests/primary_test.cpp
@@ -145,7 +145,7 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) {
struct SmallRegionsConfig {
using SizeClassMap = scudo::DefaultSizeClassMap;
- static const scudo::uptr PrimaryRegionSizeLog = 20U;
+ static const scudo::uptr PrimaryRegionSizeLog = 21U;
static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
static const bool MaySupportMemoryTagging = false;
@@ -176,7 +176,7 @@ TEST(ScudoPrimaryTest, Primary64OOM) {
AllocationFailed = true;
break;
}
- for (scudo::u32 J = 0; J < B->getCount(); J++)
+ for (scudo::u16 J = 0; J < B->getCount(); J++)
memset(Allocator.decompactPtr(ClassId, B->get(J)), 'B', Size);
Batches.push_back(B);
}
diff --git a/standalone/tests/release_test.cpp b/standalone/tests/release_test.cpp
index 04c02891e91..0c3c043d1e0 100644
--- a/standalone/tests/release_test.cpp
+++ b/standalone/tests/release_test.cpp
@@ -18,19 +18,19 @@
#include <random>
#include <set>
-TEST(ScudoReleaseTest, PackedCounterArray) {
+TEST(ScudoReleaseTest, RegionPageMap) {
for (scudo::uptr I = 0; I < SCUDO_WORDSIZE; I++) {
// Various valid counter's max values packed into one word.
- scudo::PackedCounterArray Counters2N(1U, 1U, 1UL << I);
- EXPECT_EQ(sizeof(scudo::uptr), Counters2N.getBufferSize());
+ scudo::RegionPageMap PageMap2N(1U, 1U, 1UL << I);
+ EXPECT_EQ(sizeof(scudo::uptr), PageMap2N.getBufferSize());
// Check the "all bit set" values too.
- scudo::PackedCounterArray Counters2N1_1(1U, 1U, ~0UL >> I);
- EXPECT_EQ(sizeof(scudo::uptr), Counters2N1_1.getBufferSize());
+ scudo::RegionPageMap PageMap2N1_1(1U, 1U, ~0UL >> I);
+ EXPECT_EQ(sizeof(scudo::uptr), PageMap2N1_1.getBufferSize());
// Verify the packing ratio; the counter is expected to be packed into the
// closest power of 2 bits.
- scudo::PackedCounterArray Counters(1U, SCUDO_WORDSIZE, 1UL << I);
+ scudo::RegionPageMap PageMap(1U, SCUDO_WORDSIZE, 1UL << I);
EXPECT_EQ(sizeof(scudo::uptr) * scudo::roundUpToPowerOfTwo(I + 1),
- Counters.getBufferSize());
+ PageMap.getBufferSize());
}
// Go through 1, 2, 4, 8, .. {32,64} bits per counter.
@@ -38,20 +38,20 @@ TEST(ScudoReleaseTest, PackedCounterArray) {
// Make sure counters request one memory page for the buffer.
const scudo::uptr NumCounters =
(scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
- scudo::PackedCounterArray Counters(1U, NumCounters,
+ scudo::RegionPageMap PageMap(1U, NumCounters,
1UL << ((1UL << I) - 1));
- Counters.inc(0U, 0U);
+ PageMap.inc(0U, 0U);
for (scudo::uptr C = 1; C < NumCounters - 1; C++) {
- EXPECT_EQ(0UL, Counters.get(0U, C));
- Counters.inc(0U, C);
- EXPECT_EQ(1UL, Counters.get(0U, C - 1));
+ EXPECT_EQ(0UL, PageMap.get(0U, C));
+ PageMap.inc(0U, C);
+ EXPECT_EQ(1UL, PageMap.get(0U, C - 1));
}
- EXPECT_EQ(0UL, Counters.get(0U, NumCounters - 1));
- Counters.inc(0U, NumCounters - 1);
+ EXPECT_EQ(0UL, PageMap.get(0U, NumCounters - 1));
+ PageMap.inc(0U, NumCounters - 1);
if (I > 0) {
- Counters.incRange(0u, 0U, NumCounters - 1);
+ PageMap.incRange(0u, 0U, NumCounters - 1);
for (scudo::uptr C = 0; C < NumCounters; C++)
- EXPECT_EQ(2UL, Counters.get(0U, C));
+ EXPECT_EQ(2UL, PageMap.get(0U, C));
}
}
}
@@ -102,7 +102,7 @@ TEST(ScudoReleaseTest, FreePagesRangeTracker) {
for (auto TestCase : TestCases) {
StringRangeRecorder Recorder;
- RangeTracker Tracker(&Recorder);
+ RangeTracker Tracker(Recorder);
for (scudo::uptr I = 0; TestCase[I] != 0; I++)
Tracker.processNextPage(TestCase[I] == 'x');
Tracker.finish();
@@ -130,28 +130,29 @@ public:
// Simplified version of a TransferBatch.
template <class SizeClassMap> struct FreeBatch {
- static const scudo::u32 MaxCount = SizeClassMap::MaxNumCachedHint;
+ static const scudo::u16 MaxCount = SizeClassMap::MaxNumCachedHint;
void clear() { Count = 0; }
void add(scudo::uptr P) {
DCHECK_LT(Count, MaxCount);
Batch[Count++] = P;
}
- scudo::u32 getCount() const { return Count; }
- scudo::uptr get(scudo::u32 I) const {
+ scudo::u16 getCount() const { return Count; }
+ scudo::uptr get(scudo::u16 I) const {
DCHECK_LE(I, Count);
return Batch[I];
}
FreeBatch *Next;
private:
- scudo::u32 Count;
scudo::uptr Batch[MaxCount];
+ scudo::u16 Count;
};
template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
typedef FreeBatch<SizeClassMap> Batch;
const scudo::uptr PagesCount = 1024;
const scudo::uptr PageSize = scudo::getPageSizeCached();
+ const scudo::uptr PageSizeLog = scudo::getLog2(PageSize);
std::mt19937 R;
scudo::u32 RandState = 42;
@@ -195,8 +196,12 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
auto DecompactPtr = [](scudo::uptr P) { return P; };
ReleasedPagesRecorder Recorder;
- releaseFreeMemoryToOS(FreeList, MaxBlocks * BlockSize, 1U, BlockSize,
- &Recorder, DecompactPtr, SkipRegion);
+ scudo::PageReleaseContext Context(BlockSize,
+ /*RegionSize=*/MaxBlocks * BlockSize,
+ /*NumberOfRegions=*/1U);
+ Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+ scudo::RegionPageMap &PageMap = Context.PageMap;
// Verify that there are no released pages touched by used chunks and all
// ranges of free chunks big enough to contain the entire memory pages had
@@ -223,6 +228,8 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
const bool PageReleased = Recorder.ReportedPages.find(J * PageSize) !=
Recorder.ReportedPages.end();
EXPECT_EQ(false, PageReleased);
+ EXPECT_EQ(false,
+ PageMap.isAllCounted(0, (J * PageSize) >> PageSizeLog));
}
if (InFreeRange) {
@@ -234,6 +241,7 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
const bool PageReleased =
Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
EXPECT_EQ(true, PageReleased);
+ EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
VerifiedReleasedPages++;
P += PageSize;
}
@@ -251,6 +259,7 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
const bool PageReleased =
Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
EXPECT_EQ(true, PageReleased);
+ EXPECT_EQ(true, PageMap.isAllCounted(0, P >> PageSizeLog));
VerifiedReleasedPages++;
P += PageSize;
}
diff --git a/standalone/tests/size_class_map_test.cpp b/standalone/tests/size_class_map_test.cpp
index 076f36f86be..b11db1e9f64 100644
--- a/standalone/tests/size_class_map_test.cpp
+++ b/standalone/tests/size_class_map_test.cpp
@@ -33,7 +33,7 @@ struct OneClassSizeClassConfig {
static const scudo::uptr MinSizeLog = 5;
static const scudo::uptr MidSizeLog = 5;
static const scudo::uptr MaxSizeLog = 5;
- static const scudo::u32 MaxNumCachedHint = 0;
+ static const scudo::u16 MaxNumCachedHint = 0;
static const scudo::uptr MaxBytesCachedLog = 0;
static const scudo::uptr SizeDelta = 0;
};
@@ -48,7 +48,7 @@ struct LargeMaxSizeClassConfig {
static const scudo::uptr MinSizeLog = 4;
static const scudo::uptr MidSizeLog = 8;
static const scudo::uptr MaxSizeLog = 63;
- static const scudo::u32 MaxNumCachedHint = 128;
+ static const scudo::u16 MaxNumCachedHint = 128;
static const scudo::uptr MaxBytesCachedLog = 16;
static const scudo::uptr SizeDelta = 0;
};
diff --git a/standalone/tools/compute_size_class_config.cpp b/standalone/tools/compute_size_class_config.cpp
index 8b17be0e965..bcaa5834932 100644
--- a/standalone/tools/compute_size_class_config.cpp
+++ b/standalone/tools/compute_size_class_config.cpp
@@ -140,7 +140,7 @@ struct MySizeClassConfig {
static const uptr MinSizeLog = %zu;
static const uptr MidSizeLog = %zu;
static const uptr MaxSizeLog = %zu;
- static const u32 MaxNumCachedHint = 14;
+ static const u16 MaxNumCachedHint = 14;
static const uptr MaxBytesCachedLog = 14;
static constexpr u32 Classes[] = {)",