Diffstat (limited to 'src/base/low_level_alloc.cc')
-rw-r--r-- | src/base/low_level_alloc.cc | 80
1 file changed, 10 insertions, 70 deletions
diff --git a/src/base/low_level_alloc.cc b/src/base/low_level_alloc.cc
index 6b467cf..c043cb6 100644
--- a/src/base/low_level_alloc.cc
+++ b/src/base/low_level_alloc.cc
@@ -1,4 +1,3 @@
-// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
 /* Copyright (c) 2006, Google Inc.
  * All rights reserved.
  *
@@ -57,9 +56,6 @@
 
 // A first-fit allocator with amortized logarithmic free() time.
 
-LowLevelAlloc::PagesAllocator::~PagesAllocator() {
-}
-
 // ---------------------------------------------------------------------------
 static const int kMaxLevel = 30;
 
@@ -107,7 +103,7 @@ static int IntLog2(size_t size, size_t base) {
 
 // Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
 static int Random() {
-  static uint32 r = 1;        // no locking---it's not critical
+  static int32 r = 1;         // no locking---it's not critical
   ANNOTATE_BENIGN_RACE(&r, "benign race, not critical.");
   int result = 1;
   while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
@@ -199,7 +195,6 @@ struct LowLevelAlloc::Arena {
                             // (init under mu, then ro)
   size_t min_size;          // smallest allocation block size
                             // (init under mu, then ro)
-  PagesAllocator *allocator;
 };
 
 // The default arena, which is used when 0 is passed instead of an Arena
@@ -212,17 +207,6 @@ static struct LowLevelAlloc::Arena default_arena;
 static struct LowLevelAlloc::Arena unhooked_arena;
 static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;
 
-namespace {
-
-  class DefaultPagesAllocator : public LowLevelAlloc::PagesAllocator {
-   public:
-    virtual ~DefaultPagesAllocator() {};
-    virtual void *MapPages(int32 flags, size_t size);
-    virtual void UnMapPages(int32 flags, void *addr, size_t size);
-  };
-
-}
-
 // magic numbers to identify allocated and unallocated blocks
 static const intptr_t kMagicAllocated = 0x4c833e95;
 static const intptr_t kMagicUnallocated = ~kMagicAllocated;
@@ -304,20 +288,12 @@ static void ArenaInit(LowLevelAlloc::Arena *arena) {
       arena->flags = 0;   // other arenas' flags may be overridden by client,
                           // but unhooked_arena will have 0 in 'flags'.
     }
-    arena->allocator = LowLevelAlloc::GetDefaultPagesAllocator();
   }
 }
 
 // L < meta_data_arena->mu
 LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
                                               Arena *meta_data_arena) {
-  return NewArenaWithCustomAlloc(flags, meta_data_arena, NULL);
-}
-
-// L < meta_data_arena->mu
-LowLevelAlloc::Arena *LowLevelAlloc::NewArenaWithCustomAlloc(int32 flags,
-                                                             Arena *meta_data_arena,
-                                                             PagesAllocator *allocator) {
   RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
   if (meta_data_arena == &default_arena) {
     if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
@@ -331,9 +307,6 @@ LowLevelAlloc::Arena *LowLevelAlloc::NewArenaWithCustomAlloc(int32 flags,
     new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
   ArenaInit(result);
   result->flags = flags;
-  if (allocator) {
-    result->allocator = allocator;
-  }
   return result;
 }
 
@@ -484,7 +457,15 @@ static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
       // mmap generous 64K chunks to decrease
       // the chances/impact of fragmentation:
       size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
-      void *new_pages = arena->allocator->MapPages(arena->flags, new_pages_size);
+      void *new_pages;
+      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+        new_pages = MallocHook::UnhookedMMap(0, new_pages_size,
+            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+      } else {
+        new_pages = mmap(0, new_pages_size,
+            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+      }
+      RAW_CHECK(new_pages != MAP_FAILED, "mmap error");
       arena->mu.Lock();
       s = reinterpret_cast<AllocList *>(new_pages);
       s->header.size = new_pages_size;
@@ -539,44 +520,3 @@ void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
 LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
   return &default_arena;
 }
-
-static DefaultPagesAllocator *default_pages_allocator;
-static union {
-  char chars[sizeof(DefaultPagesAllocator)];
-  void *ptr;
-} debug_pages_allocator_space;
-
-LowLevelAlloc::PagesAllocator *LowLevelAlloc::GetDefaultPagesAllocator(void) {
-  if (default_pages_allocator) {
-    return default_pages_allocator;
-  }
-  default_pages_allocator = new (debug_pages_allocator_space.chars) DefaultPagesAllocator();
-  return default_pages_allocator;
-}
-
-void *DefaultPagesAllocator::MapPages(int32 flags, size_t size) {
-  void *new_pages;
-  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
-    new_pages = MallocHook::UnhookedMMap(0, size,
-                                         PROT_WRITE|PROT_READ,
-                                         MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
-  } else {
-    new_pages = mmap(0, size,
-                     PROT_WRITE|PROT_READ,
-                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
-  }
-  RAW_CHECK(new_pages != MAP_FAILED, "mmap error");
-
-  return new_pages;
-}
-
-void DefaultPagesAllocator::UnMapPages(int32 flags, void *region, size_t size) {
-  int munmap_result;
-  if ((flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
-    munmap_result = munmap(region, size);
-  } else {
-    munmap_result = MallocHook::UnhookedMUnmap(region, size);
-  }
-  RAW_CHECK(munmap_result == 0,
-            "LowLevelAlloc::DeleteArena: munmap failed address");
-}
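
The practical effect for callers is that NewArenaWithCustomAlloc is gone, so arenas can only be created through NewArena; the pages behind every arena now come straight from mmap, or from MallocHook::UnhookedMMap when the arena was created with kAsyncSignalSafe, so that no malloc hooks run from a signal-handler context. The fragment below is a minimal call-site sketch under that assumption, not code from this tree: NewArena, DefaultArena and AllocWithArena are visible in the diff above, while Free and DeleteArena are assumed from base/low_level_alloc.h and the function name UseLowLevelArena is illustrative.

    // Minimal call-site sketch (not from this tree).  NewArena, DefaultArena
    // and AllocWithArena appear in the diff above; Free and DeleteArena are
    // assumed from base/low_level_alloc.h.
    #include "base/low_level_alloc.h"
    #include "base/logging.h"            // for RAW_CHECK

    static void UseLowLevelArena() {
      // Pages for this arena are mapped directly with mmap (or
      // MallocHook::UnhookedMMap for kAsyncSignalSafe arenas); there is
      // no PagesAllocator left to substitute.
      LowLevelAlloc::Arena *arena =
          LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());

      void *block = LowLevelAlloc::AllocWithArena(1024, arena);
      RAW_CHECK(block != NULL, "arena allocation failed");

      LowLevelAlloc::Free(block);   // assumed from the header; amortized O(log n)
      RAW_CHECK(LowLevelAlloc::DeleteArena(arena),
                "DeleteArena failed: arena still has live blocks");
    }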