author    gabor@google.com <gabor@google.com@62dab493-f737-651d-591e-8d6aee1b9529>  2011-06-29 00:30:50 +0000
committer gabor@google.com <gabor@google.com@62dab493-f737-651d-591e-8d6aee1b9529>  2011-06-29 00:30:50 +0000
commit    9cb7b73e2668626608a4a8706d69d3e7000cebfc (patch)
tree      695abad99252bbfe5fcf0d04f6d85b5759fc4927 /util
parent    70b7ec1b3b394d7b79291edf4928d1094b6a760d (diff)
download  src-9cb7b73e2668626608a4a8706d69d3e7000cebfc.tar.gz
Platform detection during build, plus compatibility patches for machines without <cstdatomic>.
This revision adds two major changes:

1. build_detect_platform, which generates build_config.mk with platform-dependent flags for the build process
2. /port/atomic_pointer.h, with an AtomicPointer implementation for platforms without <cstdatomic>

Some of this code is loosely based on patches submitted to the LevelDB mailing list at https://groups.google.com/forum/#!forum/leveldb. Tip of the hat to Dave Smith and Edouard A, who both sent patches.

Snappy (http://code.google.com/p/snappy/) and <cstdatomic> are now both detected by the build_detect_platform script (1.), which is executed during make. For (2.), instead of broadly importing atomicops_* from Chromium or the Google performance tools, we chose to implement just AtomicPointer and the limited atomic load and store operations it needs. This resulted in much less code and fewer files - everything is contained in atomic_pointer.h.

git-svn-id: http://leveldb.googlecode.com/svn/trunk@34 62dab493-f737-651d-591e-8d6aee1b9529
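The new port/atomic_pointer.h itself is outside this diffstat (which is limited to 'util'), but the commit message describes its shape: one pointer-sized slot plus acquire/release loads and stores. A minimal sketch of that approach, assuming an x86/x86-64 platform where aligned pointer loads and stores are already atomic and only a compiler barrier is needed (the real header selects a MemoryBarrier per platform):

// Sketch only, not the file from this commit. Assumes x86/x86-64:
// hardware keeps loads/stores ordered enough for this use case, so a
// compiler-reordering barrier is all MemoryBarrier() has to provide.
namespace leveldb {
namespace port {

inline void MemoryBarrier() {
  // Stops the compiler from moving memory operations across this point.
  __asm__ __volatile__("" : : : "memory");
}

class AtomicPointer {
 private:
  void* rep_;
 public:
  AtomicPointer() { }
  explicit AtomicPointer(void* p) : rep_(p) { }
  inline void* NoBarrier_Load() const { return rep_; }
  inline void NoBarrier_Store(void* v) { rep_ = v; }
  inline void* Acquire_Load() const {
    void* result = rep_;
    MemoryBarrier();  // reads after this cannot be hoisted above the load
    return result;
  }
  inline void Release_Store(void* v) {
    MemoryBarrier();  // writes before this cannot sink below the store
    rep_ = v;
  }
};

}  // namespace port
}  // namespace leveldb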
Diffstat (limited to 'util')
-rw-r--r--  util/cache.cc  153
1 file changed, 93 insertions(+), 60 deletions(-)
diff --git a/util/cache.cc b/util/cache.cc
index 968e6a0..5829b79 100644
--- a/util/cache.cc
+++ b/util/cache.cc
@@ -2,17 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-#if defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_ANDROID)
-#include <unordered_set>
-#elif defined(LEVELDB_PLATFORM_OSX)
-#include <ext/hash_set>
-#elif defined(LEVELDB_PLATFORM_CHROMIUM)
-#include "base/hash_tables.h"
-#else
-#include <hash_set> // TODO(sanjay): Switch to unordered_set when possible.
-#endif
-
#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
#include "leveldb/cache.h"
#include "port/port.h"
@@ -33,6 +25,7 @@ namespace {
struct LRUHandle {
void* value;
void (*deleter)(const Slice&, void* value);
+ LRUHandle* next_hash;
LRUHandle* next;
LRUHandle* prev;
size_t charge; // TODO(opt): Only allow uint32_t?
@@ -51,43 +44,93 @@ struct LRUHandle {
}
};
-// Pick a platform specific hash_set instantiation
-#if defined(LEVELDB_PLATFORM_CHROMIUM) && defined(OS_WIN)
- // Microsoft's hash_set deviates from the standard. See
- // http://msdn.microsoft.com/en-us/library/1t4xas78(v=vs.80).aspx
- // for details. Basically the 2 param () operator is a less than and
- // the 1 param () operator is a hash function.
- struct HandleHashCompare : public stdext::hash_compare<LRUHandle*> {
- size_t operator() (LRUHandle* h) const {
- Slice k = h->key();
- return Hash(k.data(), k.size(), 0);
+// We provide our own simple hash table since it removes a whole bunch
+// of porting hacks and is also faster than some of the built-in hash
+// table implementations in some of the compiler/runtime combinations
+// we have tested. E.g., readrandom speeds up by ~5% over g++
+// 4.4.3's builtin hashtable.
+class HandleTable {
+ public:
+ HandleTable() : length_(0), elems_(0), list_(NULL) { Resize(); }
+ ~HandleTable() { delete[] list_; }
+
+ LRUHandle* Lookup(LRUHandle* h) {
+ return *FindPointer(h);
+ }
+
+ LRUHandle* Insert(LRUHandle* h) {
+ LRUHandle** ptr = FindPointer(h);
+ LRUHandle* old = *ptr;
+ h->next_hash = (old == NULL ? NULL : old->next_hash);
+ *ptr = h;
+ if (old == NULL) {
+ ++elems_;
+ if (elems_ > length_) {
+ // Since each cache entry is fairly large, we aim for a small
+ // average linked list length (<= 1).
+ Resize();
+ }
}
- bool operator() (LRUHandle* a, LRUHandle* b) const {
- return a->key().compare(b->key()) < 0;
+ return old;
+ }
+
+ LRUHandle* Remove(LRUHandle* h) {
+ LRUHandle** ptr = FindPointer(h);
+ LRUHandle* result = *ptr;
+ if (result != NULL) {
+ *ptr = result->next_hash;
+ --elems_;
}
- };
- typedef base::hash_set<LRUHandle*, HandleHashCompare> HandleTable;
-#else
- struct HandleHash {
- inline size_t operator()(LRUHandle* h) const {
- Slice k = h->key();
- return Hash(k.data(), k.size(), 0);
+ return result;
+ }
+
+ private:
+ // The table consists of an array of buckets where each bucket is
+ // a linked list of cache entries that hash into the bucket.
+ uint32_t length_;
+ uint32_t elems_;
+ LRUHandle** list_;
+
+ // Return a pointer to slot that points to a cache entry that
+ // matches *h. If there is no such cache entry, return a pointer to
+ // the trailing slot in the corresponding linked list.
+ LRUHandle** FindPointer(LRUHandle* h) {
+ Slice key = h->key();
+ uint32_t hash = Hash(key.data(), key.size(), 0);
+ LRUHandle** ptr = &list_[hash & (length_ - 1)];
+ while (*ptr != NULL && key != (*ptr)->key()) {
+ ptr = &(*ptr)->next_hash;
}
- };
+ return ptr;
+ }
- struct HandleEq {
- inline bool operator()(LRUHandle* a, LRUHandle* b) const {
- return a->key() == b->key();
+ void Resize() {
+ uint32_t new_length = 4;
+ while (new_length < elems_) {
+ new_length *= 2;
+ }
+ LRUHandle** new_list = new LRUHandle*[new_length];
+ memset(new_list, 0, sizeof(new_list[0]) * new_length);
+ uint32_t count = 0;
+    for (uint32_t i = 0; i < length_; i++) {
+ LRUHandle* h = list_[i];
+ while (h != NULL) {
+ LRUHandle* next = h->next_hash;
+ Slice key = h->key();
+ uint32_t hash = Hash(key.data(), key.size(), 0);
+ LRUHandle** ptr = &new_list[hash & (new_length - 1)];
+ h->next_hash = *ptr;
+ *ptr = h;
+ h = next;
+ count++;
+ }
}
- };
-# if defined(LEVELDB_PLATFORM_CHROMIUM)
- typedef base::hash_set<LRUHandle*, HandleHash, HandleEq> HandleTable;
-# elif defined(LEVELDB_PLATFORM_POSIX) || defined(LEVELDB_PLATFORM_ANDROID)
- typedef std::unordered_set<LRUHandle*, HandleHash, HandleEq> HandleTable;
-# else
- typedef __gnu_cxx::hash_set<LRUHandle*, HandleHash, HandleEq> HandleTable;
-# endif
-#endif
+ assert(elems_ == count);
+ delete[] list_;
+ list_ = new_list;
+ length_ = new_length;
+ }
+};
class LRUCache : public Cache {
public:
@@ -132,7 +175,6 @@ LRUCache::LRUCache(size_t capacity)
}
LRUCache::~LRUCache() {
- table_.clear();
for (LRUHandle* e = lru_.next; e != &lru_; ) {
LRUHandle* next = e->next;
assert(e->refs == 1); // Error if caller has an unreleased handle
@@ -170,16 +212,13 @@ Cache::Handle* LRUCache::Lookup(const Slice& key) {
LRUHandle dummy;
dummy.next = &dummy;
dummy.value = const_cast<Slice*>(&key);
- HandleTable::iterator iter = table_.find(&dummy);
- if (iter == table_.end()) {
- return NULL;
- } else {
- LRUHandle* e = const_cast<LRUHandle*>(*iter);
+ LRUHandle* e = table_.Lookup(&dummy);
+ if (e != NULL) {
e->refs++;
LRU_Remove(e);
LRU_Append(e);
- return reinterpret_cast<Handle*>(e);
}
+ return reinterpret_cast<Handle*>(e);
}
void* LRUCache::Value(Handle* handle) {
@@ -206,20 +245,16 @@ Cache::Handle* LRUCache::Insert(const Slice& key, void* value, size_t charge,
LRU_Append(e);
usage_ += charge;
- std::pair<HandleTable::iterator,bool> p = table_.insert(e);
- if (!p.second) {
- // Kill existing entry
- LRUHandle* old = const_cast<LRUHandle*>(*(p.first));
+ LRUHandle* old = table_.Insert(e);
+ if (old != NULL) {
LRU_Remove(old);
- table_.erase(p.first);
- table_.insert(e);
Unref(old);
}
while (usage_ > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
LRU_Remove(old);
- table_.erase(old);
+ table_.Remove(old);
Unref(old);
}
@@ -232,11 +267,9 @@ void LRUCache::Erase(const Slice& key) {
LRUHandle dummy;
dummy.next = &dummy;
dummy.value = const_cast<Slice*>(&key);
- HandleTable::iterator iter = table_.find(&dummy);
- if (iter != table_.end()) {
- LRUHandle* e = const_cast<LRUHandle*>(*iter);
+ LRUHandle* e = table_.Remove(&dummy);
+ if (e != NULL) {
LRU_Remove(e);
- table_.erase(iter);
Unref(e);
}
}
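
The pointer-to-slot trick in HandleTable::FindPointer above is what lets Lookup, Insert, and Remove share one traversal: by returning the address of the link that points at the matching entry (or at the trailing NULL slot), the caller can read, splice in, or splice out a node without any "prev" bookkeeping or a second walk. A standalone illustration of the same technique, using a hypothetical Node type with int keys rather than the patch's LRUHandle:

#include <assert.h>
#include <stddef.h>

// Hypothetical node type for illustration; the patch chains LRUHandle
// objects through next_hash in exactly this way.
struct Node {
  int key;
  Node* next;
};

// Return the address of the slot pointing at the node with the given key,
// or the address of the trailing NULL slot if no such node exists.
// Mirrors HandleTable::FindPointer, minus the hashing and bucket array.
static Node** FindPointer(Node** head, int key) {
  Node** ptr = head;
  while (*ptr != NULL && (*ptr)->key != key) {
    ptr = &(*ptr)->next;
  }
  return ptr;
}

int main() {
  Node c = { 3, NULL };
  Node b = { 2, &c };
  Node a = { 1, &b };
  Node* head = &a;

  // Lookup: just dereference the returned slot.
  assert(*FindPointer(&head, 2) == &b);

  // Remove: splice the node out through the slot; no prev pointer needed.
  Node** slot = FindPointer(&head, 2);
  *slot = (*slot)->next;
  assert(*FindPointer(&head, 2) == NULL);  // slot now holds trailing NULL

  // Insert: a miss returns the trailing slot, ready to receive a node.
  Node d = { 4, NULL };
  *FindPointer(&head, 4) = &d;
  assert(*FindPointer(&head, 4) == &d);
  return 0;
}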