author    android-build-team Robot <android-build-team-robot@google.com>  2019-05-05 13:51:04 +0000
committer android-build-team Robot <android-build-team-robot@google.com>  2019-05-05 13:51:04 +0000
commit    8ca8f3eb923a01ec4e201512d123a8c2949372ba (patch)
tree      5d0471227f485ce69c3df9e65b93e1186ad9fee8
parent    4235b5c6798e04ddd11a7de7c45933f7cdd3f61a (diff)
parent    8129245231d25fdcd829e00fda895d177d552cdf (diff)
download  jemalloc_new-android10-mainline-release.tar.gz

Change-Id: Id0de53cccd8434d9142a29102df3089f3c288837
-rw-r--r--  src/android_je_mallinfo.c | 73
1 file changed, 39 insertions(+), 34 deletions(-)
diff --git a/src/android_je_mallinfo.c b/src/android_je_mallinfo.c
index 8a7ff232..53bf6644 100644
--- a/src/android_je_mallinfo.c
+++ b/src/android_je_mallinfo.c
@@ -14,6 +14,37 @@
* limitations under the License.
*/

+static size_t accumulate_large_allocs(arena_t* arena) {
+ size_t total_bytes = 0;
+
+ /* Accumulate the large allocation stats.
+ * Do not include stats.allocated_large, it is only updated by
+ * arena_stats_merge, and would include the data counted below.
+ */
+ for (unsigned j = 0; j < NSIZES - NBINS; j++) {
+ /* Read ndalloc first so that we guarantee nmalloc >= ndalloc. */
+ uint64_t ndalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].ndalloc);
+ uint64_t nmalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].nmalloc);
+ size_t allocs = (size_t)(nmalloc - ndalloc);
+ total_bytes += sz_index2size(NBINS + j) * allocs;
+ }
+ return total_bytes;
+}
+
+static size_t accumulate_small_allocs(arena_t* arena) {
+ size_t total_bytes = 0;
+ for (unsigned j = 0; j < NBINS; j++) {
+ bin_t* bin = &arena->bins[j];
+
+ /* NOTE: This includes allocations cached on every thread. */
+ malloc_mutex_lock(TSDN_NULL, &bin->lock);
+ total_bytes += bin_infos[j].reg_size * bin->stats.curregs;
+ malloc_mutex_unlock(TSDN_NULL, &bin->lock);
+ }
+ return total_bytes;
+}
+
+
/* Only use bin locks since the stats are now all atomic and can be read
* without taking the stats lock.
*/
@@ -27,27 +58,8 @@ struct mallinfo je_mallinfo() {
if (arena != NULL) {
mi.hblkhd += atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);

- /* Accumulate the small bins. */
- for (unsigned j = 0; j < NBINS; j++) {
- bin_t* bin = &arena->bins[j];
-
- /* NOTE: This includes allocations cached on every thread. */
- malloc_mutex_lock(TSDN_NULL, &bin->lock);
- mi.uordblks += bin_infos[j].reg_size * bin->stats.curregs;
- malloc_mutex_unlock(TSDN_NULL, &bin->lock);
- }
-
- /* Accumulate the large allocation stats.
- * Do not include stats.allocated_large, it is only updated by
- * arena_stats_merge, and would include the data counted below.
- */
- for (unsigned j = 0; j < NSIZES - NBINS; j++) {
- /* Read ndalloc first so that we guarantee nmalloc >= ndalloc. */
- uint64_t ndalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].ndalloc);
- uint64_t nmalloc = arena_stats_read_u64(TSDN_NULL, &arena->stats, &arena->stats.lstats[j].nmalloc);
- size_t allocs = (size_t)(nmalloc - ndalloc);
- mi.uordblks += sz_index2size(NBINS + j) * allocs;
- }
+ mi.uordblks += accumulate_small_allocs(arena);
+ mi.uordblks += accumulate_large_allocs(arena);
}
}
malloc_mutex_unlock(TSDN_NULL, &arenas_lock);
@@ -56,15 +68,15 @@ struct mallinfo je_mallinfo() {
return mi;
}

-size_t __mallinfo_narenas() {
+size_t je_mallinfo_narenas() {
return narenas_auto;
}

-size_t __mallinfo_nbins() {
+size_t je_mallinfo_nbins() {
return NBINS;
}

-struct mallinfo __mallinfo_arena_info(size_t aidx) {
+struct mallinfo je_mallinfo_arena_info(size_t aidx) {
struct mallinfo mi;
memset(&mi, 0, sizeof(mi));

@@ -73,22 +85,15 @@ struct mallinfo __mallinfo_arena_info(size_t aidx) {
arena_t* arena = atomic_load_p(&arenas[aidx], ATOMIC_ACQUIRE);
if (arena != NULL) {
mi.hblkhd = atomic_load_zu(&arena->stats.mapped, ATOMIC_ACQUIRE);
- mi.ordblks = atomic_load_zu(&arena->stats.allocated_large, ATOMIC_ACQUIRE);
-
- for (unsigned j = 0; j < NBINS; j++) {
- bin_t* bin = &arena->bins[j];
-
- malloc_mutex_lock(TSDN_NULL, &bin->lock);
- mi.fsmblks += bin_infos[j].reg_size * bin->stats.curregs;
- malloc_mutex_unlock(TSDN_NULL, &bin->lock);
- }
+ mi.ordblks = accumulate_large_allocs(arena);
+ mi.fsmblks = accumulate_small_allocs(arena);
}
}
malloc_mutex_unlock(TSDN_NULL, &arenas_lock);

return mi;
}

-struct mallinfo __mallinfo_bin_info(size_t aidx, size_t bidx) {
+struct mallinfo je_mallinfo_bin_info(size_t aidx, size_t bidx) {
struct mallinfo mi;
memset(&mi, 0, sizeof(mi));
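
Note on the lock-free read in accumulate_large_allocs(): nmalloc and ndalloc are
monotonically increasing counters, and a deallocation is only recorded after its
matching allocation, so loading ndalloc before nmalloc guarantees the snapshot
satisfies nmalloc >= ndalloc and the subtraction cannot underflow even while other
threads allocate concurrently. A minimal standalone sketch of the same pattern in
C11 atomics (counter names and memory orderings here are illustrative, not
jemalloc's internals):

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t nmalloc; /* incremented on every allocation */
static _Atomic uint64_t ndalloc; /* incremented on every matching free */

/* Read ndalloc FIRST. Every ndalloc increment happens after its matching
 * nmalloc increment, so by read coherence the nmalloc load that follows
 * must observe a value at least as large as the ndalloc value just seen. */
static uint64_t live_objects(void) {
  uint64_t d = atomic_load_explicit(&ndalloc, memory_order_acquire);
  uint64_t m = atomic_load_explicit(&nmalloc, memory_order_acquire);
  return m - d; /* cannot underflow given the read order */
}

int main(void) {
  atomic_fetch_add(&nmalloc, 3); /* three allocations... */
  atomic_fetch_add(&ndalloc, 1); /* ...one of them freed */
  printf("live objects: %" PRIu64 "\n", live_objects());
  return 0;
}

For context on where these totals surface: je_mallinfo() is the jemalloc backend
for Bionic's mallinfo(3), so after this change uordblks reports allocated bytes
(small bins plus large size classes) and hblkhd reports mapped bytes, while the
renamed je_mallinfo_* entry points expose the same data per arena and per bin. A
hedged caller-side sketch against the standard <malloc.h> interface (field widths
differ between Bionic's size_t and glibc's int, hence the casts):

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  void* p = malloc(1 << 20); /* provoke some large-class activity */
  struct mallinfo mi = mallinfo();
  printf("mapped bytes (hblkhd):      %llu\n", (unsigned long long)mi.hblkhd);
  printf("allocated bytes (uordblks): %llu\n", (unsigned long long)mi.uordblks);
  free(p);
  return 0;
}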