author     Jared Duke <jdduke@google.com>  2024-04-23 20:40:37 +0000
committer  Treehugger Robot <android-test-infra-autosubmit@system.gserviceaccount.com>  2024-04-25 01:12:13 +0000
commit     3b6f3b2d025c9bf2dac4de61ea92ca0df077b171 (patch)
tree       f1cad0e6b3d5a214b58907a4edeb49bee1c5023b
parent     d3e3da35d257b325d195bdc99b6b5db5c4634c37 (diff)
Madvise uncompressed app images
We currently only madvise compressed app images, which accelerates the
immediate software decompression. However, uncompressed app images also
benefit from explicit madvise calls, as they are immediately patched after
mmap. This patching ends up dirtying most pages, and a preceding madvise
avoids excessive faulting during the fixup.

Bug: 336349058
Test: m + ART benchmark service w/ uncompressed app images
Change-Id: Ica9786e2dd01df338bff44d787d1dd0b82c48809
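To make the rationale concrete, below is a minimal standalone C++ sketch of the same pattern; it is not ART's implementation, and the command-line handling and the page-touching loop (a stand-in for the post-mmap relocation fixup) are illustrative assumptions. It maps a file privately, issues madvise(MADV_WILLNEED) over the mapping, and then dirties every page, which is where the per-page faulting would otherwise show up.

// Standalone sketch (not ART code): map a file privately, madvise the range,
// then dirty every page the way a relocation/fixup pass would.
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#include <cstdint>
#include <cstdio>

int main(int argc, char** argv) {
  if (argc != 2) {
    std::fprintf(stderr, "usage: %s <image-file>\n", argv[0]);
    return 1;
  }
  int fd = open(argv[1], O_RDONLY);
  if (fd < 0) {
    perror("open");
    return 1;
  }
  struct stat st;
  if (fstat(fd, &st) != 0) {
    perror("fstat");
    return 1;
  }
  const size_t size = static_cast<size_t>(st.st_size);
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));

  // Private writable mapping: each write takes a copy-on-write fault, which is
  // what patching an uncompressed app image after mmap does to most of its pages.
  void* addr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, /*offset=*/ 0);
  if (addr == MAP_FAILED) {
    perror("mmap");
    return 1;
  }

  // Ask the kernel to read the backing file data in bulk up front, instead of
  // paying a major fault per page once the patching loop starts touching them.
  if (madvise(addr, size, MADV_WILLNEED) != 0) {
    perror("madvise");  // Advisory only; the loop below still works without it.
  }

  // Stand-in for the fixup pass: write to one byte of every page to dirty it.
  uint8_t* bytes = static_cast<uint8_t*>(addr);
  for (size_t offset = 0; offset < size; offset += page_size) {
    bytes[offset] = 0xAA;
  }

  munmap(addr, size);
  close(fd);
  return 0;
}

In the diff below, the same idea is applied to the direct-mapped (uncompressed) case by calling Runtime::MadviseFileForRange on the freshly mapped image; the madvise_size_limit pulled from Runtime::GetMadviseWillNeedSizeArt() presumably caps how much of the mapping is advised.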
-rw-r--r--  runtime/gc/space/image_space.cc  |  49
1 file changed, 25 insertions(+), 24 deletions(-)
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 7f1bde2426..852039a206 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -995,24 +995,34 @@ class ImageSpace::Loader {
/*out*/std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
TimingLogger::ScopedTiming timing("MapImageFile", logger);
- std::string temp_error_msg;
+
+  // The runtime might not be available at this point if we're running dex2oat or oatdump, in
+  // which case we simply disable the madvise optimization by using a limit of 0.
+ Runtime* runtime = Runtime::Current();
+ const size_t madvise_size_limit = runtime ? runtime->GetMadviseWillNeedSizeArt() : 0;
+
const bool is_compressed = image_header.HasCompressedBlock();
if (!is_compressed && allow_direct_mapping) {
uint8_t* address = (image_reservation != nullptr) ? image_reservation->Begin() : nullptr;
// The reserved memory size is aligned up to kElfSegmentAlignment to ensure
// that the next reserved area will be aligned to the value.
- return MemMap::MapFileAtAddress(address,
- CondRoundUp<kPageSizeAgnostic>(image_header.GetImageSize(),
- kElfSegmentAlignment),
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE,
- fd,
- /*start=*/ 0,
- /*low_4gb=*/ true,
- image_filename,
- /*reuse=*/ false,
- image_reservation,
- error_msg);
+ MemMap map = MemMap::MapFileAtAddress(
+ address,
+ CondRoundUp<kPageSizeAgnostic>(image_header.GetImageSize(), kElfSegmentAlignment),
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ fd,
+ /*start=*/0,
+ /*low_4gb=*/true,
+ image_filename,
+ /*reuse=*/false,
+ image_reservation,
+ error_msg);
+ if (map.IsValid()) {
+ Runtime::MadviseFileForRange(
+ madvise_size_limit, map.Size(), map.Begin(), map.End(), image_filename);
+ }
+ return map;
}
// Reserve output and copy/decompress into it.
@@ -1040,17 +1050,8 @@ class ImageSpace::Loader {
return MemMap::Invalid();
}
- Runtime* runtime = Runtime::Current();
- // The runtime might not be available at this point if we're running
- // dex2oat or oatdump.
- if (runtime != nullptr) {
- size_t madvise_size_limit = runtime->GetMadviseWillNeedSizeArt();
- Runtime::MadviseFileForRange(madvise_size_limit,
- temp_map.Size(),
- temp_map.Begin(),
- temp_map.End(),
- image_filename);
- }
+ Runtime::MadviseFileForRange(
+ madvise_size_limit, temp_map.Size(), temp_map.Begin(), temp_map.End(), image_filename);
if (is_compressed) {
memcpy(map.Begin(), &image_header, sizeof(ImageHeader));