summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndroid Build Coastguard Worker <android-build-coastguard-worker@google.com>2023-12-04 13:28:06 +0000
committerAndroid Build Coastguard Worker <android-build-coastguard-worker@google.com>2023-12-04 13:28:06 +0000
commita67fdb6d0b175d03d14d676001554966f46aa8a8 (patch)
tree867d105bc89f8822eb12dfa12b327b71edf7645a
parent26a5f68b5b14fe86ddb920ec5e9a584e6c3f9eb2 (diff)
parent3414d01c7f3606bd700a9bff22df47d6d53c8b6a (diff)
downloadart-android14-mainline-sdkext-release.tar.gz
Snap for 11173240 from 3414d01c7f3606bd700a9bff22df47d6d53c8b6a to mainline-sdkext-releaseaml_sdk_341410000android14-mainline-sdkext-release
Change-Id: I6b3ee2cd36be287d37a16f73fcfebfa1a5cbd031
-rw-r--r--TEST_MAPPING9
-rw-r--r--artd/Android.bp4
-rw-r--r--artd/artd.cc160
-rw-r--r--artd/artd.h2
-rw-r--r--artd/artd_test.cc271
-rw-r--r--artd/binder/com/android/server/art/CopyAndRewriteProfileResult.aidl39
-rw-r--r--artd/binder/com/android/server/art/IArtd.aidl7
-rw-r--r--artd/binder/com/android/server/art/MergeProfileOptions.aidl2
-rw-r--r--artd/file_utils.cc14
-rw-r--r--artd/path_utils.cc32
-rw-r--r--artd/testing.h4
-rw-r--r--compiler/optimizing/write_barrier_elimination.cc9
-rw-r--r--dex2oat/linker/image_writer.cc12
-rw-r--r--libartbase/base/macros.h4
-rw-r--r--libartservice/service/java/com/android/server/art/ArtManagerLocal.java114
-rw-r--r--libartservice/service/java/com/android/server/art/ArtShellCommand.java68
-rw-r--r--libartservice/service/java/com/android/server/art/BackgroundDexoptJob.java65
-rw-r--r--libartservice/service/java/com/android/server/art/BackgroundDexoptJobStatsReporter.java67
-rw-r--r--libartservice/service/java/com/android/server/art/DexUseManagerLocal.java11
-rw-r--r--libartservice/service/java/com/android/server/art/Dexopter.java65
-rw-r--r--libartservice/service/java/com/android/server/art/ReasonMapping.java3
-rw-r--r--libartservice/service/java/com/android/server/art/Utils.java69
-rw-r--r--libartservice/service/java/com/android/server/art/model/ArtFlags.java67
-rw-r--r--libartservice/service/java/com/android/server/art/model/DexoptParams.java12
-rw-r--r--libartservice/service/java/com/android/server/art/model/DexoptResult.java123
-rw-r--r--libartservice/service/java/com/android/server/art/model/OperationProgress.java2
-rw-r--r--libartservice/service/javatests/com/android/server/art/ArtManagerLocalTest.java127
-rw-r--r--libartservice/service/javatests/com/android/server/art/BackgroundDexoptJobTest.java42
-rw-r--r--libartservice/service/javatests/com/android/server/art/DexUseManagerTest.java24
-rw-r--r--libartservice/service/javatests/com/android/server/art/DexoptHelperTest.java12
-rw-r--r--libartservice/service/javatests/com/android/server/art/PrimaryDexopterParameterizedTest.java39
-rw-r--r--libartservice/service/javatests/com/android/server/art/PrimaryDexopterTest.java141
-rw-r--r--libartservice/service/javatests/com/android/server/art/ReasonMappingTest.java23
-rw-r--r--libartservice/service/javatests/com/android/server/art/SecondaryDexopterTest.java16
-rw-r--r--libartservice/service/javatests/com/android/server/art/model/DexoptParamsTest.java34
-rw-r--r--libartservice/service/javatests/com/android/server/art/testing/TestingUtils.java22
-rw-r--r--libarttools/Android.bp1
-rw-r--r--libarttools/tools/art_exec.cc5
-rw-r--r--libarttools/tools/art_exec_test.cc29
-rw-r--r--libarttools/tools/tools.cc8
-rw-r--r--libnativeloader/library_namespaces.cpp5
-rw-r--r--libnativeloader/native_loader_test.cpp18
-rw-r--r--libnativeloader/public_libraries.cpp61
-rw-r--r--libnativeloader/public_libraries.h10
-rw-r--r--libprofile/profile/profile_compilation_info.cc78
-rw-r--r--libprofile/profile/profile_compilation_info.h2
-rw-r--r--libprofile/profile/profile_compilation_info_test.cc48
-rw-r--r--odrefresh/odr_common.cc14
-rw-r--r--odrefresh/odr_common.h5
-rw-r--r--odrefresh/odr_common_test.cc19
-rw-r--r--odrefresh/odr_config.h45
-rw-r--r--odrefresh/odrefresh.cc357
-rw-r--r--odrefresh/odrefresh.h3
-rw-r--r--odrefresh/odrefresh_test.cc28
-rw-r--r--perfetto_hprof/perfetto_hprof.cc2
-rw-r--r--profman/include/profman/profman_result.h10
-rw-r--r--profman/profile_assistant.cc37
-rw-r--r--profman/profile_assistant.h17
-rw-r--r--profman/profile_assistant_test.cc209
-rw-r--r--profman/profman.cc13
-rw-r--r--runtime/Android.bp1
-rw-r--r--runtime/base/gc_visited_arena_pool.cc244
-rw-r--r--runtime/base/gc_visited_arena_pool.h207
-rw-r--r--runtime/class_linker.cc54
-rw-r--r--runtime/class_linker.h11
-rw-r--r--runtime/class_table-inl.h36
-rw-r--r--runtime/class_table.h26
-rw-r--r--runtime/gc/accounting/bitmap.cc6
-rw-r--r--runtime/gc/accounting/bitmap.h16
-rw-r--r--runtime/gc/accounting/space_bitmap.h6
-rw-r--r--runtime/gc/collector/concurrent_copying.cc13
-rw-r--r--runtime/gc/collector/mark_compact-inl.h40
-rw-r--r--runtime/gc/collector/mark_compact.cc559
-rw-r--r--runtime/gc/collector/mark_compact.h65
-rw-r--r--runtime/gc/collector/mark_sweep-inl.h2
-rw-r--r--runtime/gc/collector_type.h2
-rw-r--r--runtime/gc/heap-inl.h3
-rw-r--r--runtime/gc/heap.cc32
-rw-r--r--runtime/gc/heap.h12
-rw-r--r--runtime/gc/reference_queue.cc11
-rw-r--r--runtime/gc/reference_queue.h6
-rw-r--r--runtime/gc/space/bump_pointer_space-walk-inl.h2
-rw-r--r--runtime/gc/space/bump_pointer_space.cc70
-rw-r--r--runtime/gc/space/bump_pointer_space.h51
-rw-r--r--runtime/gc/space/image_space.cc20
-rw-r--r--runtime/gc/space/large_object_space.cc21
-rw-r--r--runtime/gc/space/large_object_space.h4
-rw-r--r--runtime/image.cc11
-rw-r--r--runtime/intern_table.h13
-rw-r--r--runtime/javaheapprof/javaheapsampler.cc4
-rw-r--r--runtime/javaheapprof/javaheapsampler.h4
-rw-r--r--runtime/jit/jit.cc2
-rw-r--r--runtime/jni/local_reference_table.cc7
-rw-r--r--runtime/linear_alloc-inl.h12
-rw-r--r--runtime/linear_alloc.h7
-rw-r--r--runtime/metrics/statsd.cc3
-rw-r--r--runtime/mirror/object-readbarrier-inl.h7
-rw-r--r--runtime/mirror/object-refvisitor-inl.h263
-rw-r--r--runtime/mirror/object.h5
-rw-r--r--runtime/native/dalvik_system_ZygoteHooks.cc10
-rw-r--r--runtime/oat_file_assistant.cc46
-rw-r--r--runtime/runtime.cc92
-rw-r--r--runtime/runtime_image.cc6
-rw-r--r--runtime/thread_list.cc8
-rw-r--r--runtime/thread_list.h6
-rw-r--r--test/2247-checker-write-barrier-elimination/Android.bp2
-rw-r--r--test/849-records/build.py22
-rw-r--r--test/849-records/expected-stderr.txt0
-rw-r--r--test/849-records/expected-stdout.txt0
-rw-r--r--test/849-records/info.txt1
-rw-r--r--test/849-records/src/Main.java28
-rw-r--r--test/knownfailures.json5
-rw-r--r--test/odsign/test-src/com/android/tests/odsign/OdrefreshFactoryHostTestBase.java8
-rw-r--r--test/odsign/test-src/com/android/tests/odsign/OdrefreshHostTest.java12
-rwxr-xr-xtest/run_test_build.py7
-rwxr-xr-xtest/utils/regen-test-files4
-rwxr-xr-xtools/buildbot-build.sh2
117 files changed, 3464 insertions, 1392 deletions
diff --git a/TEST_MAPPING b/TEST_MAPPING
index d9d5431e3e..084f3d7386 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -455,9 +455,6 @@
"name": "art-run-test-2244-checker-remove-try-boundary[com.google.android.art.apex]"
},
{
- "name": "art-run-test-2247-checker-write-barrier-elimination[com.google.android.art.apex]"
- },
- {
"name": "art-run-test-2249-checker-return-try-boundary-exit-in-loop[com.google.android.art.apex]"
},
{
@@ -1880,9 +1877,6 @@
"name": "art-run-test-2244-checker-remove-try-boundary"
},
{
- "name": "art-run-test-2247-checker-write-barrier-elimination"
- },
- {
"name": "art-run-test-2249-checker-return-try-boundary-exit-in-loop"
},
{
@@ -3289,9 +3283,6 @@
"name": "art-run-test-2244-checker-remove-try-boundary"
},
{
- "name": "art-run-test-2247-checker-write-barrier-elimination"
- },
- {
"name": "art-run-test-2249-checker-return-try-boundary-exit-in-loop"
},
{
diff --git a/artd/Android.bp b/artd/Android.bp
index cacbe289ce..1c964f489c 100644
--- a/artd/Android.bp
+++ b/artd/Android.bp
@@ -65,8 +65,12 @@ art_cc_binary {
art_cc_defaults {
name: "art_artd_tests_defaults",
defaults: ["artd_defaults"],
+ shared_libs: [
+ "libz", // libziparchive dependency; must be repeated here since it's a static lib.
+ ],
static_libs: [
"libgmock",
+ "libziparchive",
],
srcs: [
"artd_test.cc",
diff --git a/artd/artd.cc b/artd/artd.cc
index 59ec07fbd3..04a01a9ca4 100644
--- a/artd/artd.cc
+++ b/artd/artd.cc
@@ -56,14 +56,16 @@
#include "android/binder_manager.h"
#include "android/binder_process.h"
#include "base/compiler_filter.h"
+#include "base/file_magic.h"
#include "base/file_utils.h"
#include "base/globals.h"
#include "base/logging.h"
+#include "base/macros.h"
#include "base/os.h"
+#include "base/zip_archive.h"
#include "cmdline_types.h"
#include "exec_utils.h"
#include "file_utils.h"
-#include "fmt/format.h"
#include "fstab/fstab.h"
#include "oat_file_assistant.h"
#include "oat_file_assistant_context.h"
@@ -80,6 +82,7 @@ namespace {
using ::aidl::com::android::server::art::ArtdDexoptResult;
using ::aidl::com::android::server::art::ArtifactsPath;
+using ::aidl::com::android::server::art::CopyAndRewriteProfileResult;
using ::aidl::com::android::server::art::DexMetadataPath;
using ::aidl::com::android::server::art::DexoptOptions;
using ::aidl::com::android::server::art::DexoptTrigger;
@@ -107,8 +110,6 @@ using ::android::fs_mgr::FstabEntry;
using ::art::tools::CmdlineBuilder;
using ::ndk::ScopedAStatus;
-using ::fmt::literals::operator""_format; // NOLINT
-
using ArtifactsLocation = GetDexoptNeededResult::ArtifactsLocation;
using TmpProfilePath = ProfilePath::TmpProfilePath;
@@ -130,7 +131,7 @@ std::optional<int64_t> GetSize(std::string_view path) {
if (ec) {
// It is okay if the file does not exist. We don't have to log it.
if (ec.value() != ENOENT) {
- LOG(ERROR) << "Failed to get the file size of '{}': {}"_format(path, ec.message());
+ LOG(ERROR) << ART_FORMAT("Failed to get the file size of '{}': {}", path, ec.message());
}
return std::nullopt;
}
@@ -147,7 +148,7 @@ int64_t GetSizeAndDeleteFile(const std::string& path) {
std::error_code ec;
if (!std::filesystem::remove(path, ec)) {
- LOG(ERROR) << "Failed to remove '{}': {}"_format(path, ec.message());
+ LOG(ERROR) << ART_FORMAT("Failed to remove '{}': {}", path, ec.message());
return 0;
}
@@ -347,6 +348,52 @@ Result<void> SetLogVerbosity() {
return {};
}
+CopyAndRewriteProfileResult AnalyzeCopyAndRewriteProfileFailure(
+ File* src, ProfmanResult::CopyAndUpdateResult result) {
+ DCHECK(result == ProfmanResult::kCopyAndUpdateNoMatch ||
+ result == ProfmanResult::kCopyAndUpdateErrorFailedToLoadProfile);
+
+ auto bad_profile = [&](std::string_view error_msg) {
+ return CopyAndRewriteProfileResult{
+ .status = CopyAndRewriteProfileResult::Status::BAD_PROFILE,
+ .errorMsg = ART_FORMAT("Failed to load profile '{}': {}", src->GetPath(), error_msg)};
+ };
+ CopyAndRewriteProfileResult no_profile{.status = CopyAndRewriteProfileResult::Status::NO_PROFILE,
+ .errorMsg = ""};
+
+ int64_t length = src->GetLength();
+ if (length < 0) {
+ return bad_profile(strerror(-length));
+ }
+ if (length == 0) {
+ return no_profile;
+ }
+
+ std::string error_msg;
+ uint32_t magic;
+ if (!ReadMagicAndReset(src->Fd(), &magic, &error_msg)) {
+ return bad_profile(error_msg);
+ }
+ if (IsZipMagic(magic)) {
+ std::unique_ptr<ZipArchive> zip_archive(
+ ZipArchive::OpenFromOwnedFd(src->Fd(), src->GetPath().c_str(), &error_msg));
+ if (zip_archive == nullptr) {
+ return bad_profile(error_msg);
+ }
+ std::unique_ptr<ZipEntry> zip_entry(zip_archive->Find("primary.prof", &error_msg));
+ if (zip_entry == nullptr || zip_entry->GetUncompressedLength() == 0) {
+ return no_profile;
+ }
+ }
+
+ if (result == ProfmanResult::kCopyAndUpdateNoMatch) {
+ return bad_profile(
+ "The profile does not match the APK (The checksums in the profile do not match the "
+ "checksums of the .dex files in the APK)");
+ }
+ return bad_profile("The profile is in the wrong format or an I/O error has occurred");
+}
+
class FdLogger {
public:
void Add(const NewFile& file) { fd_mapping_.emplace_back(file.Fd(), file.TempPath()); }
@@ -464,7 +511,7 @@ ndk::ScopedAStatus Artd::isProfileUsable(const ProfilePath& in_profile,
return ScopedAStatus::ok();
}
return NonFatal(
- "Failed to open profile '{}': {}"_format(profile_path, profile.error().message()));
+ ART_FORMAT("Failed to open profile '{}': {}", profile_path, profile.error().message()));
}
args.Add("--reference-profile-file-fd=%d", profile.value()->Fd());
fd_logger.Add(*profile.value());
@@ -483,11 +530,11 @@ ndk::ScopedAStatus Artd::isProfileUsable(const ProfilePath& in_profile,
return NonFatal("Failed to run profman: " + result.error().message());
}
- LOG(INFO) << "profman returned code {}"_format(result.value());
+ LOG(INFO) << ART_FORMAT("profman returned code {}", result.value());
if (result.value() != ProfmanResult::kSkipCompilationSmallDelta &&
result.value() != ProfmanResult::kSkipCompilationEmptyProfiles) {
- return NonFatal("profman returned an unexpected code: {}"_format(result.value()));
+ return NonFatal(ART_FORMAT("profman returned an unexpected code: {}", result.value()));
}
*_aidl_return = result.value() == ProfmanResult::kSkipCompilationSmallDelta;
@@ -497,7 +544,7 @@ ndk::ScopedAStatus Artd::isProfileUsable(const ProfilePath& in_profile,
ndk::ScopedAStatus Artd::copyAndRewriteProfile(const ProfilePath& in_src,
OutputProfile* in_dst,
const std::string& in_dexFile,
- bool* _aidl_return) {
+ CopyAndRewriteProfileResult* _aidl_return) {
std::string src_path = OR_RETURN_FATAL(BuildProfileOrDmPath(in_src));
std::string dst_path = OR_RETURN_FATAL(BuildFinalProfilePath(in_dst->profilePath));
OR_RETURN_FATAL(ValidateDexPath(in_dexFile));
@@ -513,10 +560,11 @@ ndk::ScopedAStatus Artd::copyAndRewriteProfile(const ProfilePath& in_src,
Result<std::unique_ptr<File>> src = OpenFileForReading(src_path);
if (!src.ok()) {
if (src.error().code() == ENOENT) {
- *_aidl_return = false;
+ _aidl_return->status = CopyAndRewriteProfileResult::Status::NO_PROFILE;
return ScopedAStatus::ok();
}
- return NonFatal("Failed to open src profile '{}': {}"_format(src_path, src.error().message()));
+ return NonFatal(
+ ART_FORMAT("Failed to open src profile '{}': {}", src_path, src.error().message()));
}
args.Add("--profile-file-fd=%d", src.value()->Fd());
fd_logger.Add(*src.value());
@@ -540,19 +588,21 @@ ndk::ScopedAStatus Artd::copyAndRewriteProfile(const ProfilePath& in_src,
return NonFatal("Failed to run profman: " + result.error().message());
}
- LOG(INFO) << "profman returned code {}"_format(result.value());
+ LOG(INFO) << ART_FORMAT("profman returned code {}", result.value());
- if (result.value() == ProfmanResult::kCopyAndUpdateNoMatch) {
- *_aidl_return = false;
+ if (result.value() == ProfmanResult::kCopyAndUpdateNoMatch ||
+ result.value() == ProfmanResult::kCopyAndUpdateErrorFailedToLoadProfile) {
+ *_aidl_return = AnalyzeCopyAndRewriteProfileFailure(
+ src->get(), static_cast<ProfmanResult::CopyAndUpdateResult>(result.value()));
return ScopedAStatus::ok();
}
if (result.value() != ProfmanResult::kCopyAndUpdateSuccess) {
- return NonFatal("profman returned an unexpected code: {}"_format(result.value()));
+ return NonFatal(ART_FORMAT("profman returned an unexpected code: {}", result.value()));
}
OR_RETURN_NON_FATAL(dst->Keep());
- *_aidl_return = true;
+ _aidl_return->status = CopyAndRewriteProfileResult::Status::SUCCESS;
in_dst->profilePath.id = dst->TempId();
in_dst->profilePath.tmpPath = dst->TempPath();
return ScopedAStatus::ok();
@@ -565,8 +615,8 @@ ndk::ScopedAStatus Artd::commitTmpProfile(const TmpProfilePath& in_profile) {
std::error_code ec;
std::filesystem::rename(tmp_profile_path, ref_profile_path, ec);
if (ec) {
- return NonFatal(
- "Failed to move '{}' to '{}': {}"_format(tmp_profile_path, ref_profile_path, ec.message()));
+ return NonFatal(ART_FORMAT(
+ "Failed to move '{}' to '{}': {}", tmp_profile_path, ref_profile_path, ec.message()));
}
return ScopedAStatus::ok();
@@ -578,7 +628,7 @@ ndk::ScopedAStatus Artd::deleteProfile(const ProfilePath& in_profile) {
std::error_code ec;
std::filesystem::remove(profile_path, ec);
if (ec) {
- LOG(ERROR) << "Failed to remove '{}': {}"_format(profile_path, ec.message());
+ LOG(ERROR) << ART_FORMAT("Failed to remove '{}': {}", profile_path, ec.message());
}
return ScopedAStatus::ok();
@@ -622,7 +672,7 @@ ndk::ScopedAStatus Artd::mergeProfiles(const std::vector<ProfilePath>& in_profil
for (const ProfilePath& profile : in_profiles) {
std::string profile_path = OR_RETURN_FATAL(BuildProfileOrDmPath(profile));
if (profile.getTag() == ProfilePath::dexMetadataPath) {
- return Fatal("Does not support DM file, got '{}'"_format(profile_path));
+ return Fatal(ART_FORMAT("Does not support DM file, got '{}'", profile_path));
}
profile_paths.push_back(std::move(profile_path));
}
@@ -651,8 +701,8 @@ ndk::ScopedAStatus Artd::mergeProfiles(const std::vector<ProfilePath>& in_profil
// Skip non-existing file.
continue;
}
- return NonFatal(
- "Failed to open profile '{}': {}"_format(profile_path, profile_file.error().message()));
+ return NonFatal(ART_FORMAT(
+ "Failed to open profile '{}': {}", profile_path, profile_file.error().message()));
}
args.Add("--profile-file-fd=%d", profile_file.value()->Fd());
fd_logger.Add(*profile_file.value());
@@ -669,15 +719,14 @@ ndk::ScopedAStatus Artd::mergeProfiles(const std::vector<ProfilePath>& in_profil
OR_RETURN_NON_FATAL(NewFile::Create(output_profile_path, in_outputProfile->fsPermission));
if (in_referenceProfile.has_value()) {
- if (in_options.forceMerge || in_options.dumpOnly || in_options.dumpClassesAndMethods) {
+ if (in_options.dumpOnly || in_options.dumpClassesAndMethods) {
return Fatal(
- "Reference profile must not be set when 'forceMerge', 'dumpOnly', or "
- "'dumpClassesAndMethods' is set");
+ "Reference profile must not be set when 'dumpOnly' or 'dumpClassesAndMethods' is set");
}
std::string reference_profile_path =
OR_RETURN_FATAL(BuildProfileOrDmPath(*in_referenceProfile));
if (in_referenceProfile->getTag() == ProfilePath::dexMetadataPath) {
- return Fatal("Does not support DM file, got '{}'"_format(reference_profile_path));
+ return Fatal(ART_FORMAT("Does not support DM file, got '{}'", reference_profile_path));
}
OR_RETURN_NON_FATAL(CopyFile(reference_profile_path, *output_profile_file));
}
@@ -705,7 +754,7 @@ ndk::ScopedAStatus Artd::mergeProfiles(const std::vector<ProfilePath>& in_profil
props_->GetOrEmpty("dalvik.vm.bgdexopt.new-classes-percent"))
.AddIfNonEmpty("--min-new-methods-percent-change=%s",
props_->GetOrEmpty("dalvik.vm.bgdexopt.new-methods-percent"))
- .AddIf(in_options.forceMerge, "--force-merge")
+ .AddIf(in_options.forceMerge, "--force-merge-and-analyze")
.AddIf(in_options.forBootImage, "--boot-image-merge");
}
@@ -719,7 +768,7 @@ ndk::ScopedAStatus Artd::mergeProfiles(const std::vector<ProfilePath>& in_profil
return NonFatal("Failed to run profman: " + result.error().message());
}
- LOG(INFO) << "profman returned code {}"_format(result.value());
+ LOG(INFO) << ART_FORMAT("profman returned code {}", result.value());
if (result.value() == ProfmanResult::kSkipCompilationSmallDelta ||
result.value() == ProfmanResult::kSkipCompilationEmptyProfiles) {
@@ -728,11 +777,10 @@ ndk::ScopedAStatus Artd::mergeProfiles(const std::vector<ProfilePath>& in_profil
}
ProfmanResult::ProcessingResult expected_result =
- (in_options.forceMerge || in_options.dumpOnly || in_options.dumpClassesAndMethods) ?
- ProfmanResult::kSuccess :
- ProfmanResult::kCompile;
+ (in_options.dumpOnly || in_options.dumpClassesAndMethods) ? ProfmanResult::kSuccess :
+ ProfmanResult::kCompile;
if (result.value() != expected_result) {
- return NonFatal("profman returned an unexpected code: {}"_format(result.value()));
+ return NonFatal(ART_FORMAT("profman returned an unexpected code: {}", result.value()));
}
OR_RETURN_NON_FATAL(output_profile_file->Keep());
@@ -814,7 +862,8 @@ ndk::ScopedAStatus Artd::dexopt(
if (in_classLoaderContext.has_value()) {
context = ClassLoaderContext::Create(in_classLoaderContext->c_str());
if (context == nullptr) {
- return Fatal("Class loader context '{}' is invalid"_format(in_classLoaderContext.value()));
+ return Fatal(
+ ART_FORMAT("Class loader context '{}' is invalid", in_classLoaderContext.value()));
}
}
@@ -844,9 +893,9 @@ ndk::ScopedAStatus Artd::dexopt(
struct stat dex_st = OR_RETURN_NON_FATAL(Fstat(*dex_file));
if ((dex_st.st_mode & S_IROTH) == 0) {
if (fs_permission.isOtherReadable) {
- return NonFatal(
- "Outputs cannot be other-readable because the dex file '{}' is not other-readable"_format(
- dex_file->GetPath()));
+ return NonFatal(ART_FORMAT(
+ "Outputs cannot be other-readable because the dex file '{}' is not other-readable",
+ dex_file->GetPath()));
}
// Negative numbers mean no `chown`. 0 means root.
// Note: this check is more strict than it needs to be. For example, it doesn't allow the
@@ -855,13 +904,13 @@ ndk::ScopedAStatus Artd::dexopt(
if ((fs_permission.uid > 0 && static_cast<uid_t>(fs_permission.uid) != dex_st.st_uid) ||
(fs_permission.gid > 0 && static_cast<gid_t>(fs_permission.gid) != dex_st.st_uid &&
static_cast<gid_t>(fs_permission.gid) != dex_st.st_gid)) {
- return NonFatal(
- "Outputs' owner doesn't match the dex file '{}' (outputs: {}:{}, dex file: {}:{})"_format(
- dex_file->GetPath(),
- fs_permission.uid,
- fs_permission.gid,
- dex_st.st_uid,
- dex_st.st_gid));
+ return NonFatal(ART_FORMAT(
+ "Outputs' owner doesn't match the dex file '{}' (outputs: {}:{}, dex file: {}:{})",
+ dex_file->GetPath(),
+ fs_permission.uid,
+ fs_permission.gid,
+ dex_st.st_uid,
+ dex_st.st_gid));
}
}
@@ -890,8 +939,9 @@ ndk::ScopedAStatus Artd::dexopt(
std::unique_ptr<NewFile> swap_file = nullptr;
if (ShouldCreateSwapFileForDexopt()) {
- swap_file = OR_RETURN_NON_FATAL(
- NewFile::Create("{}.swap"_format(oat_path), FsPermission{.uid = -1, .gid = -1}));
+ std::string swap_file_path = ART_FORMAT("{}.swap", oat_path);
+ swap_file =
+ OR_RETURN_NON_FATAL(NewFile::Create(swap_file_path, FsPermission{.uid = -1, .gid = -1}));
args.Add("--swap-fd=%d", swap_file->Fd());
fd_logger.Add(*swap_file);
}
@@ -937,9 +987,9 @@ ndk::ScopedAStatus Artd::dexopt(
fd_logger.Add(*profile_file);
struct stat profile_st = OR_RETURN_NON_FATAL(Fstat(*profile_file));
if (fs_permission.isOtherReadable && (profile_st.st_mode & S_IROTH) == 0) {
- return NonFatal(
- "Outputs cannot be other-readable because the profile '{}' is not other-readable"_format(
- profile_file->GetPath()));
+ return NonFatal(ART_FORMAT(
+ "Outputs cannot be other-readable because the profile '{}' is not other-readable",
+ profile_file->GetPath()));
}
// TODO(b/260228411): Check uid and gid.
}
@@ -1001,10 +1051,10 @@ ndk::ScopedAStatus Artd::dexopt(
return NonFatal("Failed to run dex2oat: " + result.error().message());
}
- LOG(INFO) << "dex2oat returned code {}"_format(result.value());
+ LOG(INFO) << ART_FORMAT("dex2oat returned code {}", result.value());
if (result.value() != 0) {
- return NonFatal("dex2oat returned an unexpected code: {}"_format(result.value()));
+ return NonFatal(ART_FORMAT("dex2oat returned an unexpected code: {}", result.value()));
}
int64_t size_bytes = 0;
@@ -1064,7 +1114,7 @@ ScopedAStatus Artd::cleanup(const std::vector<ProfilePath>& in_profilesToKeep,
*_aidl_return = 0;
for (const std::string& file : OR_RETURN_NON_FATAL(ListManagedFiles())) {
if (files_to_keep.find(file) == files_to_keep.end()) {
- LOG(INFO) << "Cleaning up obsolete file '{}'"_format(file);
+ LOG(INFO) << ART_FORMAT("Cleaning up obsolete file '{}'", file);
*_aidl_return += GetSizeAndDeleteFile(file);
}
}
@@ -1099,7 +1149,7 @@ ScopedAStatus Artd::isInDalvikCache(const std::string& in_dexFile, bool* _aidl_r
return ScopedAStatus::ok();
}
- return NonFatal("Fstab entries not found for '{}'"_format(in_dexFile));
+ return NonFatal(ART_FORMAT("Fstab entries not found for '{}'", in_dexFile));
}
ScopedAStatus Artd::validateDexPath(const std::string& in_dexPath,
@@ -1122,7 +1172,7 @@ ScopedAStatus Artd::validateClassLoaderContext(const std::string& in_dexPath,
std::unique_ptr<ClassLoaderContext> context = ClassLoaderContext::Create(in_classLoaderContext);
if (context == nullptr) {
- *_aidl_return = "Class loader context '{}' is invalid"_format(in_classLoaderContext);
+ *_aidl_return = ART_FORMAT("Class loader context '{}' is invalid", in_classLoaderContext);
return ScopedAStatus::ok();
}
@@ -1291,9 +1341,9 @@ void Artd::AddCompilerConfigFlags(const std::string& instruction_set,
const DexoptOptions& dexopt_options,
/*out*/ CmdlineBuilder& args) {
args.Add("--instruction-set=%s", instruction_set);
- std::string features_prop = "dalvik.vm.isa.{}.features"_format(instruction_set);
+ std::string features_prop = ART_FORMAT("dalvik.vm.isa.{}.features", instruction_set);
args.AddIfNonEmpty("--instruction-set-features=%s", props_->GetOrEmpty(features_prop));
- std::string variant_prop = "dalvik.vm.isa.{}.variant"_format(instruction_set);
+ std::string variant_prop = ART_FORMAT("dalvik.vm.isa.{}.variant", instruction_set);
args.AddIfNonEmpty("--instruction-set-variant=%s", props_->GetOrEmpty(variant_prop));
args.Add("--compiler-filter=%s", compiler_filter)
diff --git a/artd/artd.h b/artd/artd.h
index a4012c6da1..774f11a1d1 100644
--- a/artd/artd.h
+++ b/artd/artd.h
@@ -99,7 +99,7 @@ class Artd : public aidl::com::android::server::art::BnArtd {
const aidl::com::android::server::art::ProfilePath& in_src,
aidl::com::android::server::art::OutputProfile* in_dst,
const std::string& in_dexFile,
- bool* _aidl_return) override;
+ aidl::com::android::server::art::CopyAndRewriteProfileResult* _aidl_return) override;
ndk::ScopedAStatus commitTmpProfile(
const aidl::com::android::server::art::ProfilePath::TmpProfilePath& in_profile) override;
diff --git a/artd/artd_test.cc b/artd/artd_test.cc
index 8a760fcaf5..f1806e1c2d 100644
--- a/artd/artd_test.cc
+++ b/artd/artd_test.cc
@@ -25,6 +25,7 @@
#include <chrono>
#include <condition_variable>
#include <csignal>
+#include <cstdio>
#include <filesystem>
#include <functional>
#include <memory>
@@ -52,8 +53,8 @@
#include "android/binder_status.h"
#include "base/array_ref.h"
#include "base/common_art_test.h"
+#include "base/macros.h"
#include "exec_utils.h"
-#include "fmt/format.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "oat_file.h"
@@ -61,6 +62,7 @@
#include "profman/profman_result.h"
#include "testing.h"
#include "tools/system_properties.h"
+#include "ziparchive/zip_writer.h"
namespace art {
namespace artd {
@@ -69,6 +71,7 @@ namespace {
using ::aidl::com::android::server::art::ArtConstants;
using ::aidl::com::android::server::art::ArtdDexoptResult;
using ::aidl::com::android::server::art::ArtifactsPath;
+using ::aidl::com::android::server::art::CopyAndRewriteProfileResult;
using ::aidl::com::android::server::art::DexMetadataPath;
using ::aidl::com::android::server::art::DexoptOptions;
using ::aidl::com::android::server::art::FileVisibility;
@@ -117,8 +120,6 @@ using PrimaryCurProfilePath = ProfilePath::PrimaryCurProfilePath;
using PrimaryRefProfilePath = ProfilePath::PrimaryRefProfilePath;
using TmpProfilePath = ProfilePath::TmpProfilePath;
-using ::fmt::literals::operator""_format; // NOLINT
-
constexpr uid_t kRootUid = 0;
ScopeGuard<std::function<void()>> ScopedSetLogger(android::base::LogFunction&& logger) {
@@ -210,7 +211,7 @@ MATCHER_P2(ListFlag, flag, matcher, "") {
// Matches an FD of a file whose path matches `matcher`.
MATCHER_P(FdOf, matcher, "") {
- std::string proc_path = "/proc/self/fd/{}"_format(arg);
+ std::string proc_path = ART_FORMAT("/proc/self/fd/{}", arg);
char path[PATH_MAX];
ssize_t len = readlink(proc_path.c_str(), path, sizeof(path));
if (len < 0) {
@@ -374,17 +375,17 @@ class ArtdTest : public CommonArtTest {
};
clc_1_ = GetTestDexFileName("Main");
clc_2_ = GetTestDexFileName("Nested");
- class_loader_context_ = "PCL[{}:{}]"_format(clc_1_, clc_2_);
+ class_loader_context_ = ART_FORMAT("PCL[{}:{}]", clc_1_, clc_2_);
compiler_filter_ = "speed";
- TmpProfilePath tmp_profile_path{
- .finalPath =
- PrimaryRefProfilePath{.packageName = "com.android.foo", .profileName = "primary"},
- .id = "12345"};
- profile_path_ = tmp_profile_path;
+ tmp_profile_path_ =
+ TmpProfilePath{.finalPath = PrimaryRefProfilePath{.packageName = "com.android.foo",
+ .profileName = "primary"},
+ .id = "12345"};
+ profile_path_ = tmp_profile_path_;
vdex_path_ = artifacts_path_;
dm_path_ = DexMetadataPath{.dexPath = dex_file_};
std::filesystem::create_directories(
- std::filesystem::path(OR_FATAL(BuildFinalProfilePath(tmp_profile_path))).parent_path());
+ std::filesystem::path(OR_FATAL(BuildFinalProfilePath(tmp_profile_path_))).parent_path());
}
void TearDown() override {
@@ -428,12 +429,48 @@ class ArtdTest : public CommonArtTest {
}
}
+ // Runs `copyAndRewriteProfile` with `tmp_profile_path_` and `dex_file_`.
+ template <bool kExpectOk = true>
+ Result<std::pair<std::conditional_t<kExpectOk, CopyAndRewriteProfileResult, ndk::ScopedAStatus>,
+ OutputProfile>>
+ RunCopyAndRewriteProfile() {
+ OutputProfile dst{.profilePath = tmp_profile_path_,
+ .fsPermission = FsPermission{.uid = -1, .gid = -1}};
+ dst.profilePath.id = "";
+ dst.profilePath.tmpPath = "";
+
+ CopyAndRewriteProfileResult result;
+ ndk::ScopedAStatus status =
+ artd_->copyAndRewriteProfile(tmp_profile_path_, &dst, dex_file_, &result);
+ if constexpr (kExpectOk) {
+ if (!status.isOk()) {
+ return Error() << status.getMessage();
+ }
+ return std::make_pair(std::move(result), std::move(dst));
+ } else {
+ return std::make_pair(std::move(status), std::move(dst));
+ }
+ }
+
void CreateFile(const std::string& filename, const std::string& content = "") {
std::filesystem::path path(filename);
std::filesystem::create_directories(path.parent_path());
ASSERT_TRUE(WriteStringToFile(content, filename));
}
+ void CreateZipWithSingleEntry(const std::string& filename,
+ const std::string& entry_name,
+ const std::string& content = "") {
+ std::unique_ptr<File> file(OS::CreateEmptyFileWriteOnly(filename.c_str()));
+ ASSERT_NE(file, nullptr);
+ file->MarkUnchecked(); // `writer.Finish()` flushes the file and the destructor closes it.
+ ZipWriter writer(fdopen(file->Fd(), "wb"));
+ ASSERT_EQ(writer.StartEntry(entry_name, /*flags=*/0), 0);
+ ASSERT_EQ(writer.WriteBytes(content.c_str(), content.size()), 0);
+ ASSERT_EQ(writer.FinishEntry(), 0);
+ ASSERT_EQ(writer.Finish(), 0);
+ }
+
std::shared_ptr<Artd> artd_;
std::unique_ptr<ScratchDir> scratch_dir_;
std::string scratch_path_;
@@ -463,6 +500,7 @@ class ArtdTest : public CommonArtTest {
PriorityClass priority_class_ = PriorityClass::BACKGROUND;
DexoptOptions dexopt_options_;
std::optional<ProfilePath> profile_path_;
+ TmpProfilePath tmp_profile_path_;
bool dex_file_other_readable_ = true;
bool profile_other_readable_ = true;
@@ -1312,13 +1350,9 @@ TEST_F(ArtdTest, isProfileUsableFailed) {
EXPECT_THAT(status.getMessage(), HasSubstr("profman returned an unexpected code: 100"));
}
-TEST_F(ArtdTest, copyAndRewriteProfile) {
- const TmpProfilePath& src = profile_path_->get<ProfilePath::tmpProfilePath>();
- std::string src_file = OR_FATAL(BuildTmpProfilePath(src));
- CreateFile(src_file, "abc");
- OutputProfile dst{.profilePath = src, .fsPermission = FsPermission{.uid = -1, .gid = -1}};
- dst.profilePath.id = "";
- dst.profilePath.tmpPath = "";
+TEST_F(ArtdTest, copyAndRewriteProfileSuccess) {
+ std::string src_file = OR_FATAL(BuildTmpProfilePath(tmp_profile_path_));
+ CreateFile(src_file, "valid_profile");
CreateFile(dex_file_);
@@ -1338,64 +1372,160 @@ TEST_F(ArtdTest, copyAndRewriteProfile) {
.WillOnce(DoAll(WithArg<0>(WriteToFdFlag("--reference-profile-file-fd=", "def")),
Return(ProfmanResult::kCopyAndUpdateSuccess)));
- bool result;
- EXPECT_TRUE(artd_->copyAndRewriteProfile(src, &dst, dex_file_, &result).isOk());
- EXPECT_TRUE(result);
+ auto [result, dst] = OR_FAIL(RunCopyAndRewriteProfile());
+
+ EXPECT_EQ(result.status, CopyAndRewriteProfileResult::Status::SUCCESS);
EXPECT_THAT(dst.profilePath.id, Not(IsEmpty()));
std::string real_path = OR_FATAL(BuildTmpProfilePath(dst.profilePath));
EXPECT_EQ(dst.profilePath.tmpPath, real_path);
CheckContent(real_path, "def");
}
-TEST_F(ArtdTest, copyAndRewriteProfileFalse) {
- const TmpProfilePath& src = profile_path_->get<ProfilePath::tmpProfilePath>();
- std::string src_file = OR_FATAL(BuildTmpProfilePath(src));
- CreateFile(src_file, "abc");
- OutputProfile dst{.profilePath = src, .fsPermission = FsPermission{.uid = -1, .gid = -1}};
- dst.profilePath.id = "";
- dst.profilePath.tmpPath = "";
+// The input is a plain profile file in the wrong format.
+TEST_F(ArtdTest, copyAndRewriteProfileBadProfileWrongFormat) {
+ std::string src_file = OR_FATAL(BuildTmpProfilePath(tmp_profile_path_));
+ CreateFile(src_file, "wrong_format");
+
+ CreateFile(dex_file_);
+
+ EXPECT_CALL(*mock_exec_utils_, DoExecAndReturnCode(_, _, _))
+ .WillOnce(Return(ProfmanResult::kCopyAndUpdateErrorFailedToLoadProfile));
+
+ auto [result, dst] = OR_FAIL(RunCopyAndRewriteProfile());
+
+ EXPECT_EQ(result.status, CopyAndRewriteProfileResult::Status::BAD_PROFILE);
+ EXPECT_THAT(result.errorMsg,
+ HasSubstr("The profile is in the wrong format or an I/O error has occurred"));
+ EXPECT_THAT(dst.profilePath.id, IsEmpty());
+ EXPECT_THAT(dst.profilePath.tmpPath, IsEmpty());
+}
+
+// The input is a plain profile file that doesn't match the APK.
+TEST_F(ArtdTest, copyAndRewriteProfileBadProfileNoMatch) {
+ std::string src_file = OR_FATAL(BuildTmpProfilePath(tmp_profile_path_));
+ CreateFile(src_file, "no_match");
CreateFile(dex_file_);
EXPECT_CALL(*mock_exec_utils_, DoExecAndReturnCode(_, _, _))
.WillOnce(Return(ProfmanResult::kCopyAndUpdateNoMatch));
- bool result;
- EXPECT_TRUE(artd_->copyAndRewriteProfile(src, &dst, dex_file_, &result).isOk());
- EXPECT_FALSE(result);
+ auto [result, dst] = OR_FAIL(RunCopyAndRewriteProfile());
+
+ EXPECT_EQ(result.status, CopyAndRewriteProfileResult::Status::BAD_PROFILE);
+ EXPECT_THAT(result.errorMsg, HasSubstr("The profile does not match the APK"));
EXPECT_THAT(dst.profilePath.id, IsEmpty());
EXPECT_THAT(dst.profilePath.tmpPath, IsEmpty());
}
-TEST_F(ArtdTest, copyAndRewriteProfileNotFound) {
+// The input is a plain profile file that is empty.
+TEST_F(ArtdTest, copyAndRewriteProfileNoProfileEmpty) {
+ std::string src_file = OR_FATAL(BuildTmpProfilePath(tmp_profile_path_));
+ CreateFile(src_file, "");
+
CreateFile(dex_file_);
- const TmpProfilePath& src = profile_path_->get<ProfilePath::tmpProfilePath>();
- OutputProfile dst{.profilePath = src, .fsPermission = FsPermission{.uid = -1, .gid = -1}};
- dst.profilePath.id = "";
- dst.profilePath.tmpPath = "";
+ EXPECT_CALL(*mock_exec_utils_, DoExecAndReturnCode(_, _, _))
+ .WillOnce(Return(ProfmanResult::kCopyAndUpdateNoMatch));
+
+ auto [result, dst] = OR_FAIL(RunCopyAndRewriteProfile());
- bool result;
- EXPECT_TRUE(artd_->copyAndRewriteProfile(src, &dst, dex_file_, &result).isOk());
- EXPECT_FALSE(result);
+ EXPECT_EQ(result.status, CopyAndRewriteProfileResult::Status::NO_PROFILE);
+ EXPECT_THAT(dst.profilePath.id, IsEmpty());
+ EXPECT_THAT(dst.profilePath.tmpPath, IsEmpty());
+}
+
+// The input does not exist.
+TEST_F(ArtdTest, copyAndRewriteProfileNoProfileNoFile) {
+ CreateFile(dex_file_);
+
+ auto [result, dst] = OR_FAIL(RunCopyAndRewriteProfile());
+
+ EXPECT_EQ(result.status, CopyAndRewriteProfileResult::Status::NO_PROFILE);
+ EXPECT_THAT(dst.profilePath.id, IsEmpty());
+ EXPECT_THAT(dst.profilePath.tmpPath, IsEmpty());
+}
+
+// The input is a dm file with a profile entry in the wrong format.
+TEST_F(ArtdTest, copyAndRewriteProfileNoProfileDmWrongFormat) {
+ std::string src_file = OR_FATAL(BuildTmpProfilePath(tmp_profile_path_));
+ CreateZipWithSingleEntry(src_file, "primary.prof", "wrong_format");
+
+ CreateFile(dex_file_);
+
+ EXPECT_CALL(*mock_exec_utils_, DoExecAndReturnCode(_, _, _))
+ .WillOnce(Return(ProfmanResult::kCopyAndUpdateErrorFailedToLoadProfile));
+
+ auto [result, dst] = OR_FAIL(RunCopyAndRewriteProfile());
+
+ EXPECT_EQ(result.status, CopyAndRewriteProfileResult::Status::BAD_PROFILE);
+ EXPECT_THAT(result.errorMsg,
+ HasSubstr("The profile is in the wrong format or an I/O error has occurred"));
+ EXPECT_THAT(dst.profilePath.id, IsEmpty());
+ EXPECT_THAT(dst.profilePath.tmpPath, IsEmpty());
+}
+
+// The input is a dm file with a profile entry that doesn't match the APK.
+TEST_F(ArtdTest, copyAndRewriteProfileNoProfileDmNoMatch) {
+ std::string src_file = OR_FATAL(BuildTmpProfilePath(tmp_profile_path_));
+ CreateZipWithSingleEntry(src_file, "primary.prof", "no_match");
+
+ CreateFile(dex_file_);
+
+ EXPECT_CALL(*mock_exec_utils_, DoExecAndReturnCode(_, _, _))
+ .WillOnce(Return(ProfmanResult::kCopyAndUpdateNoMatch));
+
+ auto [result, dst] = OR_FAIL(RunCopyAndRewriteProfile());
+
+ EXPECT_EQ(result.status, CopyAndRewriteProfileResult::Status::BAD_PROFILE);
+ EXPECT_THAT(result.errorMsg, HasSubstr("The profile does not match the APK"));
+ EXPECT_THAT(dst.profilePath.id, IsEmpty());
+ EXPECT_THAT(dst.profilePath.tmpPath, IsEmpty());
+}
+
+// The input is a dm file with a profile entry that is empty.
+TEST_F(ArtdTest, copyAndRewriteProfileNoProfileDmEmpty) {
+ std::string src_file = OR_FATAL(BuildTmpProfilePath(tmp_profile_path_));
+ CreateZipWithSingleEntry(src_file, "primary.prof");
+
+ CreateFile(dex_file_);
+
+ EXPECT_CALL(*mock_exec_utils_, DoExecAndReturnCode(_, _, _))
+ .WillOnce(Return(ProfmanResult::kCopyAndUpdateNoMatch));
+
+ auto [result, dst] = OR_FAIL(RunCopyAndRewriteProfile());
+
+ EXPECT_EQ(result.status, CopyAndRewriteProfileResult::Status::NO_PROFILE);
EXPECT_THAT(dst.profilePath.id, IsEmpty());
EXPECT_THAT(dst.profilePath.tmpPath, IsEmpty());
}
-TEST_F(ArtdTest, copyAndRewriteProfileFailed) {
- const TmpProfilePath& src = profile_path_->get<ProfilePath::tmpProfilePath>();
- std::string src_file = OR_FATAL(BuildTmpProfilePath(src));
- CreateFile(src_file, "abc");
- OutputProfile dst{.profilePath = src, .fsPermission = FsPermission{.uid = -1, .gid = -1}};
- dst.profilePath.id = "";
- dst.profilePath.tmpPath = "";
+// The input is a dm file without a profile entry.
+TEST_F(ArtdTest, copyAndRewriteProfileNoProfileDmNoEntry) {
+ std::string src_file = OR_FATAL(BuildTmpProfilePath(tmp_profile_path_));
+ CreateZipWithSingleEntry(src_file, "primary.vdex");
+
+ CreateFile(dex_file_);
+
+ EXPECT_CALL(*mock_exec_utils_, DoExecAndReturnCode(_, _, _))
+ .WillOnce(Return(ProfmanResult::kCopyAndUpdateNoMatch));
+
+ auto [result, dst] = OR_FAIL(RunCopyAndRewriteProfile());
+
+ EXPECT_EQ(result.status, CopyAndRewriteProfileResult::Status::NO_PROFILE);
+ EXPECT_THAT(dst.profilePath.id, IsEmpty());
+ EXPECT_THAT(dst.profilePath.tmpPath, IsEmpty());
+}
+
+TEST_F(ArtdTest, copyAndRewriteProfileException) {
+ std::string src_file = OR_FATAL(BuildTmpProfilePath(tmp_profile_path_));
+ CreateFile(src_file, "valid_profile");
CreateFile(dex_file_);
EXPECT_CALL(*mock_exec_utils_, DoExecAndReturnCode(_, _, _)).WillOnce(Return(100));
- bool result;
- ndk::ScopedAStatus status = artd_->copyAndRewriteProfile(src, &dst, dex_file_, &result);
+ auto [status, dst] = OR_FAIL(RunCopyAndRewriteProfile</*kExpectOk=*/false>());
EXPECT_FALSE(status.isOk());
EXPECT_EQ(status.getExceptionCode(), EX_SERVICE_SPECIFIC);
@@ -1405,19 +1535,17 @@ TEST_F(ArtdTest, copyAndRewriteProfileFailed) {
}
TEST_F(ArtdTest, commitTmpProfile) {
- const TmpProfilePath& tmp_profile_path = profile_path_->get<ProfilePath::tmpProfilePath>();
- std::string tmp_profile_file = OR_FATAL(BuildTmpProfilePath(tmp_profile_path));
+ std::string tmp_profile_file = OR_FATAL(BuildTmpProfilePath(tmp_profile_path_));
CreateFile(tmp_profile_file);
- EXPECT_TRUE(artd_->commitTmpProfile(tmp_profile_path).isOk());
+ EXPECT_TRUE(artd_->commitTmpProfile(tmp_profile_path_).isOk());
EXPECT_FALSE(std::filesystem::exists(tmp_profile_file));
- EXPECT_TRUE(std::filesystem::exists(OR_FATAL(BuildFinalProfilePath(tmp_profile_path))));
+ EXPECT_TRUE(std::filesystem::exists(OR_FATAL(BuildFinalProfilePath(tmp_profile_path_))));
}
TEST_F(ArtdTest, commitTmpProfileFailed) {
- const TmpProfilePath& tmp_profile_path = profile_path_->get<ProfilePath::tmpProfilePath>();
- ndk::ScopedAStatus status = artd_->commitTmpProfile(tmp_profile_path);
+ ndk::ScopedAStatus status = artd_->commitTmpProfile(tmp_profile_path_);
EXPECT_FALSE(status.isOk());
EXPECT_EQ(status.getExceptionCode(), EX_SERVICE_SPECIFIC);
@@ -1425,7 +1553,7 @@ TEST_F(ArtdTest, commitTmpProfileFailed) {
status.getMessage(),
ContainsRegex(R"re(Failed to move .*primary\.prof\.12345\.tmp.* to .*primary\.prof)re"));
- EXPECT_FALSE(std::filesystem::exists(OR_FATAL(BuildFinalProfilePath(tmp_profile_path))));
+ EXPECT_FALSE(std::filesystem::exists(OR_FATAL(BuildFinalProfilePath(tmp_profile_path_))));
}
TEST_F(ArtdTest, deleteProfile) {
@@ -1592,7 +1720,7 @@ TEST_F(ArtdGetVisibilityTest, getDmFileVisibilityPermissionDenied) {
}
TEST_F(ArtdTest, mergeProfiles) {
- const TmpProfilePath& reference_profile_path = profile_path_->get<ProfilePath::tmpProfilePath>();
+ const TmpProfilePath& reference_profile_path = tmp_profile_path_;
std::string reference_profile_file = OR_FATAL(BuildTmpProfilePath(reference_profile_path));
CreateFile(reference_profile_file, "abc");
@@ -1628,7 +1756,7 @@ TEST_F(ArtdTest, mergeProfiles) {
Contains(Flag("--reference-profile-file-fd=", FdHasContent("abc"))),
Contains(Flag("--apk-fd=", FdOf(dex_file_1))),
Contains(Flag("--apk-fd=", FdOf(dex_file_2))),
- Not(Contains("--force-merge")),
+ Not(Contains("--force-merge-and-analyze")),
Not(Contains("--boot-image-merge")))),
HasKeepFdsFor("--profile-file-fd=", "--reference-profile-file-fd=", "--apk-fd=")),
_,
@@ -1658,7 +1786,7 @@ TEST_F(ArtdTest, mergeProfilesEmptyReferenceProfile) {
std::string profile_0_file = OR_FATAL(BuildPrimaryCurProfilePath(profile_0_path));
CreateFile(profile_0_file, "def");
- OutputProfile output_profile{.profilePath = profile_path_->get<ProfilePath::tmpProfilePath>(),
+ OutputProfile output_profile{.profilePath = tmp_profile_path_,
.fsPermission = FsPermission{.uid = -1, .gid = -1}};
output_profile.profilePath.id = "";
output_profile.profilePath.tmpPath = "";
@@ -1694,7 +1822,7 @@ TEST_F(ArtdTest, mergeProfilesEmptyReferenceProfile) {
}
TEST_F(ArtdTest, mergeProfilesProfilesDontExist) {
- const TmpProfilePath& reference_profile_path = profile_path_->get<ProfilePath::tmpProfilePath>();
+ const TmpProfilePath& reference_profile_path = tmp_profile_path_;
std::string reference_profile_file = OR_FATAL(BuildTmpProfilePath(reference_profile_path));
CreateFile(reference_profile_file, "abc");
@@ -1737,20 +1865,21 @@ TEST_F(ArtdTest, mergeProfilesWithOptionsForceMerge) {
std::string profile_0_file = OR_FATAL(BuildPrimaryCurProfilePath(profile_0_path));
CreateFile(profile_0_file, "def");
- OutputProfile output_profile{.profilePath = profile_path_->get<ProfilePath::tmpProfilePath>(),
+ OutputProfile output_profile{.profilePath = tmp_profile_path_,
.fsPermission = FsPermission{.uid = -1, .gid = -1}};
output_profile.profilePath.id = "";
output_profile.profilePath.tmpPath = "";
CreateFile(dex_file_);
- EXPECT_CALL(
- *mock_exec_utils_,
- DoExecAndReturnCode(
- WhenSplitBy("--", _, AllOf(Contains("--force-merge"), Contains("--boot-image-merge"))),
- _,
- _))
- .WillOnce(Return(ProfmanResult::kSuccess));
+ EXPECT_CALL(*mock_exec_utils_,
+ DoExecAndReturnCode(WhenSplitBy("--",
+ _,
+ AllOf(Contains("--force-merge-and-analyze"),
+ Contains("--boot-image-merge"))),
+ _,
+ _))
+ .WillOnce(Return(ProfmanResult::kCompile));
bool result;
EXPECT_TRUE(artd_
@@ -1772,7 +1901,7 @@ TEST_F(ArtdTest, mergeProfilesWithOptionsDumpOnly) {
std::string profile_0_file = OR_FATAL(BuildPrimaryCurProfilePath(profile_0_path));
CreateFile(profile_0_file, "def");
- OutputProfile output_profile{.profilePath = profile_path_->get<ProfilePath::tmpProfilePath>(),
+ OutputProfile output_profile{.profilePath = tmp_profile_path_,
.fsPermission = FsPermission{.uid = -1, .gid = -1}};
output_profile.profilePath.id = "";
output_profile.profilePath.tmpPath = "";
@@ -1811,7 +1940,7 @@ TEST_F(ArtdTest, mergeProfilesWithOptionsDumpClassesAndMethods) {
std::string profile_0_file = OR_FATAL(BuildPrimaryCurProfilePath(profile_0_path));
CreateFile(profile_0_file, "def");
- OutputProfile output_profile{.profilePath = profile_path_->get<ProfilePath::tmpProfilePath>(),
+ OutputProfile output_profile{.profilePath = tmp_profile_path_,
.fsPermission = FsPermission{.uid = -1, .gid = -1}};
output_profile.profilePath.id = "";
output_profile.profilePath.tmpPath = "";
@@ -1957,11 +2086,11 @@ TEST_F(ArtdTest, cleanup) {
.isOk());
for (const std::string& path : gc_removed_files) {
- EXPECT_FALSE(std::filesystem::exists(path)) << "'{}' should be removed"_format(path);
+ EXPECT_FALSE(std::filesystem::exists(path)) << ART_FORMAT("'{}' should be removed", path);
}
for (const std::string& path : gc_kept_files) {
- EXPECT_TRUE(std::filesystem::exists(path)) << "'{}' should be kept"_format(path);
+ EXPECT_TRUE(std::filesystem::exists(path)) << ART_FORMAT("'{}' should be kept", path);
}
}
diff --git a/artd/binder/com/android/server/art/CopyAndRewriteProfileResult.aidl b/artd/binder/com/android/server/art/CopyAndRewriteProfileResult.aidl
new file mode 100644
index 0000000000..37b7a9fe0d
--- /dev/null
+++ b/artd/binder/com/android/server/art/CopyAndRewriteProfileResult.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.server.art;
+
+/**
 + * The result of {@code IArtd.copyAndRewriteProfile}.
+ *
+ * @hide
+ */
+parcelable CopyAndRewriteProfileResult {
+ /** The status code. */
+ Status status;
+ /** The error message, if `status` is `BAD_PROFILE`. */
+ @utf8InCpp String errorMsg;
+
+ @Backing(type="int")
+ enum Status {
+ /** The operation succeeded. */
+ SUCCESS = 0,
 + /** The input does not exist or is empty. This is not considered an error. */
+ NO_PROFILE = 1,
+ /** The input is a bad profile. */
+ BAD_PROFILE = 2,
+ }
+}
diff --git a/artd/binder/com/android/server/art/IArtd.aidl b/artd/binder/com/android/server/art/IArtd.aidl
index ec57bd451b..3b55297ab2 100644
--- a/artd/binder/com/android/server/art/IArtd.aidl
+++ b/artd/binder/com/android/server/art/IArtd.aidl
@@ -46,13 +46,14 @@ interface IArtd {
@utf8InCpp String dexFile);
/**
- * Copies the profile and rewrites it for the given dex file. Returns true and fills
+ * Copies the profile and rewrites it for the given dex file. Returns `SUCCESS` and fills
* `dst.profilePath.id` if the operation succeeds and `src` exists and contains entries that
* match the given dex file.
*
- * Throws fatal and non-fatal errors.
+ * Throws fatal and non-fatal errors, except if the input is a bad profile.
*/
- boolean copyAndRewriteProfile(in com.android.server.art.ProfilePath src,
+ com.android.server.art.CopyAndRewriteProfileResult copyAndRewriteProfile(
+ in com.android.server.art.ProfilePath src,
inout com.android.server.art.OutputProfile dst, @utf8InCpp String dexFile);
/**
diff --git a/artd/binder/com/android/server/art/MergeProfileOptions.aidl b/artd/binder/com/android/server/art/MergeProfileOptions.aidl
index 2d007f9406..5d3591ccf2 100644
--- a/artd/binder/com/android/server/art/MergeProfileOptions.aidl
+++ b/artd/binder/com/android/server/art/MergeProfileOptions.aidl
@@ -28,7 +28,7 @@ package com.android.server.art;
* @hide
*/
parcelable MergeProfileOptions {
- /** --force-merge */
+ /** --force-merge-and-analyze */
boolean forceMerge;
/** --boot-image-merge */
boolean forBootImage;
diff --git a/artd/file_utils.cc b/artd/file_utils.cc
index fb55dec5d5..f3558534ba 100644
--- a/artd/file_utils.cc
+++ b/artd/file_utils.cc
@@ -33,9 +33,9 @@
#include "android-base/logging.h"
#include "android-base/result.h"
#include "android-base/scopeguard.h"
+#include "base/macros.h"
#include "base/os.h"
#include "base/unix_file/fd_file.h"
-#include "fmt/format.h"
namespace art {
namespace artd {
@@ -46,13 +46,11 @@ using ::aidl::com::android::server::art::FsPermission;
using ::android::base::make_scope_guard;
using ::android::base::Result;
-using ::fmt::literals::operator""_format; // NOLINT
-
void UnlinkIfExists(const std::string& path) {
std::error_code ec;
std::filesystem::remove(path, ec);
if (ec) {
- LOG(WARNING) << "Failed to remove file '{}': {}"_format(path, ec.message());
+ LOG(WARNING) << ART_FORMAT("Failed to remove file '{}': {}", path, ec.message());
}
}
@@ -143,8 +141,10 @@ Result<void> NewFile::CommitAllOrAbandon(const std::vector<NewFile*>& files_to_c
if (ec) {
// This should never happen. We were able to move the file from `original_path` to
// `temp_path`. We should be able to move it back.
- LOG(WARNING) << "Failed to move old file '{}' back from temporary path '{}': {}"_format(
- original_path, temp_path, ec.message());
+ LOG(WARNING) << ART_FORMAT("Failed to move old file '{}' back from temporary path '{}': {}",
+ original_path,
+ temp_path,
+ ec.message());
}
}
});
@@ -206,7 +206,7 @@ Result<void> NewFile::CommitAllOrAbandon(const std::vector<NewFile*>& files_to_c
}
std::string NewFile::BuildTempPath(std::string_view final_path, const std::string& id) {
- return "{}.{}.tmp"_format(final_path, id);
+ return ART_FORMAT("{}.{}.tmp", final_path, id);
}
Result<std::unique_ptr<File>> OpenFileForReading(const std::string& path) {
diff --git a/artd/path_utils.cc b/artd/path_utils.cc
index a0d38c4d4a..6ff9b95bc1 100644
--- a/artd/path_utils.cc
+++ b/artd/path_utils.cc
@@ -26,8 +26,8 @@
#include "android-base/strings.h"
#include "arch/instruction_set.h"
#include "base/file_utils.h"
+#include "base/macros.h"
#include "file_utils.h"
-#include "fmt/format.h"
#include "fstab/fstab.h"
#include "oat_file_assistant.h"
#include "tools/tools.h"
@@ -48,8 +48,6 @@ using ::android::fs_mgr::Fstab;
using ::android::fs_mgr::FstabEntry;
using ::android::fs_mgr::ReadFstabFromProcMounts;
-using ::fmt::literals::operator""_format; // NOLINT
-
using PrebuiltProfilePath = ProfilePath::PrebuiltProfilePath;
using PrimaryCurProfilePath = ProfilePath::PrimaryCurProfilePath;
using PrimaryRefProfilePath = ProfilePath::PrimaryRefProfilePath;
@@ -163,7 +161,7 @@ Result<void> ValidateDexPath(const std::string& dex_path) {
}
Result<std::string> BuildArtBinPath(const std::string& binary_name) {
- return "{}/bin/{}"_format(OR_RETURN(GetArtRootOrError()), binary_name);
+ return ART_FORMAT("{}/bin/{}", OR_RETURN(GetArtRootOrError()), binary_name);
}
Result<std::string> BuildOatPath(const ArtifactsPath& artifacts_path) {
@@ -196,9 +194,10 @@ Result<std::string> BuildPrimaryRefProfilePath(
const PrimaryRefProfilePath& primary_ref_profile_path) {
OR_RETURN(ValidatePathElement(primary_ref_profile_path.packageName, "packageName"));
OR_RETURN(ValidatePathElementSubstring(primary_ref_profile_path.profileName, "profileName"));
- return "{}/misc/profiles/ref/{}/{}.prof"_format(OR_RETURN(GetAndroidDataOrError()),
- primary_ref_profile_path.packageName,
- primary_ref_profile_path.profileName);
+ return ART_FORMAT("{}/misc/profiles/ref/{}/{}.prof",
+ OR_RETURN(GetAndroidDataOrError()),
+ primary_ref_profile_path.packageName,
+ primary_ref_profile_path.profileName);
}
Result<std::string> BuildPrebuiltProfilePath(const PrebuiltProfilePath& prebuilt_profile_path) {
@@ -210,24 +209,27 @@ Result<std::string> BuildPrimaryCurProfilePath(
const PrimaryCurProfilePath& primary_cur_profile_path) {
OR_RETURN(ValidatePathElement(primary_cur_profile_path.packageName, "packageName"));
OR_RETURN(ValidatePathElementSubstring(primary_cur_profile_path.profileName, "profileName"));
- return "{}/misc/profiles/cur/{}/{}/{}.prof"_format(OR_RETURN(GetAndroidDataOrError()),
- primary_cur_profile_path.userId,
- primary_cur_profile_path.packageName,
- primary_cur_profile_path.profileName);
+ return ART_FORMAT("{}/misc/profiles/cur/{}/{}/{}.prof",
+ OR_RETURN(GetAndroidDataOrError()),
+ primary_cur_profile_path.userId,
+ primary_cur_profile_path.packageName,
+ primary_cur_profile_path.profileName);
}
Result<std::string> BuildSecondaryRefProfilePath(
const SecondaryRefProfilePath& secondary_ref_profile_path) {
OR_RETURN(ValidateDexPath(secondary_ref_profile_path.dexPath));
std::filesystem::path dex_path(secondary_ref_profile_path.dexPath);
- return "{}/oat/{}.prof"_format(dex_path.parent_path().string(), dex_path.filename().string());
+ return ART_FORMAT(
+ "{}/oat/{}.prof", dex_path.parent_path().string(), dex_path.filename().string());
}
Result<std::string> BuildSecondaryCurProfilePath(
const SecondaryCurProfilePath& secondary_cur_profile_path) {
OR_RETURN(ValidateDexPath(secondary_cur_profile_path.dexPath));
std::filesystem::path dex_path(secondary_cur_profile_path.dexPath);
- return "{}/oat/{}.cur.prof"_format(dex_path.parent_path().string(), dex_path.filename().string());
+ return ART_FORMAT(
+ "{}/oat/{}.cur.prof", dex_path.parent_path().string(), dex_path.filename().string());
}
Result<std::string> BuildFinalProfilePath(const TmpProfilePath& tmp_profile_path) {
@@ -240,7 +242,7 @@ Result<std::string> BuildFinalProfilePath(const TmpProfilePath& tmp_profile_path
// No default. All cases should be explicitly handled, or the compilation will fail.
}
// This should never happen. Just in case we get a non-enumerator value.
- LOG(FATAL) << "Unexpected writable profile path type {}"_format(final_path.getTag());
+ LOG(FATAL) << ART_FORMAT("Unexpected writable profile path type {}", final_path.getTag());
}
Result<std::string> BuildTmpProfilePath(const TmpProfilePath& tmp_profile_path) {
@@ -273,7 +275,7 @@ Result<std::string> BuildProfileOrDmPath(const ProfilePath& profile_path) {
// No default. All cases should be explicitly handled, or the compilation will fail.
}
// This should never happen. Just in case we get a non-enumerator value.
- LOG(FATAL) << "Unexpected profile path type {}"_format(profile_path.getTag());
+ LOG(FATAL) << ART_FORMAT("Unexpected profile path type {}", profile_path.getTag());
}
Result<std::string> BuildVdexPath(const VdexPath& vdex_path) {
diff --git a/artd/testing.h b/artd/testing.h
index df01a9a814..8bdbe8916e 100644
--- a/artd/testing.h
+++ b/artd/testing.h
@@ -21,7 +21,7 @@
// mismatch. This is only to be used in a gMock matcher.
#define OR_MISMATCH(expr) \
({ \
- decltype(expr)&& tmp__ = (expr); \
+ auto&& tmp__ = (expr); \
if (!tmp__.ok()) { \
*result_listener << tmp__.error().message(); \
return false; \
@@ -32,7 +32,7 @@
// Returns the value of the given `android::base::Result`, or fails the GoogleTest.
#define OR_FAIL(expr) \
({ \
- decltype(expr)&& tmp__ = (expr); \
+ auto&& tmp__ = (expr); \
ASSERT_TRUE(tmp__.ok()) << tmp__.error().message(); \
std::move(tmp__).value(); \
})
diff --git a/compiler/optimizing/write_barrier_elimination.cc b/compiler/optimizing/write_barrier_elimination.cc
index eb70b670fe..6182125b74 100644
--- a/compiler/optimizing/write_barrier_elimination.cc
+++ b/compiler/optimizing/write_barrier_elimination.cc
@@ -21,6 +21,9 @@
#include "base/scoped_arena_containers.h"
#include "optimizing/nodes.h"
+// TODO(b/310755375, solanes): Disable WBE while we investigate crashes.
+constexpr bool kWBEEnabled = false;
+
namespace art HIDDEN {
class WBEVisitor final : public HGraphVisitor {
@@ -153,8 +156,10 @@ class WBEVisitor final : public HGraphVisitor {
};
bool WriteBarrierElimination::Run() {
- WBEVisitor wbe_visitor(graph_, stats_);
- wbe_visitor.VisitReversePostOrder();
+ if (kWBEEnabled) {
+ WBEVisitor wbe_visitor(graph_, stats_);
+ wbe_visitor.VisitReversePostOrder();
+ }
return true;
}
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 63ede1b42f..ad10acc0ca 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -3367,9 +3367,15 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig,
nullptr, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
}
- if (!orig->IsRuntimeMethod() &&
- (compiler_options_.IsBootImage() || compiler_options_.IsBootImageExtension())) {
- orig->SetMemorySharedMethod();
+ if (!orig->IsRuntimeMethod()) {
+ // If we're compiling a boot image and we have a profile, set methods as
+ // being shared memory (to avoid dirtying them with hotness counter). We
+ // expect important methods to be AOT, and non-important methods to be run
+ // in the interpreter.
+ if (CompilerFilter::DependsOnProfile(compiler_options_.GetCompilerFilter()) &&
+ (compiler_options_.IsBootImage() || compiler_options_.IsBootImageExtension())) {
+ orig->SetMemorySharedMethod();
+ }
}
memcpy(copy, orig, ArtMethod::Size(target_ptr_size_));
diff --git a/libartbase/base/macros.h b/libartbase/base/macros.h
index 13e87d770d..5f2100f9e8 100644
--- a/libartbase/base/macros.h
+++ b/libartbase/base/macros.h
@@ -20,6 +20,7 @@
#include <stddef.h> // for size_t
#include <unistd.h> // for TEMP_FAILURE_RETRY
+#include "android-base/format.h"
#include "android-base/macros.h"
#include "android-base/thread_annotations.h"
@@ -32,6 +33,9 @@ friend class test_set_name##_##individual_test##_Test
#define ART_FRIEND_TYPED_TEST(test_set_name, individual_test)\
template<typename T> ART_FRIEND_TEST(test_set_name, individual_test)
 +// Shorthand for formatting with compile-time checking of the format string.
+#define ART_FORMAT(str, ...) ::fmt::format(FMT_STRING(str), __VA_ARGS__)
+
// A macro to disallow new and delete operators for a class. It goes in the private: declarations.
// NOTE: Providing placement new (and matching delete) for constructing container elements.
#define DISALLOW_ALLOCATION() \
diff --git a/libartservice/service/java/com/android/server/art/ArtManagerLocal.java b/libartservice/service/java/com/android/server/art/ArtManagerLocal.java
index 378bfa46be..f8d6b5643e 100644
--- a/libartservice/service/java/com/android/server/art/ArtManagerLocal.java
+++ b/libartservice/service/java/com/android/server/art/ArtManagerLocal.java
@@ -23,6 +23,7 @@ import static com.android.server.art.PrimaryDexUtils.PrimaryDexInfo;
import static com.android.server.art.ReasonMapping.BatchDexoptReason;
import static com.android.server.art.ReasonMapping.BootReason;
import static com.android.server.art.Utils.Abi;
+import static com.android.server.art.Utils.InitProfileResult;
import static com.android.server.art.model.ArtFlags.GetStatusFlags;
import static com.android.server.art.model.ArtFlags.ScheduleStatus;
import static com.android.server.art.model.Config.Callback;
@@ -83,8 +84,10 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
+import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
@@ -455,7 +458,9 @@ public final class ArtManagerLocal {
* @param reason determines the default list of packages and options
* @param cancellationSignal provides the ability to cancel this operation
* @param processCallbackExecutor the executor to call {@code progressCallback}
- * @param progressCallback called repeatedly whenever there is an update on the progress
+ * @param progressCallbacks a mapping from an integer, in {@link ArtFlags.BatchDexoptPass}, to
+ * the callback that is called repeatedly whenever there is an update on the progress
+ * @return a mapping from an integer, in {@link ArtFlags.BatchDexoptPass}, to the dexopt result.
* @throws IllegalStateException if the operation encounters an error that should never happen
* (e.g., an internal logic error), or the callback set by {@link
* #setBatchDexoptStartCallback(Executor, BatchDexoptStartCallback)} provides invalid
@@ -465,11 +470,12 @@ public final class ArtManagerLocal {
*/
@RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
@NonNull
- public DexoptResult dexoptPackages(@NonNull PackageManagerLocal.FilteredSnapshot snapshot,
+ public Map<Integer, DexoptResult> dexoptPackages(
+ @NonNull PackageManagerLocal.FilteredSnapshot snapshot,
@NonNull @BatchDexoptReason String reason,
@NonNull CancellationSignal cancellationSignal,
@Nullable @CallbackExecutor Executor progressCallbackExecutor,
- @Nullable Consumer<OperationProgress> progressCallback) {
+ @Nullable Map<Integer, Consumer<OperationProgress>> progressCallbacks) {
List<String> defaultPackages =
Collections.unmodifiableList(getDefaultPackages(snapshot, reason));
DexoptParams defaultDexoptParams = new DexoptParams.Builder(reason).build();
@@ -487,16 +493,37 @@ public final class ArtManagerLocal {
ExecutorService dexoptExecutor =
Executors.newFixedThreadPool(ReasonMapping.getConcurrencyForReason(reason));
+ Map<Integer, DexoptResult> dexoptResults = new HashMap<>();
try {
if (reason.equals(ReasonMapping.REASON_BG_DEXOPT)) {
- maybeDowngradePackages(snapshot,
+ DexoptResult downgradeResult = maybeDowngradePackages(snapshot,
new HashSet<>(params.getPackages()) /* excludedPackages */,
- cancellationSignal, dexoptExecutor);
+ cancellationSignal, dexoptExecutor, progressCallbackExecutor,
+ progressCallbacks != null ? progressCallbacks.get(ArtFlags.PASS_DOWNGRADE)
+ : null);
+ if (downgradeResult != null) {
+ dexoptResults.put(ArtFlags.PASS_DOWNGRADE, downgradeResult);
+ }
+ }
+ Log.i(TAG,
+ "Dexopting " + params.getPackages().size() + " packages with reason=" + reason);
+ DexoptResult mainResult = mInjector.getDexoptHelper().dexopt(snapshot,
+ params.getPackages(), params.getDexoptParams(), cancellationSignal,
+ dexoptExecutor, progressCallbackExecutor,
+ progressCallbacks != null ? progressCallbacks.get(ArtFlags.PASS_MAIN) : null);
+ dexoptResults.put(ArtFlags.PASS_MAIN, mainResult);
+ if (reason.equals(ReasonMapping.REASON_BG_DEXOPT)) {
+ DexoptResult supplementaryResult = maybeDexoptPackagesSupplementaryPass(snapshot,
+ mainResult, params.getDexoptParams(), cancellationSignal, dexoptExecutor,
+ progressCallbackExecutor,
+ progressCallbacks != null
+ ? progressCallbacks.get(ArtFlags.PASS_SUPPLEMENTARY)
+ : null);
+ if (supplementaryResult != null) {
+ dexoptResults.put(ArtFlags.PASS_SUPPLEMENTARY, supplementaryResult);
+ }
}
- Log.i(TAG, "Dexopting packages");
- return mInjector.getDexoptHelper().dexopt(snapshot, params.getPackages(),
- params.getDexoptParams(), cancellationSignal, dexoptExecutor,
- progressCallbackExecutor, progressCallback);
+ return dexoptResults;
} finally {
dexoptExecutor.shutdown();
}
@@ -738,12 +765,18 @@ public final class ArtManagerLocal {
List<ProfilePath> profiles = new ArrayList<>();
- Pair<ProfilePath, Boolean> pair = Utils.getOrInitReferenceProfile(mInjector.getArtd(),
+ InitProfileResult result = Utils.getOrInitReferenceProfile(mInjector.getArtd(),
dexInfo.dexPath(), PrimaryDexUtils.buildRefProfilePath(pkgState, dexInfo),
PrimaryDexUtils.getExternalProfiles(dexInfo),
PrimaryDexUtils.buildOutputProfile(pkgState, dexInfo, Process.SYSTEM_UID,
Process.SYSTEM_UID, false /* isPublic */));
- ProfilePath refProfile = pair != null ? pair.first : null;
+ if (!result.externalProfileErrors().isEmpty()) {
+ Log.e(TAG,
+ "Error occurred when initializing from external profiles: "
+ + result.externalProfileErrors());
+ }
+
+ ProfilePath refProfile = result.profile();
if (refProfile != null) {
profiles.add(refProfile);
@@ -850,7 +883,7 @@ public final class ArtManagerLocal {
@Nullable Consumer<OperationProgress> progressCallback) {
try (var snapshot = mInjector.getPackageManagerLocal().withFilteredSnapshot()) {
dexoptPackages(snapshot, bootReason, new CancellationSignal(), progressCallbackExecutor,
- progressCallback);
+ progressCallback != null ? Map.of(ArtFlags.PASS_MAIN, progressCallback) : null);
}
}
@@ -1002,26 +1035,31 @@ public final class ArtManagerLocal {
}
@RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
- private void maybeDowngradePackages(@NonNull PackageManagerLocal.FilteredSnapshot snapshot,
+ @Nullable
+ private DexoptResult maybeDowngradePackages(
+ @NonNull PackageManagerLocal.FilteredSnapshot snapshot,
@NonNull Set<String> excludedPackages, @NonNull CancellationSignal cancellationSignal,
- @NonNull Executor executor) {
+ @NonNull Executor executor,
+ @Nullable @CallbackExecutor Executor progressCallbackExecutor,
+ @Nullable Consumer<OperationProgress> progressCallback) {
if (shouldDowngrade()) {
List<String> packages = getDefaultPackages(snapshot, ReasonMapping.REASON_INACTIVE)
.stream()
.filter(pkg -> !excludedPackages.contains(pkg))
.collect(Collectors.toList());
if (!packages.isEmpty()) {
- Log.i(TAG, "Storage is low. Downgrading inactive packages");
+ Log.i(TAG, "Storage is low. Downgrading " + packages.size() + " inactive packages");
DexoptParams params =
new DexoptParams.Builder(ReasonMapping.REASON_INACTIVE).build();
- mInjector.getDexoptHelper().dexopt(snapshot, packages, params, cancellationSignal,
- executor, null /* processCallbackExecutor */, null /* progressCallback */);
+ return mInjector.getDexoptHelper().dexopt(snapshot, packages, params,
+ cancellationSignal, executor, progressCallbackExecutor, progressCallback);
} else {
Log.i(TAG,
"Storage is low, but downgrading is disabled or there's nothing to "
+ "downgrade");
}
}
+ return null;
}
@RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
@@ -1035,6 +1073,48 @@ public final class ArtManagerLocal {
}
}
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ @Nullable
+ private DexoptResult maybeDexoptPackagesSupplementaryPass(
+ @NonNull PackageManagerLocal.FilteredSnapshot snapshot,
+ @NonNull DexoptResult mainResult, @NonNull DexoptParams mainParams,
+ @NonNull CancellationSignal cancellationSignal, @NonNull Executor dexoptExecutor,
+ @Nullable @CallbackExecutor Executor progressCallbackExecutor,
+ @Nullable Consumer<OperationProgress> progressCallback) {
+ if ((mainParams.getFlags() & ArtFlags.FLAG_FORCE_MERGE_PROFILE) != 0) {
+ return null;
+ }
+
+ // Only pick packages that used a profile-guided filter and were skipped in the main pass.
+ // This is a very coarse filter to reduce unnecessary iterations on a best-effort basis.
+ // Packages included in the list may still be skipped by dexopter if the profiles
+ // haven't changed.
+ List<String> packageNames =
+ mainResult.getPackageDexoptResults()
+ .stream()
+ .filter(packageResult
+ -> packageResult.getDexContainerFileDexoptResults()
+ .stream()
+ .anyMatch(fileResult
+ -> DexFile.isProfileGuidedCompilerFilter(
+ fileResult.getActualCompilerFilter())
+ && fileResult.getStatus()
+ == DexoptResult.DEXOPT_SKIPPED))
+ .map(packageResult -> packageResult.getPackageName())
+ .collect(Collectors.toList());
+
+ DexoptParams dexoptParams = mainParams.toBuilder()
+ .setFlags(ArtFlags.FLAG_FORCE_MERGE_PROFILE,
+ ArtFlags.FLAG_FORCE_MERGE_PROFILE)
+ .build();
+
+ Log.i(TAG,
+ "Dexopting " + packageNames.size() + " packages with reason="
+ + dexoptParams.getReason() + " (supplementary pass)");
+ return mInjector.getDexoptHelper().dexopt(snapshot, packageNames, dexoptParams,
+ cancellationSignal, dexoptExecutor, progressCallbackExecutor, progressCallback);
+ }
+
/** Returns the list of packages to process for the given reason. */
@RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
@NonNull
diff --git a/libartservice/service/java/com/android/server/art/ArtShellCommand.java b/libartservice/service/java/com/android/server/art/ArtShellCommand.java
index b83f4ab25d..f9e610c124 100644
--- a/libartservice/service/java/com/android/server/art/ArtShellCommand.java
+++ b/libartservice/service/java/com/android/server/art/ArtShellCommand.java
@@ -20,6 +20,8 @@ import static android.os.ParcelFileDescriptor.AutoCloseInputStream;
import static com.android.server.art.ArtManagerLocal.SnapshotProfileException;
import static com.android.server.art.PrimaryDexUtils.PrimaryDexInfo;
+import static com.android.server.art.ReasonMapping.BatchDexoptReason;
+import static com.android.server.art.model.ArtFlags.BatchDexoptPass;
import static com.android.server.art.model.ArtFlags.DexoptFlags;
import static com.android.server.art.model.ArtFlags.PriorityClassApi;
import static com.android.server.art.model.DexoptResult.DexContainerFileDexoptResult;
@@ -66,11 +68,14 @@ import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
+import java.util.Locale;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.function.Consumer;
+import java.util.function.Function;
import java.util.stream.Collectors;
/**
@@ -195,6 +200,7 @@ public final class ArtShellCommand extends BasicShellCommandHandler {
boolean forAllPackages = false;
boolean legacyClearProfile = false;
boolean verbose = false;
+ boolean forceMergeProfile = false;
String opt;
while ((opt = getNextOption()) != null) {
@@ -259,6 +265,9 @@ public final class ArtShellCommand extends BasicShellCommandHandler {
case "-v":
verbose = true;
break;
+ case "--force-merge-profile":
+ forceMergeProfile = true;
+ break;
default:
pw.println("Error: Unknown option: " + opt);
return 1;
@@ -290,6 +299,10 @@ public final class ArtShellCommand extends BasicShellCommandHandler {
if (force) {
paramsBuilder.setFlags(ArtFlags.FLAG_FORCE, ArtFlags.FLAG_FORCE);
}
+ if (forceMergeProfile) {
+ paramsBuilder.setFlags(
+ ArtFlags.FLAG_FORCE_MERGE_PROFILE, ArtFlags.FLAG_FORCE_MERGE_PROFILE);
+ }
if (splitArg != null) {
if (scopeFlags != 0) {
pw.println("Error: '--primary-dex', '--secondary-dex', "
@@ -382,8 +395,7 @@ public final class ArtShellCommand extends BasicShellCommandHandler {
BackgroundDexoptJob.Result result = Utils.getFuture(future);
if (result instanceof BackgroundDexoptJob.CompletedResult) {
var completedResult = (BackgroundDexoptJob.CompletedResult) result;
- if (completedResult.dexoptResult().getFinalStatus()
- == DexoptResult.DEXOPT_CANCELLED) {
+ if (completedResult.isCancelled()) {
pw.println("Job cancelled. See logs for details");
} else {
pw.println("Job finished. See logs for details");
@@ -584,20 +596,41 @@ public final class ArtShellCommand extends BasicShellCommandHandler {
ReasonMapping.BATCH_DEXOPT_REASONS);
return 1;
}
- DexoptResult result;
+
+ final String finalReason = reason;
+
+ // Create callbacks to print the progress.
+ Map<Integer, Consumer<OperationProgress>> progressCallbacks = new HashMap<>();
+ for (@BatchDexoptPass int pass : ArtFlags.BATCH_DEXOPT_PASSES) {
+ progressCallbacks.put(pass, progress -> {
+ pw.println(String.format(Locale.US, "%s: %d%%",
+ getProgressMessageForBatchDexoptPass(pass, finalReason),
+ progress.getPercentage()));
+ pw.flush();
+ });
+ }
+
ExecutorService progressCallbackExecutor = Executors.newSingleThreadExecutor();
try (var signal = new WithCancellationSignal(pw, true /* verbose */)) {
- result = mArtManagerLocal.dexoptPackages(
- snapshot, reason, signal.get(), progressCallbackExecutor, progress -> {
- pw.println(String.format("Dexopting apps: %d%%", progress.getPercentage()));
- pw.flush();
- });
+ Map<Integer, DexoptResult> results = mArtManagerLocal.dexoptPackages(snapshot,
+ finalReason, signal.get(), progressCallbackExecutor, progressCallbacks);
+
Utils.executeAndWait(progressCallbackExecutor, () -> {
- printDexoptResult(pw, result, true /* verbose */, true /* multiPackage */);
+ for (@BatchDexoptPass int pass : ArtFlags.BATCH_DEXOPT_PASSES) {
+ if (results.containsKey(pass)) {
+ pw.println("Result of "
+ + getProgressMessageForBatchDexoptPass(pass, finalReason)
+ .toLowerCase(Locale.US)
+ + ":");
+ printDexoptResult(
+ pw, results.get(pass), true /* verbose */, true /* multiPackage */);
+ }
+ }
});
} finally {
progressCallbackExecutor.shutdown();
}
+
return 0;
}
@@ -642,6 +675,8 @@ public final class ArtShellCommand extends BasicShellCommandHandler {
pw.println(" For secondary dex files, it also clears all dexopt artifacts.");
pw.println(" When this flag is set, all the other flags are ignored.");
pw.println(" -v Verbose mode. This mode prints detailed results.");
+ pw.println(" --force-merge-profile Force merge profiles even if the difference between");
+ pw.println(" before and after the merge is not significant.");
pw.println(" Scope options:");
pw.println(" --primary-dex Dexopt primary dex files only (all APKs that are installed");
pw.println(" as part of the package, including the base APK and all other split");
@@ -906,6 +941,21 @@ public final class ArtShellCommand extends BasicShellCommandHandler {
}
}
+ @NonNull
+ private String getProgressMessageForBatchDexoptPass(
+ @BatchDexoptPass int pass, @NonNull @BatchDexoptReason String reason) {
+ switch (pass) {
+ case ArtFlags.PASS_DOWNGRADE:
+ return "Downgrading apps";
+ case ArtFlags.PASS_MAIN:
+ return reason.equals(ReasonMapping.REASON_BG_DEXOPT) ? "Dexopting apps (main pass)"
+ : "Dexopting apps";
+ case ArtFlags.PASS_SUPPLEMENTARY:
+ return "Dexopting apps (supplementary pass)";
+ }
+ throw new IllegalArgumentException("Unknown batch dexopt pass " + pass);
+ }
+
private static class WithCancellationSignal implements AutoCloseable {
@NonNull private final CancellationSignal mSignal = new CancellationSignal();
@NonNull private final String mJobId;
diff --git a/libartservice/service/java/com/android/server/art/BackgroundDexoptJob.java b/libartservice/service/java/com/android/server/art/BackgroundDexoptJob.java
index 9944e3fb1f..46e3a9f77c 100644
--- a/libartservice/service/java/com/android/server/art/BackgroundDexoptJob.java
+++ b/libartservice/service/java/com/android/server/art/BackgroundDexoptJob.java
@@ -17,6 +17,7 @@
package com.android.server.art;
import static com.android.server.art.ArtManagerLocal.ScheduleBackgroundDexoptJobCallback;
+import static com.android.server.art.model.ArtFlags.BatchDexoptPass;
import static com.android.server.art.model.ArtFlags.ScheduleStatus;
import static com.android.server.art.model.Config.Callback;
@@ -32,6 +33,7 @@ import android.os.CancellationSignal;
import android.os.SystemClock;
import android.os.SystemProperties;
import android.util.Log;
+import android.util.Slog;
import androidx.annotation.RequiresApi;
@@ -41,14 +43,19 @@ import com.android.server.LocalManagerRegistry;
import com.android.server.art.model.ArtFlags;
import com.android.server.art.model.Config;
import com.android.server.art.model.DexoptResult;
+import com.android.server.art.model.OperationProgress;
import com.android.server.pm.PackageManagerLocal;
import com.google.auto.value.AutoValue;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
/** @hide */
@RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
@@ -85,15 +92,20 @@ public class BackgroundDexoptJob {
public boolean onStartJob(
@NonNull BackgroundDexoptJobService jobService, @NonNull JobParameters params) {
start().thenAcceptAsync(result -> {
- writeStats(result);
+ try {
+ writeStats(result);
+ } catch (RuntimeException e) {
+ // Not expected. Log wtf to surface it.
+ Slog.wtf(TAG, "Failed to write stats", e);
+ }
+
// This is a periodic job, where the interval is specified in the `JobInfo`. "true"
// means to execute again during a future idle maintenance window in the same
// interval, while "false" means not to execute again during a future idle maintenance
// window in the same interval but to execute again in the next interval.
// This call will be ignored if `onStopJob` is called.
- boolean wantsReschedule = result instanceof CompletedResult
- && ((CompletedResult) result).dexoptResult().getFinalStatus()
- == DexoptResult.DEXOPT_CANCELLED;
+ boolean wantsReschedule =
+ result instanceof CompletedResult && ((CompletedResult) result).isCancelled();
jobService.jobFinished(params, wantsReschedule);
});
// "true" means the job will continue running until `jobFinished` is called.
@@ -202,12 +214,28 @@ public class BackgroundDexoptJob {
@NonNull
private CompletedResult run(@NonNull CancellationSignal cancellationSignal) {
- long startTimeMs = SystemClock.uptimeMillis();
- DexoptResult dexoptResult;
+ // Create callbacks to time each pass.
+ Map<Integer, Long> startTimeMsByPass = new HashMap<>();
+ Map<Integer, Long> durationMsByPass = new HashMap<>();
+ Map<Integer, Consumer<OperationProgress>> progressCallbacks = new HashMap<>();
+ for (@BatchDexoptPass int pass : ArtFlags.BATCH_DEXOPT_PASSES) {
+ progressCallbacks.put(pass, progress -> {
+ if (progress.getTotal() == 0) {
+ durationMsByPass.put(pass, 0l);
+ } else if (progress.getCurrent() == 0) {
+ startTimeMsByPass.put(pass, SystemClock.uptimeMillis());
+ } else if (progress.getCurrent() == progress.getTotal()) {
+ durationMsByPass.put(
+ pass, SystemClock.uptimeMillis() - startTimeMsByPass.get(pass));
+ }
+ });
+ }
+
+ Map<Integer, DexoptResult> dexoptResultByPass;
try (var snapshot = mInjector.getPackageManagerLocal().withFilteredSnapshot()) {
- dexoptResult = mInjector.getArtManagerLocal().dexoptPackages(snapshot,
- ReasonMapping.REASON_BG_DEXOPT, cancellationSignal,
- null /* processCallbackExecutor */, null /* processCallback */);
+ dexoptResultByPass = mInjector.getArtManagerLocal().dexoptPackages(snapshot,
+ ReasonMapping.REASON_BG_DEXOPT, cancellationSignal, Runnable::run,
+ progressCallbacks);
// For simplicity, we don't support cancelling the following operation in the middle.
// This is fine because it typically takes only a few seconds.
@@ -220,7 +248,7 @@ public class BackgroundDexoptJob {
Log.i(TAG, String.format("Freed %d bytes", freedBytes));
}
}
- return CompletedResult.create(dexoptResult, SystemClock.uptimeMillis() - startTimeMs);
+ return CompletedResult.create(dexoptResultByPass, durationMsByPass);
}
private void writeStats(@NonNull Result result) {
@@ -239,13 +267,22 @@ public class BackgroundDexoptJob {
static class FatalErrorResult extends Result {}
@AutoValue
+ @SuppressWarnings("AutoValueImmutableFields") // Can't use ImmutableMap because it's in Guava.
static abstract class CompletedResult extends Result {
- abstract @NonNull DexoptResult dexoptResult();
- abstract long durationMs();
+ abstract @NonNull Map<Integer, DexoptResult> dexoptResultByPass();
+ abstract @NonNull Map<Integer, Long> durationMsByPass();
@NonNull
- static CompletedResult create(@NonNull DexoptResult dexoptResult, long durationMs) {
- return new AutoValue_BackgroundDexoptJob_CompletedResult(dexoptResult, durationMs);
+ static CompletedResult create(@NonNull Map<Integer, DexoptResult> dexoptResultByPass,
+ @NonNull Map<Integer, Long> durationMsByPass) {
+ return new AutoValue_BackgroundDexoptJob_CompletedResult(
+ Collections.unmodifiableMap(dexoptResultByPass),
+ Collections.unmodifiableMap(durationMsByPass));
+ }
+
+ public boolean isCancelled() {
+ return dexoptResultByPass().values().stream().anyMatch(
+ result -> result.getFinalStatus() == DexoptResult.DEXOPT_CANCELLED);
}
}
diff --git a/libartservice/service/java/com/android/server/art/BackgroundDexoptJobStatsReporter.java b/libartservice/service/java/com/android/server/art/BackgroundDexoptJobStatsReporter.java
index 533e3408e3..dc98914292 100644
--- a/libartservice/service/java/com/android/server/art/BackgroundDexoptJobStatsReporter.java
+++ b/libartservice/service/java/com/android/server/art/BackgroundDexoptJobStatsReporter.java
@@ -1,11 +1,14 @@
package com.android.server.art;
+import static com.android.server.art.model.ArtFlags.BatchDexoptPass;
+
import android.annotation.NonNull;
import android.app.job.JobParameters;
import android.os.Build;
import androidx.annotation.RequiresApi;
+import com.android.server.art.model.ArtFlags;
import com.android.server.art.model.DexoptResult;
import dalvik.system.DexFile;
@@ -22,44 +25,61 @@ import java.util.stream.Collectors;
@RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
public class BackgroundDexoptJobStatsReporter {
public static void reportFailure() {
+ // The fatal error can occur during any pass, but we attribute it to the main pass for
+ // simplicity.
ArtStatsLog.write(ArtStatsLog.BACKGROUND_DEXOPT_JOB_ENDED,
ArtStatsLog.BACKGROUND_DEXOPT_JOB_ENDED__STATUS__STATUS_FATAL_ERROR,
JobParameters.STOP_REASON_UNDEFINED, 0 /* durationMs */, 0 /* deprecated */,
0 /* optimizedPackagesCount */, 0 /* packagesDependingOnBootClasspathCount */,
- 0 /* totalPackagesCount */);
+ 0 /* totalPackagesCount */,
+ ArtStatsLog.BACKGROUND_DEXOPT_JOB_ENDED__PASS__PASS_MAIN);
}
public static void reportSuccess(@NonNull BackgroundDexoptJob.CompletedResult completedResult,
Optional<Integer> stopReason) {
+ for (var entry : completedResult.dexoptResultByPass().entrySet()) {
+ reportPass(entry.getKey(), entry.getValue(),
+ completedResult.durationMsByPass().getOrDefault(entry.getKey(), 0l),
+ stopReason);
+ }
+ }
+
+ public static void reportPass(@BatchDexoptPass int pass, @NonNull DexoptResult dexoptResult,
+ long durationMs, Optional<Integer> stopReason) {
+ // The job contains multiple passes, so the stop reason may not be for the current pass. We
+ // shouldn't report the stop reason if the current pass finished before the job was
+ // cancelled.
+ int reportedStopReason = dexoptResult.getFinalStatus() == DexoptResult.DEXOPT_CANCELLED
+ ? stopReason.orElse(JobParameters.STOP_REASON_UNDEFINED)
+ : JobParameters.STOP_REASON_UNDEFINED;
+
List<DexoptResult.PackageDexoptResult> packageDexoptResults =
- getFilteredPackageResults(completedResult);
+ getFilteredPackageResults(dexoptResult);
+
ArtStatsLog.write(ArtStatsLog.BACKGROUND_DEXOPT_JOB_ENDED,
- getStatusForStats(completedResult, stopReason),
- stopReason.orElse(JobParameters.STOP_REASON_UNDEFINED),
- completedResult.durationMs(), 0 /* deprecated */,
- getDexoptedPackagesCount(packageDexoptResults),
+ getStatusForStats(dexoptResult, stopReason), reportedStopReason, durationMs,
+ 0 /* deprecated */, getDexoptedPackagesCount(packageDexoptResults),
getPackagesDependingOnBootClasspathCount(packageDexoptResults),
- packageDexoptResults.size());
+ packageDexoptResults.size(), toStatsdPassEnum(pass));
}
@NonNull
private static List<DexoptResult.PackageDexoptResult> getFilteredPackageResults(
- @NonNull BackgroundDexoptJob.CompletedResult completedResult) {
- return completedResult.dexoptResult()
- .getPackageDexoptResults()
+ @NonNull DexoptResult dexoptResult) {
+ return dexoptResult.getPackageDexoptResults()
.stream()
.filter(packageResult
-> packageResult.getDexContainerFileDexoptResults().stream().anyMatch(
fileResult
- -> (fileResult.getExtraStatus()
- & DexoptResult.EXTRA_SKIPPED_NO_DEX_CODE)
+ -> (fileResult.getExtendedStatusFlags()
+ & DexoptResult.EXTENDED_SKIPPED_NO_DEX_CODE)
== 0))
.collect(Collectors.toList());
}
private static int getStatusForStats(
- @NonNull BackgroundDexoptJob.CompletedResult result, Optional<Integer> stopReason) {
- if (result.dexoptResult().getFinalStatus() == DexoptResult.DEXOPT_CANCELLED) {
+ @NonNull DexoptResult dexoptResult, Optional<Integer> stopReason) {
+ if (dexoptResult.getFinalStatus() == DexoptResult.DEXOPT_CANCELLED) {
if (stopReason.isPresent()) {
return ArtStatsLog
.BACKGROUND_DEXOPT_JOB_ENDED__STATUS__STATUS_ABORT_BY_CANCELLATION;
@@ -69,14 +89,13 @@ public class BackgroundDexoptJobStatsReporter {
}
boolean isSkippedDueToStorageLow =
- result.dexoptResult()
- .getPackageDexoptResults()
+ dexoptResult.getPackageDexoptResults()
.stream()
.flatMap(packageResult
-> packageResult.getDexContainerFileDexoptResults().stream())
.anyMatch(fileResult
- -> (fileResult.getExtraStatus()
- & DexoptResult.EXTRA_SKIPPED_STORAGE_LOW)
+ -> (fileResult.getExtendedStatusFlags()
+ & DexoptResult.EXTENDED_SKIPPED_STORAGE_LOW)
!= 0);
if (isSkippedDueToStorageLow) {
return ArtStatsLog.BACKGROUND_DEXOPT_JOB_ENDED__STATUS__STATUS_ABORT_NO_SPACE_LEFT;
@@ -106,4 +125,16 @@ public class BackgroundDexoptJobStatsReporter {
.map(DexoptResult.DexContainerFileDexoptResult::getActualCompilerFilter)
.anyMatch(DexFile::isOptimizedCompilerFilter);
}
+
+ private static int toStatsdPassEnum(@BatchDexoptPass int pass) {
+ switch (pass) {
+ case ArtFlags.PASS_DOWNGRADE:
+ return ArtStatsLog.BACKGROUND_DEXOPT_JOB_ENDED__PASS__PASS_DOWNGRADE;
+ case ArtFlags.PASS_MAIN:
+ return ArtStatsLog.BACKGROUND_DEXOPT_JOB_ENDED__PASS__PASS_MAIN;
+ case ArtFlags.PASS_SUPPLEMENTARY:
+ return ArtStatsLog.BACKGROUND_DEXOPT_JOB_ENDED__PASS__PASS_SUPPLEMENTARY;
+ }
+ throw new IllegalArgumentException("Unknown batch dexopt pass " + pass);
+ }
}
diff --git a/libartservice/service/java/com/android/server/art/DexUseManagerLocal.java b/libartservice/service/java/com/android/server/art/DexUseManagerLocal.java
index 89fb542c0a..97b3c8a186 100644
--- a/libartservice/service/java/com/android/server/art/DexUseManagerLocal.java
+++ b/libartservice/service/java/com/android/server/art/DexUseManagerLocal.java
@@ -441,7 +441,10 @@ public class DexUseManagerLocal {
private static boolean isOwningPackageForPrimaryDex(
@NonNull PackageState pkgState, @NonNull String dexPath) {
- AndroidPackage pkg = Utils.getPackageOrThrow(pkgState);
+ AndroidPackage pkg = pkgState.getAndroidPackage();
+ if (pkg == null) {
+ return false;
+ }
List<AndroidPackageSplit> splits = pkg.getSplits();
for (int i = 0; i < splits.size(); i++) {
if (splits.get(i).getPath().equals(dexPath)) {
@@ -1109,7 +1112,11 @@ public class DexUseManagerLocal {
public @NonNull List<Path> getLocations(
@NonNull PackageState pkgState, @NonNull UserHandle userHandle) {
- AndroidPackage pkg = Utils.getPackageOrThrow(pkgState);
+ AndroidPackage pkg = pkgState.getAndroidPackage();
+ if (pkg == null) {
+ return List.of();
+ }
+
UUID storageUuid = pkg.getStorageUuid();
String packageName = pkgState.getPackageName();
diff --git a/libartservice/service/java/com/android/server/art/Dexopter.java b/libartservice/service/java/com/android/server/art/Dexopter.java
index 72fd22effc..2f15166ce7 100644
--- a/libartservice/service/java/com/android/server/art/Dexopter.java
+++ b/libartservice/service/java/com/android/server/art/Dexopter.java
@@ -20,6 +20,7 @@ import static com.android.server.art.GetDexoptNeededResult.ArtifactsLocation;
import static com.android.server.art.OutputArtifacts.PermissionSettings;
import static com.android.server.art.ProfilePath.TmpProfilePath;
import static com.android.server.art.Utils.Abi;
+import static com.android.server.art.Utils.InitProfileResult;
import static com.android.server.art.model.ArtFlags.DexoptFlags;
import static com.android.server.art.model.DexoptResult.DexContainerFileDexoptResult;
@@ -104,6 +105,7 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
for (DexInfoType dexInfo : getDexInfoList()) {
ProfilePath profile = null;
boolean succeeded = true;
+ List<String> externalProfileErrors = List.of();
try {
if (!isDexoptable(dexInfo)) {
continue;
@@ -120,13 +122,15 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
boolean profileMerged = false;
if (DexFile.isProfileGuidedCompilerFilter(compilerFilter)) {
if (needsToBeShared) {
- profile = initReferenceProfile(dexInfo);
+ InitProfileResult result = initReferenceProfile(dexInfo);
+ profile = result.profile();
+ isOtherReadable = result.isOtherReadable();
+ externalProfileErrors = result.externalProfileErrors();
} else {
- Pair<ProfilePath, Boolean> pair = getOrInitReferenceProfile(dexInfo);
- if (pair != null) {
- profile = pair.first;
- isOtherReadable = pair.second;
- }
+ InitProfileResult result = getOrInitReferenceProfile(dexInfo);
+ profile = result.profile();
+ isOtherReadable = result.isOtherReadable();
+ externalProfileErrors = result.externalProfileErrors();
ProfilePath mergedProfile = mergeProfiles(dexInfo, profile);
if (mergedProfile != null) {
if (profile != null && profile.getTag() == ProfilePath.tmpProfilePath) {
@@ -165,7 +169,7 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
long cpuTimeMs = 0;
long sizeBytes = 0;
long sizeBeforeBytes = 0;
- @DexoptResult.DexoptResultExtraStatus int extraStatus = 0;
+ @DexoptResult.DexoptResultExtendedStatusFlags int extendedStatusFlags = 0;
try {
var target = DexoptTarget.<DexInfoType>builder()
.setDexInfo(dexInfo)
@@ -183,7 +187,7 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
getDexoptNeeded(target, options);
if (!getDexoptNeededResult.hasDexCode) {
- extraStatus |= DexoptResult.EXTRA_SKIPPED_NO_DEX_CODE;
+ extendedStatusFlags |= DexoptResult.EXTENDED_SKIPPED_NO_DEX_CODE;
}
if (!getDexoptNeededResult.isDexoptNeeded) {
@@ -200,7 +204,7 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
&& mInjector.getStorageManager().getAllocatableBytes(
mPkg.getStorageUuid())
<= 0) {
- extraStatus |= DexoptResult.EXTRA_SKIPPED_STORAGE_LOW;
+ extendedStatusFlags |= DexoptResult.EXTENDED_SKIPPED_STORAGE_LOW;
continue;
}
} catch (IOException e) {
@@ -241,9 +245,13 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
e);
status = DexoptResult.DEXOPT_FAILED;
} finally {
+ if (!externalProfileErrors.isEmpty()) {
+ extendedStatusFlags |= DexoptResult.EXTENDED_BAD_EXTERNAL_PROFILE;
+ }
var result = DexContainerFileDexoptResult.create(dexInfo.dexPath(),
abi.isPrimaryAbi(), abi.name(), compilerFilter, status, wallTimeMs,
- cpuTimeMs, sizeBytes, sizeBeforeBytes, extraStatus);
+ cpuTimeMs, sizeBytes, sizeBeforeBytes, extendedStatusFlags,
+ externalProfileErrors);
Log.i(TAG,
String.format("Dexopt result: [packageName = %s] %s",
mPkgState.getPackageName(), result));
@@ -294,13 +302,14 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
if (mInjector.isSystemUiPackage(mPkgState.getPackageName())) {
String systemUiCompilerFilter = getSystemUiCompilerFilter();
if (!systemUiCompilerFilter.isEmpty()) {
- return systemUiCompilerFilter;
+ targetCompilerFilter = systemUiCompilerFilter;
}
+ } else if (mInjector.isLauncherPackage(mPkgState.getPackageName())) {
+ targetCompilerFilter = "speed-profile";
}
- if (mInjector.isLauncherPackage(mPkgState.getPackageName())) {
- return "speed-profile";
- }
+ // Code below should only downgrade the compiler filter. Don't upgrade the compiler filter
+ // beyond this point!
// We force vmSafeMode on debuggable apps as well:
// - the runtime ignores their compiled code
@@ -310,13 +319,13 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
// are done via adb shell commands). This is okay because the runtime will ignore the
// compiled code anyway.
if (mPkg.isVmSafeMode() || mPkg.isDebuggable()) {
- return DexFile.getSafeModeCompilerFilter(targetCompilerFilter);
+ targetCompilerFilter = DexFile.getSafeModeCompilerFilter(targetCompilerFilter);
}
// We cannot do AOT compilation if we don't have a valid class loader context.
- if (dexInfo.classLoaderContext() == null) {
- return DexFile.isOptimizedCompilerFilter(targetCompilerFilter) ? "verify"
- : targetCompilerFilter;
+ if (dexInfo.classLoaderContext() == null
+ && DexFile.isOptimizedCompilerFilter(targetCompilerFilter)) {
+ targetCompilerFilter = "verify";
}
// This application wants to use the embedded dex in the APK, rather than extracted or
@@ -324,9 +333,13 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
// "verify" does not prevent dex2oat from extracting the dex code, but in practice, dex2oat
// won't extract the dex code because the APK is uncompressed, and the assumption is that
// such applications always use uncompressed APKs.
- if (mPkg.isUseEmbeddedDex()) {
- return DexFile.isOptimizedCompilerFilter(targetCompilerFilter) ? "verify"
- : targetCompilerFilter;
+ if (mPkg.isUseEmbeddedDex() && DexFile.isOptimizedCompilerFilter(targetCompilerFilter)) {
+ targetCompilerFilter = "verify";
+ }
+
+ if ((mParams.getFlags() & ArtFlags.FLAG_IGNORE_PROFILE) != 0
+ && DexFile.isProfileGuidedCompilerFilter(targetCompilerFilter)) {
+ targetCompilerFilter = "verify";
}
return targetCompilerFilter;
@@ -344,7 +357,7 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
/** @see Utils#getOrInitReferenceProfile */
@Nullable
- private Pair<ProfilePath, Boolean> getOrInitReferenceProfile(@NonNull DexInfoType dexInfo)
+ private InitProfileResult getOrInitReferenceProfile(@NonNull DexInfoType dexInfo)
throws RemoteException {
return Utils.getOrInitReferenceProfile(mInjector.getArtd(), dexInfo.dexPath(),
buildRefProfilePath(dexInfo), getExternalProfiles(dexInfo),
@@ -352,7 +365,8 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
}
@Nullable
- private ProfilePath initReferenceProfile(@NonNull DexInfoType dexInfo) throws RemoteException {
+ private InitProfileResult initReferenceProfile(@NonNull DexInfoType dexInfo)
+ throws RemoteException {
return Utils.initReferenceProfile(mInjector.getArtd(), dexInfo.dexPath(),
getExternalProfiles(dexInfo), buildOutputProfile(dexInfo, true /* isPublic */));
}
@@ -527,9 +541,12 @@ public abstract class Dexopter<DexInfoType extends DetailedDexInfo> {
@Nullable ProfilePath referenceProfile) throws RemoteException {
OutputProfile output = buildOutputProfile(dexInfo, false /* isPublic */);
+ var options = new MergeProfileOptions();
+ options.forceMerge = (mParams.getFlags() & ArtFlags.FLAG_FORCE_MERGE_PROFILE) != 0;
+
try {
if (mInjector.getArtd().mergeProfiles(getCurProfiles(dexInfo), referenceProfile, output,
- List.of(dexInfo.dexPath()), new MergeProfileOptions())) {
+ List.of(dexInfo.dexPath()), options)) {
return ProfilePath.tmpProfilePath(output.profilePath);
}
} catch (ServiceSpecificException e) {
diff --git a/libartservice/service/java/com/android/server/art/ReasonMapping.java b/libartservice/service/java/com/android/server/art/ReasonMapping.java
index ac08856b67..7c64abfab8 100644
--- a/libartservice/service/java/com/android/server/art/ReasonMapping.java
+++ b/libartservice/service/java/com/android/server/art/ReasonMapping.java
@@ -194,6 +194,7 @@ public class ReasonMapping {
* @hide
*/
public static int getConcurrencyForReason(@NonNull @BatchDexoptReason String reason) {
- return SystemProperties.getInt("pm.dexopt." + reason + ".concurrency", 1 /* def */);
+ return SystemProperties.getInt("persist.device_config.runtime." + reason + "_concurrency",
+ SystemProperties.getInt("pm.dexopt." + reason + ".concurrency", 1 /* def */));
}
}
diff --git a/libartservice/service/java/com/android/server/art/Utils.java b/libartservice/service/java/com/android/server/art/Utils.java
index 6d48c45b84..252074a13a 100644
--- a/libartservice/service/java/com/android/server/art/Utils.java
+++ b/libartservice/service/java/com/android/server/art/Utils.java
@@ -57,6 +57,7 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Set;
@@ -366,13 +367,9 @@ public final class Utils {
* @param externalProfiles a list of external profiles to initialize the reference profile from,
* in the order of preference
* @param initOutput the final location to initialize the reference profile to
- *
- * @return a pair where the first element is the found or initialized profile, and the second
- * element is true if the profile is readable by others. Returns null if there is no
- * reference profile or external profile to use
*/
- @Nullable
- public static Pair<ProfilePath, Boolean> getOrInitReferenceProfile(@NonNull IArtd artd,
+ @NonNull
+ public static InitProfileResult getOrInitReferenceProfile(@NonNull IArtd artd,
@NonNull String dexPath, @NonNull ProfilePath refProfile,
@NonNull List<ProfilePath> externalProfiles, @NonNull OutputProfile initOutput)
throws RemoteException {
@@ -380,7 +377,8 @@ public final class Utils {
if (artd.isProfileUsable(refProfile, dexPath)) {
boolean isOtherReadable =
artd.getProfileVisibility(refProfile) == FileVisibility.OTHER_READABLE;
- return Pair.create(refProfile, isOtherReadable);
+ return InitProfileResult.create(
+ refProfile, isOtherReadable, List.of() /* externalProfileErrors */);
}
} catch (ServiceSpecificException e) {
Log.e(TAG,
@@ -389,22 +387,21 @@ public final class Utils {
e);
}
- ProfilePath initializedProfile =
- initReferenceProfile(artd, dexPath, externalProfiles, initOutput);
- return initializedProfile != null ? Pair.create(initializedProfile, true) : null;
+ return initReferenceProfile(artd, dexPath, externalProfiles, initOutput);
}
/**
* Similar to above, but never uses an existing profile.
*
- * Unlike the one above, this method doesn't return a boolean flag to indicate if the profile is
- * readable by others. The profile returned by this method is initialized form an external
- * profile, meaning it has no user data, so it's always readable by others.
+ * The {@link InitProfileResult#isOtherReadable} field is always set to true. The profile
+ * returned by this method is initialized from an external profile, meaning it has no user data,
+ * so it's always readable by others.
*/
@Nullable
- public static ProfilePath initReferenceProfile(@NonNull IArtd artd, @NonNull String dexPath,
- @NonNull List<ProfilePath> externalProfiles, @NonNull OutputProfile output)
- throws RemoteException {
+ public static InitProfileResult initReferenceProfile(@NonNull IArtd artd,
+ @NonNull String dexPath, @NonNull List<ProfilePath> externalProfiles,
+ @NonNull OutputProfile output) throws RemoteException {
+ List<String> externalProfileErrors = new ArrayList<>();
for (ProfilePath profile : externalProfiles) {
try {
// If the profile path is a PrebuiltProfilePath, and the APK is really a prebuilt
@@ -412,15 +409,22 @@ public final class Utils {
// build time and is correctly set in the profile header. However, the APK can also
// be an installed one, in which case partners may place a profile file next to the
// APK at install time. Rewriting the profile in the latter case is necessary.
- if (artd.copyAndRewriteProfile(profile, output, dexPath)) {
- return ProfilePath.tmpProfilePath(output.profilePath);
+ CopyAndRewriteProfileResult result =
+ artd.copyAndRewriteProfile(profile, output, dexPath);
+ if (result.status == CopyAndRewriteProfileResult.Status.SUCCESS) {
+ return InitProfileResult.create(ProfilePath.tmpProfilePath(output.profilePath),
+ true /* isOtherReadable */, externalProfileErrors);
+ }
+ if (result.status == CopyAndRewriteProfileResult.Status.BAD_PROFILE) {
+ externalProfileErrors.add(result.errorMsg);
}
} catch (ServiceSpecificException e) {
Log.e(TAG, "Failed to initialize profile from " + AidlUtils.toString(profile), e);
}
}
- return null;
+ return InitProfileResult.create(
+ null /* profile */, true /* isOtherReadable */, externalProfileErrors);
}
public static void logArtdException(@NonNull RemoteException e) {
@@ -487,4 +491,31 @@ public final class Utils {
super.close();
}
}
+
+ /** The result of {@link #getOrInitReferenceProfile} and {@link #initReferenceProfile}. */
+ @AutoValue
+ @SuppressWarnings("AutoValueImmutableFields") // Can't use ImmutableList because it's in Guava.
+ public abstract static class InitProfileResult {
+ static @NonNull InitProfileResult create(@Nullable ProfilePath profile,
+ boolean isOtherReadable, @NonNull List<String> externalProfileErrors) {
+ return new AutoValue_Utils_InitProfileResult(
+ profile, isOtherReadable, Collections.unmodifiableList(externalProfileErrors));
+ }
+
+ /**
+ * The found or initialized profile, or null if there is no reference profile or external
+ * profile to use.
+ */
+ abstract @Nullable ProfilePath profile();
+
+ /**
+ * Whether the profile is readable by others.
+ *
+ * If {@link #profile} returns null, this field is always true.
+ */
+ abstract boolean isOtherReadable();
+
+ /** Errors encountered when initializing from external profiles. */
+ abstract @NonNull List<String> externalProfileErrors();
+ }
}
diff --git a/libartservice/service/java/com/android/server/art/model/ArtFlags.java b/libartservice/service/java/com/android/server/art/model/ArtFlags.java
index cc4f826556..6de14f0f11 100644
--- a/libartservice/service/java/com/android/server/art/model/ArtFlags.java
+++ b/libartservice/service/java/com/android/server/art/model/ArtFlags.java
@@ -18,6 +18,7 @@ package com.android.server.art.model;
import android.annotation.IntDef;
import android.annotation.NonNull;
+import android.annotation.SuppressLint;
import android.annotation.SystemApi;
import android.app.job.JobScheduler;
@@ -28,6 +29,7 @@ import com.android.server.pm.PackageManagerLocal;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
+import java.util.List;
/** @hide */
@SystemApi(client = SystemApi.Client.SYSTEM_SERVER)
@@ -78,6 +80,23 @@ public class ArtFlags {
* sys_storage_threshold_max_bytes}.
*/
public static final int FLAG_SKIP_IF_STORAGE_LOW = 1 << 6;
+ /**
+ * If set, no profile will be used by dexopt. I.e., if the compiler filter is a profile-guided
+ * one, such as "speed-profile", it will be adjusted to "verify". This option is especially
+ * useful when the compiler filter is not explicitly specified (i.e., is inferred from the
+ * compilation reason).
+ *
+ * @hide
+ */
+ @SuppressLint("UnflaggedApi") // Flag support for mainline is not available.
+ public static final int FLAG_IGNORE_PROFILE = 1 << 7;
+ /**
+ * Whether to force merge profiles even if the difference between before and after the merge
+ * is not significant.
+ *
+ * @hide
+ */
+ public static final int FLAG_FORCE_MERGE_PROFILE = 1 << 8;
/**
* Flags for {@link
@@ -118,6 +137,8 @@ public class ArtFlags {
FLAG_FORCE,
FLAG_FOR_SINGLE_SPLIT,
FLAG_SKIP_IF_STORAGE_LOW,
+ FLAG_IGNORE_PROFILE,
+ FLAG_FORCE_MERGE_PROFILE,
})
// clang-format on
@Retention(RetentionPolicy.SOURCE)
@@ -218,5 +239,51 @@ public class ArtFlags {
@Retention(RetentionPolicy.SOURCE)
public @interface ScheduleStatus {}
+ /**
+ * The downgrade pass, run before the main pass. Only applicable to bg-dexopt.
+ *
+ * @hide
+ */
+ public static final int PASS_DOWNGRADE = 0;
+
+ /**
+ * The main pass.
+ *
+ * @hide
+ */
+ public static final int PASS_MAIN = 1;
+
+ /**
+ * The supplementary pass, run after the main pass, to take the opportunity to dexopt more
+ * packages. Compared to the main pass, it uses different criteria to determine whether dexopt
+ * is needed or not, but iterates over the same packages in the same order as the main pass (so
+ * the logic in {@link ArtManagerLocal#getDefaultPackages} and {@link
+ * ArtManagerLocal.BatchDexoptStartCallback} controls the packages here too.)
+ *
+ * Only applicable to bg-dexopt.
+ *
+ * @hide
+ */
+ public static final int PASS_SUPPLEMENTARY = 2;
+
+ /**
+ * Indicates the pass of a batch dexopt run.
+ *
+ * @hide
+ */
+ // clang-format off
+ @IntDef(prefix = "PASS_", value = {
+ PASS_DOWNGRADE,
+ PASS_MAIN,
+ PASS_SUPPLEMENTARY,
+ })
+ // clang-format on
+ @Retention(RetentionPolicy.SOURCE)
+ public @interface BatchDexoptPass {}
+
+ /** @hide */
+ public static final List<Integer> BATCH_DEXOPT_PASSES =
+ List.of(PASS_DOWNGRADE, PASS_MAIN, PASS_SUPPLEMENTARY);
+
private ArtFlags() {}
}
diff --git a/libartservice/service/java/com/android/server/art/model/DexoptParams.java b/libartservice/service/java/com/android/server/art/model/DexoptParams.java
index cf86ad6904..455d4cdead 100644
--- a/libartservice/service/java/com/android/server/art/model/DexoptParams.java
+++ b/libartservice/service/java/com/android/server/art/model/DexoptParams.java
@@ -84,8 +84,8 @@ public class DexoptParams {
* https://source.android.com/docs/core/dalvik/configure#compilation_options.
*
* Note that the compiler filter might be adjusted before the execution based on factors
- * like whether the profile is available or whether the app is used by other apps. If not
- * set, the default compiler filter for the given reason will be used.
+ * like dexopt flags, whether the profile is available, or whether the app is used by other
+ * apps. If not set, the default compiler filter for the given reason will be used.
*/
@NonNull
public Builder setCompilerFilter(@NonNull String value) {
@@ -223,4 +223,12 @@ public class DexoptParams {
public @Nullable String getSplitName() {
return mSplitName;
}
+
+ /** @hide */
+ public @NonNull Builder toBuilder() {
+ return new Builder(mReason, mFlags)
+ .setCompilerFilter(mCompilerFilter)
+ .setPriorityClass(mPriorityClass)
+ .setSplitName(mSplitName);
+ }
}
diff --git a/libartservice/service/java/com/android/server/art/model/DexoptResult.java b/libartservice/service/java/com/android/server/art/model/DexoptResult.java
index b678a77aa5..64cdb1ca78 100644
--- a/libartservice/service/java/com/android/server/art/model/DexoptResult.java
+++ b/libartservice/service/java/com/android/server/art/model/DexoptResult.java
@@ -23,12 +23,14 @@ import android.annotation.Nullable;
import android.annotation.SystemApi;
import com.android.internal.annotations.Immutable;
+import com.android.internal.annotations.VisibleForTesting;
import com.google.auto.value.AutoValue;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
/** @hide */
@@ -60,21 +62,47 @@ public abstract class DexoptResult {
@Retention(RetentionPolicy.SOURCE)
public @interface DexoptResultStatus {}
- // Possible values of {@link #DexoptResultExtraStatus}.
- /** @hide */
- public static final int EXTRA_SKIPPED_STORAGE_LOW = 1 << 0;
- /** @hide */
- public static final int EXTRA_SKIPPED_NO_DEX_CODE = 1 << 1;
+ // Possible values of {@link #DexoptResultExtendedStatusFlags}.
+ /**
+ * Dexopt is skipped because the remaining storage space is low.
+ *
+ * @hide
+ */
+ public static final int EXTENDED_SKIPPED_STORAGE_LOW = 1 << 0;
+ /**
+ * Dexopt is skipped because the dex container file has no dex code while the manifest declares
+ * that it does.
+ *
+ * Note that this flag doesn't apply to dex container files that are not declared to have code.
+ * Instead, those files are not listed in {@link
+ * PackageDexoptResult#getDexContainerFileDexoptResults} in the first place.
+ *
+ * @hide
+ */
+ public static final int EXTENDED_SKIPPED_NO_DEX_CODE = 1 << 1;
+ /**
+ * Dexopt encountered errors when processing the profiles that are external to the device,
+ * including the profile in the DM file and the profile embedded in the dex container file.
+ * Details of the errors can be found in {@link
+ * DexContainerFileDexoptResult#getExternalProfileErrors}.
+ *
+ * This is not a critical error. Dexopt may still have succeeded after ignoring the bad external
+ * profiles.
+ *
+ * @hide
+ */
+ public static final int EXTENDED_BAD_EXTERNAL_PROFILE = 1 << 2;
/** @hide */
// clang-format off
- @IntDef(flag = true, prefix = {"EXTRA_"}, value = {
- EXTRA_SKIPPED_STORAGE_LOW,
- EXTRA_SKIPPED_NO_DEX_CODE,
+ @IntDef(flag = true, prefix = {"EXTENDED_"}, value = {
+ EXTENDED_SKIPPED_STORAGE_LOW,
+ EXTENDED_SKIPPED_NO_DEX_CODE,
+ EXTENDED_BAD_EXTERNAL_PROFILE,
})
// clang-format on
@Retention(RetentionPolicy.SOURCE)
- public @interface DexoptResultExtraStatus {}
+ public @interface DexoptResultExtendedStatusFlags {}
/** @hide */
protected DexoptResult() {}
@@ -85,10 +113,17 @@ public abstract class DexoptResult {
return new AutoValue_DexoptResult(requestedCompilerFilter, reason, packageDexoptResult);
}
+ /** @hide */
+ @VisibleForTesting
+ public static @NonNull DexoptResult create() {
+ return new AutoValue_DexoptResult(
+ "compiler-filter", "reason", List.of() /* packageDexoptResult */);
+ }
+
/**
* The requested compiler filter. Note that the compiler filter might be adjusted before the
- * execution based on factors like whether the profile is available or whether the app is
- * used by other apps.
+ * execution based on factors like dexopt flags, whether the profile is available, or whether
+ * the app is used by other apps.
*
* @see DexoptParams.Builder#setCompilerFilter(String)
* @see DexContainerFileDexoptResult#getActualCompilerFilter()
@@ -144,13 +179,17 @@ public abstract class DexoptResult {
/** @hide */
@NonNull
- public static String dexoptResultExtraStatusToString(@DexoptResultExtraStatus int extraStatus) {
+ public static String dexoptResultExtendedStatusFlagsToString(
+ @DexoptResultExtendedStatusFlags int flags) {
var strs = new ArrayList<String>();
- if ((extraStatus & DexoptResult.EXTRA_SKIPPED_STORAGE_LOW) != 0) {
- strs.add("EXTRA_SKIPPED_STORAGE_LOW");
+ if ((flags & DexoptResult.EXTENDED_SKIPPED_STORAGE_LOW) != 0) {
+ strs.add("EXTENDED_SKIPPED_STORAGE_LOW");
}
- if ((extraStatus & DexoptResult.EXTRA_SKIPPED_NO_DEX_CODE) != 0) {
- strs.add("EXTRA_SKIPPED_NO_DEX_CODE");
+ if ((flags & DexoptResult.EXTENDED_SKIPPED_NO_DEX_CODE) != 0) {
+ strs.add("EXTENDED_SKIPPED_NO_DEX_CODE");
+ }
+ if ((flags & DexoptResult.EXTENDED_BAD_EXTERNAL_PROFILE) != 0) {
+ strs.add("EXTENDED_BAD_EXTERNAL_PROFILE");
}
return String.join(", ", strs);
}
@@ -213,6 +252,7 @@ public abstract class DexoptResult {
@SystemApi(client = SystemApi.Client.SYSTEM_SERVER)
@Immutable
@AutoValue
+ @SuppressWarnings("AutoValueImmutableFields") // Can't use ImmutableList because it's in Guava.
public static abstract class DexContainerFileDexoptResult {
/** @hide */
protected DexContainerFileDexoptResult() {}
@@ -222,10 +262,23 @@ public abstract class DexoptResult {
boolean isPrimaryAbi, @NonNull String abi, @NonNull String compilerFilter,
@DexoptResultStatus int status, long dex2oatWallTimeMillis,
long dex2oatCpuTimeMillis, long sizeBytes, long sizeBeforeBytes,
- @DexoptResultExtraStatus int extraStatus) {
+ @DexoptResultExtendedStatusFlags int extendedStatusFlags,
+ @NonNull List<String> externalProfileErrors) {
return new AutoValue_DexoptResult_DexContainerFileDexoptResult(dexContainerFile,
isPrimaryAbi, abi, compilerFilter, status, dex2oatWallTimeMillis,
- dex2oatCpuTimeMillis, sizeBytes, sizeBeforeBytes, extraStatus);
+ dex2oatCpuTimeMillis, sizeBytes, sizeBeforeBytes, extendedStatusFlags,
+ Collections.unmodifiableList(externalProfileErrors));
+ }
+
+ /** @hide */
+ @VisibleForTesting
+ public static @NonNull DexContainerFileDexoptResult create(@NonNull String dexContainerFile,
+ boolean isPrimaryAbi, @NonNull String abi, @NonNull String compilerFilter,
+ @DexoptResultStatus int status) {
+ return create(dexContainerFile, isPrimaryAbi, abi, compilerFilter, status,
+ 0 /* dex2oatWallTimeMillis */, 0 /* dex2oatCpuTimeMillis */, 0 /* sizeBytes */,
+ 0 /* sizeBeforeBytes */, 0 /* extendedStatusFlags */,
+ List.of() /* externalProfileErrors */);
}
/** The absolute path to the dex container file. */
@@ -279,8 +332,34 @@ public abstract class DexoptResult {
*/
public abstract long getSizeBeforeBytes();
- /** @hide */
- public abstract @DexoptResultExtraStatus int getExtraStatus();
+ /**
+ * A bitfield of the extended status flags.
+ *
+ * Flags that starts with `EXTENDED_SKIPPED_` are a subset of the reasons why dexopt is
+ * skipped. Note that they don't cover all possible reasons. At most one `EXTENDED_SKIPPED_`
+ * flag will be set, even if the situation meets multiple `EXTENDED_SKIPPED_` flags. The
+ * order of precedence of those flags is undefined.
+ *
+ * @hide
+ */
+ public abstract @DexoptResultExtendedStatusFlags int getExtendedStatusFlags();
+
+ /**
+ * Details of errors occurred when processing external profiles, one error per profile file
+ * that the dexopter tried to read.
+ *
+ * If the same dex container file is dexopted for multiple ABIs, the same profile errors
+ * will be repeated for each ABI in the {@link DexContainerFileDexoptResult}s of the same
+ * dex container file.
+ *
+ * The error messages are for logging only, and they include the paths to the profile files
+ * that caused the errors.
+ *
+ * @see #EXTENDED_BAD_EXTERNAL_PROFILE.
+ *
+ * @hide
+ */
+ public abstract @NonNull List<String> getExternalProfileErrors();
@Override
@NonNull
@@ -295,12 +374,12 @@ public abstract class DexoptResult {
+ "dex2oatCpuTimeMillis=%d, "
+ "sizeBytes=%d, "
+ "sizeBeforeBytes=%d, "
- + "extraStatus=[%s]}",
+ + "extendedStatusFlags=[%s]}",
getDexContainerFile(), isPrimaryAbi(), getAbi(), getActualCompilerFilter(),
DexoptResult.dexoptResultStatusToString(getStatus()),
getDex2oatWallTimeMillis(), getDex2oatCpuTimeMillis(), getSizeBytes(),
getSizeBeforeBytes(),
- DexoptResult.dexoptResultExtraStatusToString(getExtraStatus()));
+ DexoptResult.dexoptResultExtendedStatusFlagsToString(getExtendedStatusFlags()));
}
}
}
diff --git a/libartservice/service/java/com/android/server/art/model/OperationProgress.java b/libartservice/service/java/com/android/server/art/model/OperationProgress.java
index a47a556bdc..a0a7ddc4fc 100644
--- a/libartservice/service/java/com/android/server/art/model/OperationProgress.java
+++ b/libartservice/service/java/com/android/server/art/model/OperationProgress.java
@@ -38,7 +38,7 @@ public abstract class OperationProgress {
/** The overall progress, in the range of [0, 100]. */
public int getPercentage() {
- return 100 * getCurrent() / getTotal();
+ return getTotal() == 0 ? 100 : 100 * getCurrent() / getTotal();
}
/**
diff --git a/libartservice/service/javatests/com/android/server/art/ArtManagerLocalTest.java b/libartservice/service/javatests/com/android/server/art/ArtManagerLocalTest.java
index 747e51684b..5e3e7b9cd7 100644
--- a/libartservice/service/javatests/com/android/server/art/ArtManagerLocalTest.java
+++ b/libartservice/service/javatests/com/android/server/art/ArtManagerLocalTest.java
@@ -19,6 +19,8 @@ package com.android.server.art;
import static android.os.ParcelFileDescriptor.AutoCloseInputStream;
import static com.android.server.art.DexUseManagerLocal.DetailedSecondaryDexInfo;
+import static com.android.server.art.model.DexoptResult.DexContainerFileDexoptResult;
+import static com.android.server.art.model.DexoptResult.PackageDexoptResult;
import static com.android.server.art.model.DexoptStatus.DexContainerFileDexoptStatus;
import static com.android.server.art.testing.TestingUtils.deepEq;
import static com.android.server.art.testing.TestingUtils.inAnyOrder;
@@ -29,6 +31,7 @@ import static com.google.common.truth.Truth.assertThat;
import static org.mockito.AdditionalMatchers.not;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.matches;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyBoolean;
import static org.mockito.Mockito.anyInt;
@@ -57,6 +60,7 @@ import android.os.storage.StorageManager;
import androidx.test.filters.SmallTest;
import com.android.modules.utils.pm.PackageStateModulesUtils;
+import com.android.server.art.model.ArtFlags;
import com.android.server.art.model.Config;
import com.android.server.art.model.DeleteResult;
import com.android.server.art.model.DexoptParams;
@@ -89,6 +93,7 @@ import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.Executor;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
@@ -166,14 +171,11 @@ public class ArtManagerLocalTest {
.thenReturn("verify");
lenient().when(SystemProperties.get(eq("pm.dexopt.inactive"))).thenReturn("verify");
lenient()
- .when(SystemProperties.getInt(eq("pm.dexopt.bg-dexopt.concurrency"), anyInt()))
+ .when(SystemProperties.getInt(matches("pm\\.dexopt\\..*\\.concurrency"), anyInt()))
.thenReturn(3);
lenient()
.when(SystemProperties.getInt(
- eq("pm.dexopt.boot-after-mainline-update.concurrency"), anyInt()))
- .thenReturn(3);
- lenient()
- .when(SystemProperties.getInt(eq("pm.dexopt.inactive.concurrency"), anyInt()))
+ matches("persist\\.device_config\\.runtime\\..*_concurrency"), anyInt()))
.thenReturn(3);
lenient()
.when(SystemProperties.getInt(
@@ -228,7 +230,9 @@ public class ArtManagerLocalTest {
// By default, none of the profiles are usable.
lenient().when(mArtd.isProfileUsable(any(), any())).thenReturn(false);
- lenient().when(mArtd.copyAndRewriteProfile(any(), any(), any())).thenReturn(false);
+ lenient()
+ .when(mArtd.copyAndRewriteProfile(any(), any(), any()))
+ .thenReturn(TestingUtils.createCopyAndRewriteProfileNoProfile());
mArtManagerLocal = new ArtManagerLocal(mInjector);
}
@@ -408,7 +412,7 @@ public class ArtManagerLocalTest {
@Test
public void testDexoptPackage() throws Exception {
var params = new DexoptParams.Builder("install").build();
- var result = mock(DexoptResult.class);
+ var result = DexoptResult.create();
var cancellationSignal = new CancellationSignal();
when(mDexoptHelper.dexopt(any(), deepEq(List.of(PKG_NAME_1)), same(params),
@@ -422,7 +426,7 @@ public class ArtManagerLocalTest {
@Test
public void testResetDexoptStatus() throws Exception {
- var result = mock(DexoptResult.class);
+ var result = DexoptResult.create();
var cancellationSignal = new CancellationSignal();
when(mDexoptHelper.dexopt(
@@ -459,7 +463,7 @@ public class ArtManagerLocalTest {
@Test
public void testDexoptPackages() throws Exception {
- var dexoptResult = mock(DexoptResult.class);
+ var dexoptResult = DexoptResult.create();
var cancellationSignal = new CancellationSignal();
when(mDexUseManager.getPackageLastUsedAtMs(PKG_NAME_2)).thenReturn(CURRENT_TIME_MS);
simulateStorageLow();
@@ -474,7 +478,7 @@ public class ArtManagerLocalTest {
assertThat(mArtManagerLocal.dexoptPackages(mSnapshot, "bg-dexopt", cancellationSignal,
null /* processCallbackExecutor */, null /* processCallback */))
- .isSameInstanceAs(dexoptResult);
+ .isEqualTo(Map.of(ArtFlags.PASS_MAIN, dexoptResult));
// Nothing to downgrade.
verify(mDexoptHelper, never())
@@ -490,7 +494,7 @@ public class ArtManagerLocalTest {
when(mDexUseManager.getPackageLastUsedAtMs(PKG_NAME_1)).thenReturn(0l);
simulateStorageLow();
- var result = mock(DexoptResult.class);
+ var result = DexoptResult.create();
var cancellationSignal = new CancellationSignal();
// PKG_NAME_1 should be dexopted.
@@ -517,25 +521,28 @@ public class ArtManagerLocalTest {
when(mDexUseManager.getPackageLastUsedAtMs(PKG_NAME_1)).thenReturn(NOT_RECENT_TIME_MS);
simulateStorageLow();
- var result = mock(DexoptResult.class);
+ var mainResult = DexoptResult.create();
+ var downgradeResult = DexoptResult.create();
var cancellationSignal = new CancellationSignal();
// PKG_NAME_1 should not be dexopted.
- doReturn(result)
+ doReturn(mainResult)
.when(mDexoptHelper)
.dexopt(any(), deepEq(List.of(PKG_NAME_2)),
argThat(params -> params.getReason().equals("bg-dexopt")), any(), any(),
any(), any());
// PKG_NAME_1 should be downgraded.
- doReturn(result)
+ doReturn(downgradeResult)
.when(mDexoptHelper)
.dexopt(any(), deepEq(List.of(PKG_NAME_1)),
argThat(params -> params.getReason().equals("inactive")), any(), any(),
any(), any());
- mArtManagerLocal.dexoptPackages(mSnapshot, "bg-dexopt", cancellationSignal,
- null /* processCallbackExecutor */, null /* processCallback */);
+ assertThat(mArtManagerLocal.dexoptPackages(mSnapshot, "bg-dexopt", cancellationSignal,
+ null /* processCallbackExecutor */, null /* processCallback */))
+ .isEqualTo(Map.of(
+ ArtFlags.PASS_DOWNGRADE, downgradeResult, ArtFlags.PASS_MAIN, mainResult));
}
@Test
@@ -545,7 +552,7 @@ public class ArtManagerLocalTest {
when(userState.getFirstInstallTimeMillis()).thenReturn(NOT_RECENT_TIME_MS);
when(mDexUseManager.getPackageLastUsedAtMs(PKG_NAME_1)).thenReturn(NOT_RECENT_TIME_MS);
- var result = mock(DexoptResult.class);
+ var result = DexoptResult.create();
var cancellationSignal = new CancellationSignal();
// PKG_NAME_1 should not be dexopted.
@@ -566,7 +573,7 @@ public class ArtManagerLocalTest {
@Test
public void testDexoptPackagesBootAfterMainlineUpdate() throws Exception {
- var result = mock(DexoptResult.class);
+ var result = DexoptResult.create();
var cancellationSignal = new CancellationSignal();
lenient().when(mInjector.isSystemUiPackage(PKG_NAME_1)).thenReturn(true);
@@ -583,7 +590,7 @@ public class ArtManagerLocalTest {
@Test
public void testDexoptPackagesBootAfterMainlineUpdatePackagesNotFound() throws Exception {
- var result = mock(DexoptResult.class);
+ var result = DexoptResult.create();
var cancellationSignal = new CancellationSignal();
// PKG_NAME_1 is neither recently installed nor recently used.
PackageUserState userState = mPkgState1.getStateForUser(UserHandle.of(1));
@@ -615,7 +622,7 @@ public class ArtManagerLocalTest {
simulateStorageLow();
var params = new DexoptParams.Builder("bg-dexopt").build();
- var result = mock(DexoptResult.class);
+ var result = DexoptResult.create();
var cancellationSignal = new CancellationSignal();
mArtManagerLocal.setBatchDexoptStartCallback(ForkJoinPool.commonPool(),
@@ -645,7 +652,7 @@ public class ArtManagerLocalTest {
@Test
public void testDexoptPackagesOverrideCleared() throws Exception {
var params = new DexoptParams.Builder("bg-dexopt").build();
- var result = mock(DexoptResult.class);
+ var result = DexoptResult.create();
var cancellationSignal = new CancellationSignal();
mArtManagerLocal.setBatchDexoptStartCallback(ForkJoinPool.commonPool(),
@@ -661,7 +668,56 @@ public class ArtManagerLocalTest {
assertThat(mArtManagerLocal.dexoptPackages(mSnapshot, "bg-dexopt", cancellationSignal,
null /* processCallbackExecutor */, null /* processCallback */))
- .isSameInstanceAs(result);
+ .isEqualTo(Map.of(ArtFlags.PASS_MAIN, result));
+ }
+
+ @Test
+ public void testDexoptPackagesSupplementaryPass() throws Exception {
+ // The supplementary pass should only try dexopting PKG_NAME_2.
+ var mainResult = DexoptResult.create("speed-profile", "bg-dexopt",
+ List.of(PackageDexoptResult.create(PKG_NAME_1,
+ List.of(DexContainerFileDexoptResult.create("dex-file-1",
+ true /* isPrimaryAbi */, "arm64", "speed-profile",
+ DexoptResult.DEXOPT_PERFORMED),
+ DexContainerFileDexoptResult.create("dex-file-2",
+ true /* isPrimaryAbi */, "arm64", "speed",
+ DexoptResult.DEXOPT_SKIPPED)),
+ null /* packageLevelStatus */),
+ PackageDexoptResult.create(PKG_NAME_2,
+ List.of(DexContainerFileDexoptResult.create("dex-file-1",
+ true /* isPrimaryAbi */, "arm64", "speed-profile",
+ DexoptResult.DEXOPT_PERFORMED),
+ DexContainerFileDexoptResult.create("dex-file-2",
+ true /* isPrimaryAbi */, "arm64", "speed-profile",
+ DexoptResult.DEXOPT_SKIPPED)),
+ null /* packageLevelStatus */)));
+ var supplementaryResult = DexoptResult.create();
+ var cancellationSignal = new CancellationSignal();
+
+ // The main pass.
+ doReturn(mainResult)
+ .when(mDexoptHelper)
+ .dexopt(any(), inAnyOrder(PKG_NAME_1, PKG_NAME_2),
+ argThat(params
+ -> params.getReason().equals("bg-dexopt")
+ && (params.getFlags() & ArtFlags.FLAG_FORCE_MERGE_PROFILE)
+ == 0),
+ any(), any(), any(), any());
+
+ // The supplementary pass.
+ doReturn(supplementaryResult)
+ .when(mDexoptHelper)
+ .dexopt(any(), inAnyOrder(PKG_NAME_2),
+ argThat(params
+ -> params.getReason().equals("bg-dexopt")
+ && (params.getFlags() & ArtFlags.FLAG_FORCE_MERGE_PROFILE)
+ != 0),
+ any(), any(), any(), any());
+
+ assertThat(mArtManagerLocal.dexoptPackages(mSnapshot, "bg-dexopt", cancellationSignal,
+ null /* processCallbackExecutor */, null /* processCallback */))
+ .isEqualTo(Map.of(ArtFlags.PASS_MAIN, mainResult, ArtFlags.PASS_SUPPLEMENTARY,
+ supplementaryResult));
}
@Test(expected = IllegalStateException.class)
@@ -743,7 +799,7 @@ public class ArtManagerLocalTest {
.thenAnswer(invocation -> {
var output = invocation.<OutputProfile>getArgument(1);
output.profilePath.tmpPath = tempPathForRef;
- return true;
+ return TestingUtils.createCopyAndRewriteProfileSuccess();
});
// Verify that the reference file initialized from the DM file is used.
@@ -899,6 +955,31 @@ public class ArtManagerLocalTest {
}
@Test
+ public void testOnBoot() throws Exception {
+ var progressCallbackExecutor = mock(Executor.class);
+ var progressCallback = mock(Consumer.class);
+
+ when(mDexoptHelper.dexopt(any(), any(),
+ argThat(params -> params.getReason().equals(ReasonMapping.REASON_FIRST_BOOT)),
+ any(), any(), same(progressCallbackExecutor), same(progressCallback)))
+ .thenReturn(DexoptResult.create());
+
+ mArtManagerLocal.onBoot(
+ ReasonMapping.REASON_FIRST_BOOT, progressCallbackExecutor, progressCallback);
+ }
+
+ @Test
+ public void testOnBootNoProgressCallback() throws Exception {
+ when(mDexoptHelper.dexopt(any(), any(),
+ argThat(params -> params.getReason().equals(ReasonMapping.REASON_FIRST_BOOT)),
+ any(), any(), isNull(), isNull()))
+ .thenReturn(DexoptResult.create());
+
+ mArtManagerLocal.onBoot(ReasonMapping.REASON_FIRST_BOOT,
+ null /* progressCallbackExecutor */, null /* progressCallback */);
+ }
+
+ @Test
public void testCleanup() throws Exception {
// It should keep all artifacts.
doReturn(createGetDexoptStatusResult("speed-profile", "bg-dexopt", "location"))
diff --git a/libartservice/service/javatests/com/android/server/art/BackgroundDexoptJobTest.java b/libartservice/service/javatests/com/android/server/art/BackgroundDexoptJobTest.java
index 3528caf3c3..c61461d0c9 100644
--- a/libartservice/service/javatests/com/android/server/art/BackgroundDexoptJobTest.java
+++ b/libartservice/service/javatests/com/android/server/art/BackgroundDexoptJobTest.java
@@ -17,6 +17,8 @@
package com.android.server.art;
import static com.android.server.art.model.Config.Callback;
+import static com.android.server.art.model.DexoptResult.DexoptResultStatus;
+import static com.android.server.art.model.DexoptResult.PackageDexoptResult;
import static com.google.common.truth.Truth.assertThat;
@@ -25,6 +27,7 @@ import static org.mockito.Mockito.anyBoolean;
import static org.mockito.Mockito.anyInt;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.lenient;
+import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.same;
import static org.mockito.Mockito.times;
@@ -56,6 +59,9 @@ import org.junit.runner.RunWith;
import org.mockito.ArgumentCaptor;
import org.mockito.Mock;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
@@ -74,12 +80,12 @@ public class BackgroundDexoptJobTest {
@Mock private PackageManagerLocal mPackageManagerLocal;
@Mock private PackageManagerLocal.FilteredSnapshot mSnapshot;
@Mock private JobScheduler mJobScheduler;
- @Mock private DexoptResult mDexoptResult;
@Mock private BackgroundDexoptJobService mJobService;
@Mock private JobParameters mJobParameters;
private Config mConfig;
private BackgroundDexoptJob mBackgroundDexoptJob;
private Semaphore mJobFinishedCalled = new Semaphore(0);
+ private Map<Integer, DexoptResult> mDexoptResultByPass;
@Before
public void setUp() throws Exception {
@@ -110,17 +116,19 @@ public class BackgroundDexoptJobTest {
lenient()
.when(mJobParameters.getStopReason())
.thenReturn(JobParameters.STOP_REASON_UNDEFINED);
+
+ mDexoptResultByPass = new HashMap<>();
}
@Test
public void testStart() {
when(mArtManagerLocal.dexoptPackages(
same(mSnapshot), eq(ReasonMapping.REASON_BG_DEXOPT), any(), any(), any()))
- .thenReturn(mDexoptResult);
+ .thenReturn(mDexoptResultByPass);
Result result = Utils.getFuture(mBackgroundDexoptJob.start());
assertThat(result).isInstanceOf(CompletedResult.class);
- assertThat(((CompletedResult) result).dexoptResult()).isSameInstanceAs(mDexoptResult);
+ assertThat(((CompletedResult) result).dexoptResultByPass()).isEqualTo(mDexoptResultByPass);
verify(mArtManagerLocal).cleanup(same(mSnapshot));
}
@@ -131,7 +139,7 @@ public class BackgroundDexoptJobTest {
when(mArtManagerLocal.dexoptPackages(any(), any(), any(), any(), any()))
.thenAnswer(invocation -> {
assertThat(dexoptDone.tryAcquire(TIMEOUT_SEC, TimeUnit.SECONDS)).isTrue();
- return mDexoptResult;
+ return mDexoptResultByPass;
});
Future<Result> future1 = mBackgroundDexoptJob.start();
@@ -147,7 +155,7 @@ public class BackgroundDexoptJobTest {
@Test
public void testStartAnother() {
when(mArtManagerLocal.dexoptPackages(any(), any(), any(), any(), any()))
- .thenReturn(mDexoptResult);
+ .thenReturn(mDexoptResultByPass);
Future<Result> future1 = mBackgroundDexoptJob.start();
Utils.getFuture(future1);
@@ -172,7 +180,7 @@ public class BackgroundDexoptJobTest {
.thenReturn(true);
when(mArtManagerLocal.dexoptPackages(any(), any(), any(), any(), any()))
- .thenReturn(mDexoptResult);
+ .thenReturn(mDexoptResultByPass);
// The `start` method should ignore the system property. The system property is for
// `schedule`.
@@ -187,7 +195,7 @@ public class BackgroundDexoptJobTest {
assertThat(dexoptCancelled.tryAcquire(TIMEOUT_SEC, TimeUnit.SECONDS)).isTrue();
var cancellationSignal = invocation.<CancellationSignal>getArgument(2);
assertThat(cancellationSignal.isCanceled()).isTrue();
- return mDexoptResult;
+ return mDexoptResultByPass;
});
Future<Result> future = mBackgroundDexoptJob.start();
@@ -273,9 +281,12 @@ public class BackgroundDexoptJobTest {
@Test
public void testWantsRescheduleFalsePerformed() throws Exception {
- when(mDexoptResult.getFinalStatus()).thenReturn(DexoptResult.DEXOPT_PERFORMED);
+ DexoptResult downgradeResult = createDexoptResultWithStatus(DexoptResult.DEXOPT_PERFORMED);
+ mDexoptResultByPass.put(ArtFlags.PASS_DOWNGRADE, downgradeResult);
+ DexoptResult mainResult = createDexoptResultWithStatus(DexoptResult.DEXOPT_PERFORMED);
+ mDexoptResultByPass.put(ArtFlags.PASS_MAIN, mainResult);
when(mArtManagerLocal.dexoptPackages(any(), any(), any(), any(), any()))
- .thenReturn(mDexoptResult);
+ .thenReturn(mDexoptResultByPass);
mBackgroundDexoptJob.onStartJob(mJobService, mJobParameters);
assertThat(mJobFinishedCalled.tryAcquire(TIMEOUT_SEC, TimeUnit.SECONDS)).isTrue();
@@ -296,13 +307,22 @@ public class BackgroundDexoptJobTest {
@Test
public void testWantsRescheduleTrue() throws Exception {
- when(mDexoptResult.getFinalStatus()).thenReturn(DexoptResult.DEXOPT_CANCELLED);
+ DexoptResult downgradeResult = createDexoptResultWithStatus(DexoptResult.DEXOPT_PERFORMED);
+ mDexoptResultByPass.put(ArtFlags.PASS_DOWNGRADE, downgradeResult);
+ DexoptResult mainResult = createDexoptResultWithStatus(DexoptResult.DEXOPT_CANCELLED);
+ mDexoptResultByPass.put(ArtFlags.PASS_MAIN, mainResult);
when(mArtManagerLocal.dexoptPackages(any(), any(), any(), any(), any()))
- .thenReturn(mDexoptResult);
+ .thenReturn(mDexoptResultByPass);
mBackgroundDexoptJob.onStartJob(mJobService, mJobParameters);
assertThat(mJobFinishedCalled.tryAcquire(TIMEOUT_SEC, TimeUnit.SECONDS)).isTrue();
verify(mJobService).jobFinished(any(), eq(true) /* wantsReschedule */);
}
+
+ private DexoptResult createDexoptResultWithStatus(@DexoptResultStatus int status) {
+ return DexoptResult.create("compiler-filter", "reason",
+ List.of(PackageDexoptResult.create(
+ "package-name", List.of() /* dexContainerFileDexoptResults */, status)));
+ }
}
diff --git a/libartservice/service/javatests/com/android/server/art/DexUseManagerTest.java b/libartservice/service/javatests/com/android/server/art/DexUseManagerTest.java
index cf27d8cba1..f34cd434ed 100644
--- a/libartservice/service/javatests/com/android/server/art/DexUseManagerTest.java
+++ b/libartservice/service/javatests/com/android/server/art/DexUseManagerTest.java
@@ -66,6 +66,7 @@ import java.io.FileInputStream;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.HashMap;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -117,14 +118,21 @@ public class DexUseManagerTest {
lenient().when(Process.isIsolatedUid(anyInt())).thenReturn(false);
- mPackageStates = new HashMap<>();
+ // Use a LinkedHashMap so that we can control the iteration order.
+ mPackageStates = new LinkedHashMap<>();
- PackageState loadingPkgState = createPackageState(LOADING_PKG_NAME, "armeabi-v7a");
+ // Put the null package in front of other packages to verify that it's properly skipped.
+ PackageState nullPkgState =
+ createPackageState("com.example.null", "arm64-v8a", false /* hasPackage */);
+ addPackage("com.example.null", nullPkgState);
+ PackageState loadingPkgState =
+ createPackageState(LOADING_PKG_NAME, "armeabi-v7a", true /* hasPackage */);
addPackage(LOADING_PKG_NAME, loadingPkgState);
- PackageState owningPkgState = createPackageState(OWNING_PKG_NAME, "arm64-v8a");
+ PackageState owningPkgState =
+ createPackageState(OWNING_PKG_NAME, "arm64-v8a", true /* hasPackage */);
addPackage(OWNING_PKG_NAME, owningPkgState);
PackageState platformPkgState =
- createPackageState(Utils.PLATFORM_PACKAGE_NAME, "arm64-v8a");
+ createPackageState(Utils.PLATFORM_PACKAGE_NAME, "arm64-v8a", true /* hasPackage */);
addPackage(Utils.PLATFORM_PACKAGE_NAME, platformPkgState);
lenient().when(mSnapshot.getPackageStates()).thenReturn(mPackageStates);
@@ -582,7 +590,8 @@ public class DexUseManagerTest {
@Test
public void testCleanup() throws Exception {
- PackageState pkgState = createPackageState("com.example.deletedpackage", "arm64-v8a");
+ PackageState pkgState = createPackageState(
+ "com.example.deletedpackage", "arm64-v8a", true /* hasPackage */);
addPackage("com.example.deletedpackage", pkgState);
lenient()
.when(mArtd.getDexFileVisibility("/data/app/com.example.deletedpackage/base.apk"))
@@ -758,11 +767,12 @@ public class DexUseManagerTest {
return pkg;
}
- private PackageState createPackageState(String packageName, String primaryAbi) {
+ private PackageState createPackageState(
+ String packageName, String primaryAbi, boolean hasPackage) {
PackageState pkgState = mock(PackageState.class);
lenient().when(pkgState.getPackageName()).thenReturn(packageName);
AndroidPackage pkg = createPackage(packageName);
- lenient().when(pkgState.getAndroidPackage()).thenReturn(pkg);
+ lenient().when(pkgState.getAndroidPackage()).thenReturn(hasPackage ? pkg : null);
lenient().when(pkgState.getPrimaryCpuAbi()).thenReturn(primaryAbi);
return pkgState;
}
diff --git a/libartservice/service/javatests/com/android/server/art/DexoptHelperTest.java b/libartservice/service/javatests/com/android/server/art/DexoptHelperTest.java
index 21d89e65a3..97bdb4f4a3 100644
--- a/libartservice/service/javatests/com/android/server/art/DexoptHelperTest.java
+++ b/libartservice/service/javatests/com/android/server/art/DexoptHelperTest.java
@@ -819,14 +819,10 @@ public class DexoptHelperTest {
private List<DexContainerFileDexoptResult> createResults(
String dexPath, @DexoptResultStatus int status1, @DexoptResultStatus int status2) {
- return List.of(DexContainerFileDexoptResult.create(dexPath, true /* isPrimaryAbi */,
- "arm64-v8a", "verify", status1, 100 /* dex2oatWallTimeMillis */,
- 400 /* dex2oatCpuTimeMillis */, 0 /* sizeBytes */,
- 0 /* sizeBeforeBytes */, 0 /* extraStatus */),
- DexContainerFileDexoptResult.create(dexPath, false /* isPrimaryAbi */,
- "armeabi-v7a", "verify", status2, 100 /* dex2oatWallTimeMillis */,
- 400 /* dex2oatCpuTimeMillis */, 0 /* sizeBytes */, 0 /* sizeBeforeBytes */,
- 0 /* extraStatus */));
+ return List.of(DexContainerFileDexoptResult.create(
+ dexPath, true /* isPrimaryAbi */, "arm64-v8a", "verify", status1),
+ DexContainerFileDexoptResult.create(
+ dexPath, false /* isPrimaryAbi */, "armeabi-v7a", "verify", status2));
}
private void checkPackageResult(DexoptResult result, int index, String packageName,
diff --git a/libartservice/service/javatests/com/android/server/art/PrimaryDexopterParameterizedTest.java b/libartservice/service/javatests/com/android/server/art/PrimaryDexopterParameterizedTest.java
index fe9584797e..36b6a58fcf 100644
--- a/libartservice/service/javatests/com/android/server/art/PrimaryDexopterParameterizedTest.java
+++ b/libartservice/service/javatests/com/android/server/art/PrimaryDexopterParameterizedTest.java
@@ -156,6 +156,18 @@ public class PrimaryDexopterParameterizedTest extends PrimaryDexopterTestBase {
params.mSkipIfStorageLow = true;
list.add(params);
+ params = new Params();
+ params.mIgnoreProfile = true;
+ params.mRequestedCompilerFilter = "speed-profile";
+ params.mExpectedCompilerFilter = "verify";
+ list.add(params);
+
+ params = new Params();
+ params.mIgnoreProfile = true;
+ params.mRequestedCompilerFilter = "speed";
+ params.mExpectedCompilerFilter = "speed";
+ list.add(params);
+
return list;
}
@@ -195,6 +207,8 @@ public class PrimaryDexopterParameterizedTest extends PrimaryDexopterTestBase {
ArtFlags.FLAG_SHOULD_DOWNGRADE)
.setFlags(mParams.mSkipIfStorageLow ? ArtFlags.FLAG_SKIP_IF_STORAGE_LOW : 0,
ArtFlags.FLAG_SKIP_IF_STORAGE_LOW)
+ .setFlags(mParams.mIgnoreProfile ? ArtFlags.FLAG_IGNORE_PROFILE : 0,
+ ArtFlags.FLAG_IGNORE_PROFILE)
.build();
mPrimaryDexopter =
@@ -280,23 +294,20 @@ public class PrimaryDexopterParameterizedTest extends PrimaryDexopterTestBase {
mParams.mExpectedCompilerFilter, DexoptResult.DEXOPT_PERFORMED,
100 /* dex2oatWallTimeMillis */, 400 /* dex2oatCpuTimeMillis */,
30000 /* sizeBytes */, 32000 /* sizeBeforeBytes */,
- 0 /* extraStatus */),
+ 0 /* extendedStatusFlags */, List.of() /* externalProfileErrors */),
DexContainerFileDexoptResult.create("/data/app/foo/base.apk",
false /* isPrimaryAbi */, "armeabi-v7a",
- mParams.mExpectedCompilerFilter, DexoptResult.DEXOPT_FAILED,
- 0 /* dex2oatWallTimeMillis */, 0 /* dex2oatCpuTimeMillis */,
- 0 /* sizeBytes */, 0 /* sizeBeforeBytes */, 0 /* extraStatus */),
+ mParams.mExpectedCompilerFilter, DexoptResult.DEXOPT_FAILED),
DexContainerFileDexoptResult.create("/data/app/foo/split_0.apk",
true /* isPrimaryAbi */, "arm64-v8a",
- mParams.mExpectedCompilerFilter, DexoptResult.DEXOPT_SKIPPED,
- 0 /* dex2oatWallTimeMillis */, 0 /* dex2oatCpuTimeMillis */,
- 0 /* sizeBytes */, 0 /* sizeBeforeBytes */, 0 /* extraStatus */),
+ mParams.mExpectedCompilerFilter, DexoptResult.DEXOPT_SKIPPED),
DexContainerFileDexoptResult.create("/data/app/foo/split_0.apk",
false /* isPrimaryAbi */, "armeabi-v7a",
mParams.mExpectedCompilerFilter, DexoptResult.DEXOPT_PERFORMED,
200 /* dex2oatWallTimeMillis */, 200 /* dex2oatCpuTimeMillis */,
10000 /* sizeBytes */, 0 /* sizeBeforeBytes */,
- 0 /* extraStatus */));
+ 0 /* extendedStatusFlags */,
+ List.of() /* externalProfileErrors */));
// Verify that there are no more calls than the ones above.
verify(mArtd, times(3))
@@ -319,6 +330,7 @@ public class PrimaryDexopterParameterizedTest extends PrimaryDexopterTestBase {
public boolean mForce = false;
public boolean mShouldDowngrade = false;
public boolean mSkipIfStorageLow = false;
+ public boolean mIgnoreProfile = false;
// System properties.
public boolean mAlwaysDebuggable = false;
@@ -331,8 +343,8 @@ public class PrimaryDexopterParameterizedTest extends PrimaryDexopterTestBase {
public boolean mExpectedIsHiddenApiPolicyEnabled = true;
public String toString() {
- return String.format("mIsInDalvikCache=%b,"
- + "mHiddenApiEnforcementPolicy=%d,"
+ return String.format("isInDalvikCache=%b,"
+ + "hiddenApiEnforcementPolicy=%d,"
+ "isVmSafeMode=%b,"
+ "isDebuggable=%b,"
+ "isSystemUi=%b,"
@@ -341,7 +353,8 @@ public class PrimaryDexopterParameterizedTest extends PrimaryDexopterTestBase {
+ "requestedCompilerFilter=%s,"
+ "force=%b,"
+ "shouldDowngrade=%b,"
- + "mSkipIfStorageLow=%b,"
+ + "skipIfStorageLow=%b,"
+ + "ignoreProfile=%b,"
+ "alwaysDebuggable=%b"
+ " => "
+ "targetCompilerFilter=%s,"
@@ -350,8 +363,8 @@ public class PrimaryDexopterParameterizedTest extends PrimaryDexopterTestBase {
+ "expectedIsHiddenApiPolicyEnabled=%b",
mIsInDalvikCache, mHiddenApiEnforcementPolicy, mIsVmSafeMode, mIsDebuggable,
mIsSystemUi, mIsLauncher, mIsUseEmbeddedDex, mRequestedCompilerFilter, mForce,
- mShouldDowngrade, mSkipIfStorageLow, mAlwaysDebuggable, mExpectedCompilerFilter,
- mExpectedDexoptTrigger, mExpectedIsDebuggable,
+ mShouldDowngrade, mSkipIfStorageLow, mIgnoreProfile, mAlwaysDebuggable,
+ mExpectedCompilerFilter, mExpectedDexoptTrigger, mExpectedIsDebuggable,
mExpectedIsHiddenApiPolicyEnabled);
}
}
diff --git a/libartservice/service/javatests/com/android/server/art/PrimaryDexopterTest.java b/libartservice/service/javatests/com/android/server/art/PrimaryDexopterTest.java
index 554b8c681c..cdabcf341c 100644
--- a/libartservice/service/javatests/com/android/server/art/PrimaryDexopterTest.java
+++ b/libartservice/service/javatests/com/android/server/art/PrimaryDexopterTest.java
@@ -109,7 +109,9 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
// By default, none of the profiles are usable.
lenient().when(mArtd.isProfileUsable(any(), any())).thenReturn(false);
- lenient().when(mArtd.copyAndRewriteProfile(any(), any(), any())).thenReturn(false);
+ lenient()
+ .when(mArtd.copyAndRewriteProfile(any(), any(), any()))
+ .thenReturn(TestingUtils.createCopyAndRewriteProfileNoProfile());
// By default, no DM file exists.
lenient().when(mArtd.getDmFileVisibility(any())).thenReturn(FileVisibility.NOT_FOUND);
@@ -175,7 +177,8 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
.dexopt(any(), eq(mSplit0DexPath), eq("arm"), any(), any(), any(), isNull(), any(),
anyInt(), any(), any());
- mPrimaryDexopter.dexopt();
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+ verifyStatusAllOk(results);
}
@Test
@@ -184,7 +187,8 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
.when(mArtd.getDmFileVisibility(deepEq(mDmFile)))
.thenReturn(FileVisibility.OTHER_READABLE);
- mPrimaryDexopter.dexopt();
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+ verifyStatusAllOk(results);
verify(mArtd, times(2))
.dexopt(any(), eq(mDexPath), any(), any(), any(), any(), any(), deepEq(mDmFile),
@@ -209,7 +213,8 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
makeProfileUsable(mPrebuiltProfile);
makeProfileUsable(mDmProfile);
- mPrimaryDexopter.dexopt();
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+ verifyStatusAllOk(results);
verify(mArtd).getDexoptNeeded(
eq(mDexPath), eq("arm64"), any(), eq("speed-profile"), eq(mDefaultDexoptTrigger));
@@ -245,7 +250,8 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
makeProfileUsable(mPrebuiltProfile);
makeProfileUsable(mDmProfile);
- mPrimaryDexopter.dexopt();
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+ verifyStatusAllOk(results);
checkDexoptWithProfile(verify(mArtd), mDexPath, "arm64", mRefProfile,
true /* isOtherReadable */, true /* generateAppImage */);
@@ -262,7 +268,8 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
makeProfileUsable(mPrebuiltProfile);
makeProfileUsable(mDmProfile);
- mPrimaryDexopter.dexopt();
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+ verifyStatusAllOk(results);
InOrder inOrder = inOrder(mArtd);
@@ -284,8 +291,7 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
@Test
public void testDexoptMergesProfiles() throws Exception {
- when(mPkgState.getStateForUser(eq(UserHandle.of(0)))).thenReturn(mPkgUserStateInstalled);
- when(mPkgState.getStateForUser(eq(UserHandle.of(2)))).thenReturn(mPkgUserStateInstalled);
+ setPackageInstalledForUserIds(0, 2);
when(mArtd.mergeProfiles(any(), any(), any(), any(), any())).thenReturn(true);
@@ -293,7 +299,8 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
when(mArtd.getProfileVisibility(deepEq(mRefProfile)))
.thenReturn(FileVisibility.OTHER_READABLE);
- mPrimaryDexopter.dexopt();
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+ verifyStatusAllOk(results);
InOrder inOrder = inOrder(mArtd);
@@ -328,8 +335,7 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
@Test
public void testDexoptMergesProfilesMergeFailed() throws Exception {
- when(mPkgState.getStateForUser(eq(UserHandle.of(0)))).thenReturn(mPkgUserStateInstalled);
- when(mPkgState.getStateForUser(eq(UserHandle.of(2)))).thenReturn(mPkgUserStateInstalled);
+ setPackageInstalledForUserIds(0, 2);
when(mArtd.mergeProfiles(any(), any(), any(), any(), any())).thenReturn(false);
@@ -337,7 +343,8 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
when(mArtd.getProfileVisibility(deepEq(mRefProfile)))
.thenReturn(FileVisibility.OTHER_READABLE);
- mPrimaryDexopter.dexopt();
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+ verifyStatusAllOk(results);
// It should still use "speed-profile", but with the existing reference profile only.
verify(mArtd).getDexoptNeeded(
@@ -355,12 +362,35 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
}
@Test
+ public void testDexoptMergesProfilesForceMerge() throws Exception {
+ mDexoptParams = mDexoptParams.toBuilder()
+ .setFlags(ArtFlags.FLAG_FORCE_MERGE_PROFILE,
+ ArtFlags.FLAG_FORCE_MERGE_PROFILE)
+ .build();
+ mPrimaryDexopter =
+ new PrimaryDexopter(mInjector, mPkgState, mPkg, mDexoptParams, mCancellationSignal);
+
+ setPackageInstalledForUserIds(0, 2);
+
+ mMergeProfileOptions.forceMerge = true;
+ when(mArtd.mergeProfiles(any(), any(), any(), any(), deepEq(mMergeProfileOptions)))
+ .thenReturn(true);
+
+ makeProfileUsable(mRefProfile);
+ when(mArtd.getProfileVisibility(deepEq(mRefProfile)))
+ .thenReturn(FileVisibility.OTHER_READABLE);
+
+ mPrimaryDexopter.dexopt();
+ }
+
+ @Test
public void testDexoptUsesDmProfile() throws Exception {
makeProfileNotUsable(mRefProfile);
makeProfileNotUsable(mPrebuiltProfile);
makeProfileUsable(mDmProfile);
- mPrimaryDexopter.dexopt();
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+ verifyStatusAllOk(results);
verify(mArtd).copyAndRewriteProfile(
deepEq(mDmProfile), deepEq(mPublicOutputProfile), eq(mDexPath));
@@ -377,6 +407,32 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
}
@Test
+ public void testDexoptExternalProfileErrors() throws Exception {
+ makeProfileNotUsable(mRefProfile);
+
+ // Having no profile should not be reported.
+ makeProfileNotUsable(mPrebuiltProfile);
+
+ // Having a bad profile should be reported.
+ lenient()
+ .when(mArtd.copyAndRewriteProfile(deepEq(mDmProfile), any(), any()))
+ .thenReturn(TestingUtils.createCopyAndRewriteProfileBadProfile("error_msg"));
+
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+
+ assertThat(results.get(0).getStatus()).isEqualTo(DexoptResult.DEXOPT_PERFORMED);
+ assertThat(results.get(0).getExtendedStatusFlags()
+ & DexoptResult.EXTENDED_BAD_EXTERNAL_PROFILE)
+ .isNotEqualTo(0);
+ assertThat(results.get(0).getExternalProfileErrors()).containsExactly("error_msg");
+ assertThat(results.get(1).getStatus()).isEqualTo(DexoptResult.DEXOPT_PERFORMED);
+ assertThat(results.get(1).getExtendedStatusFlags()
+ & DexoptResult.EXTENDED_BAD_EXTERNAL_PROFILE)
+ .isNotEqualTo(0);
+ assertThat(results.get(1).getExternalProfileErrors()).containsExactly("error_msg");
+ }
+
+ @Test
public void testDexoptDeletesProfileOnFailure() throws Exception {
makeProfileNotUsable(mRefProfile);
makeProfileNotUsable(mPrebuiltProfile);
@@ -411,7 +467,8 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
argThat(artifactsPath -> artifactsPath.dexPath == mDexPath)))
.thenReturn(FileVisibility.NOT_OTHER_READABLE);
- mPrimaryDexopter.dexopt();
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+ verifyStatusAllOk(results);
verify(mArtd).copyAndRewriteProfile(
deepEq(mDmProfile), deepEq(mPublicOutputProfile), eq(mDexPath));
@@ -451,7 +508,8 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
argThat(artifactsPath -> artifactsPath.dexPath == mDexPath)))
.thenReturn(FileVisibility.OTHER_READABLE);
- mPrimaryDexopter.dexopt();
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+ verifyStatusAllOk(results);
// It should use the default dexopt trigger.
verify(mArtd).getDexoptNeeded(
@@ -466,7 +524,8 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
when(mArtd.getProfileVisibility(deepEq(mSplit0RefProfile)))
.thenReturn(FileVisibility.NOT_OTHER_READABLE);
- mPrimaryDexopter.dexopt();
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+ verifyStatusAllOk(results);
verify(mArtd).getDexoptNeeded(eq(mSplit0DexPath), eq("arm64"), any(), eq("speed-profile"),
eq(mDefaultDexoptTrigger));
@@ -564,7 +623,8 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
mPrimaryDexopter =
new PrimaryDexopter(mInjector, mPkgState, mPkg, mDexoptParams, mCancellationSignal);
- mPrimaryDexopter.dexopt();
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+ verifyStatusAllOk(results);
verify(mArtd, times(2))
.dexopt(any(), eq(mDexPath), any(), any(), any(), any(), any(), any(), anyInt(),
@@ -585,7 +645,8 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
mPrimaryDexopter =
new PrimaryDexopter(mInjector, mPkgState, mPkg, mDexoptParams, mCancellationSignal);
- mPrimaryDexopter.dexopt();
+ List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
+ verifyStatusAllOk(results);
verify(mArtd, never())
.dexopt(any(), eq(mDexPath), any(), any(), any(), any(), any(), any(), anyInt(),
@@ -609,16 +670,20 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
assertThat(results.get(0).getStatus()).isEqualTo(DexoptResult.DEXOPT_PERFORMED);
- assertThat(results.get(0).getExtraStatus() & DexoptResult.EXTRA_SKIPPED_STORAGE_LOW)
+ assertThat(
+ results.get(0).getExtendedStatusFlags() & DexoptResult.EXTENDED_SKIPPED_STORAGE_LOW)
.isEqualTo(0);
assertThat(results.get(1).getStatus()).isEqualTo(DexoptResult.DEXOPT_SKIPPED);
- assertThat(results.get(1).getExtraStatus() & DexoptResult.EXTRA_SKIPPED_STORAGE_LOW)
+ assertThat(
+ results.get(1).getExtendedStatusFlags() & DexoptResult.EXTENDED_SKIPPED_STORAGE_LOW)
.isNotEqualTo(0);
assertThat(results.get(2).getStatus()).isEqualTo(DexoptResult.DEXOPT_SKIPPED);
- assertThat(results.get(2).getExtraStatus() & DexoptResult.EXTRA_SKIPPED_STORAGE_LOW)
+ assertThat(
+ results.get(2).getExtendedStatusFlags() & DexoptResult.EXTENDED_SKIPPED_STORAGE_LOW)
.isNotEqualTo(0);
assertThat(results.get(3).getStatus()).isEqualTo(DexoptResult.DEXOPT_PERFORMED);
- assertThat(results.get(3).getExtraStatus() & DexoptResult.EXTRA_SKIPPED_STORAGE_LOW)
+ assertThat(
+ results.get(3).getExtendedStatusFlags() & DexoptResult.EXTENDED_SKIPPED_STORAGE_LOW)
.isEqualTo(0);
verify(mArtd, times(2))
@@ -643,16 +708,20 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
List<DexContainerFileDexoptResult> results = mPrimaryDexopter.dexopt();
assertThat(results.get(0).getStatus()).isEqualTo(DexoptResult.DEXOPT_SKIPPED);
- assertThat(results.get(0).getExtraStatus() & DexoptResult.EXTRA_SKIPPED_NO_DEX_CODE)
+ assertThat(
+ results.get(0).getExtendedStatusFlags() & DexoptResult.EXTENDED_SKIPPED_NO_DEX_CODE)
.isNotEqualTo(0);
assertThat(results.get(1).getStatus()).isEqualTo(DexoptResult.DEXOPT_SKIPPED);
- assertThat(results.get(1).getExtraStatus() & DexoptResult.EXTRA_SKIPPED_NO_DEX_CODE)
+ assertThat(
+ results.get(1).getExtendedStatusFlags() & DexoptResult.EXTENDED_SKIPPED_NO_DEX_CODE)
.isNotEqualTo(0);
assertThat(results.get(2).getStatus()).isEqualTo(DexoptResult.DEXOPT_SKIPPED);
- assertThat(results.get(2).getExtraStatus() & DexoptResult.EXTRA_SKIPPED_NO_DEX_CODE)
+ assertThat(
+ results.get(2).getExtendedStatusFlags() & DexoptResult.EXTENDED_SKIPPED_NO_DEX_CODE)
.isEqualTo(0);
assertThat(results.get(3).getStatus()).isEqualTo(DexoptResult.DEXOPT_PERFORMED);
- assertThat(results.get(3).getExtraStatus() & DexoptResult.EXTRA_SKIPPED_NO_DEX_CODE)
+ assertThat(
+ results.get(3).getExtendedStatusFlags() & DexoptResult.EXTENDED_SKIPPED_NO_DEX_CODE)
.isEqualTo(0);
}
@@ -691,7 +760,7 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
.when(mArtd.copyAndRewriteProfile(deepEq(profile), any(), any()))
.thenAnswer(invocation -> {
mUsedProfiles.add(invocation.<ProfilePath>getArgument(0));
- return true;
+ return TestingUtils.createCopyAndRewriteProfileSuccess();
});
}
@@ -699,6 +768,22 @@ public class PrimaryDexopterTest extends PrimaryDexopterTestBase {
lenient().when(mArtd.isProfileUsable(deepEq(profile), any())).thenReturn(false);
lenient()
.when(mArtd.copyAndRewriteProfile(deepEq(profile), any(), any()))
- .thenReturn(false);
+ .thenReturn(TestingUtils.createCopyAndRewriteProfileNoProfile());
+ }
+
+ private void verifyStatusAllOk(List<DexContainerFileDexoptResult> results) {
+ for (DexContainerFileDexoptResult result : results) {
+ assertThat(result.getStatus()).isEqualTo(DexoptResult.DEXOPT_PERFORMED);
+ assertThat(result.getExtendedStatusFlags()).isEqualTo(0);
+ assertThat(result.getExternalProfileErrors()).isEmpty();
+ }
+ }
+
+ /** Dexopter relies on this information to determine which current profiles to check. */
+ private void setPackageInstalledForUserIds(int... userIds) {
+ for (int userId : userIds) {
+ when(mPkgState.getStateForUser(eq(UserHandle.of(userId))))
+ .thenReturn(mPkgUserStateInstalled);
+ }
}
}
diff --git a/libartservice/service/javatests/com/android/server/art/ReasonMappingTest.java b/libartservice/service/javatests/com/android/server/art/ReasonMappingTest.java
index 55fd0b43ce..8338d5a706 100644
--- a/libartservice/service/javatests/com/android/server/art/ReasonMappingTest.java
+++ b/libartservice/service/javatests/com/android/server/art/ReasonMappingTest.java
@@ -20,6 +20,7 @@ import static com.google.common.truth.Truth.assertThat;
import static org.mockito.Mockito.anyInt;
import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.when;
import android.os.SystemProperties;
@@ -33,6 +34,7 @@ import com.android.server.art.testing.StaticMockitoRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
+import org.mockito.ArgumentCaptor;
@SmallTest
@RunWith(AndroidJUnit4.class)
@@ -81,8 +83,27 @@ public class ReasonMappingTest {
@Test
public void testGetConcurrencyForReason() {
- when(SystemProperties.getInt(eq("pm.dexopt.bg-dexopt.concurrency"), anyInt()))
+ var defaultCaptor = ArgumentCaptor.forClass(Integer.class);
+ lenient()
+ .when(SystemProperties.getInt(
+ eq("persist.device_config.runtime.bg-dexopt_concurrency"),
+ defaultCaptor.capture()))
+ .thenAnswer(invocation -> defaultCaptor.getValue());
+ lenient()
+ .when(SystemProperties.getInt(eq("pm.dexopt.bg-dexopt.concurrency"), anyInt()))
.thenReturn(3);
assertThat(ReasonMapping.getConcurrencyForReason("bg-dexopt")).isEqualTo(3);
}
+
+ @Test
+ public void testGetConcurrencyForReasonFromPhFlag() {
+ lenient()
+ .when(SystemProperties.getInt(
+ eq("persist.device_config.runtime.bg-dexopt_concurrency"), anyInt()))
+ .thenReturn(4);
+ lenient()
+ .when(SystemProperties.getInt(eq("pm.dexopt.bg-dexopt.concurrency"), anyInt()))
+ .thenReturn(3);
+ assertThat(ReasonMapping.getConcurrencyForReason("bg-dexopt")).isEqualTo(4);
+ }
}
diff --git a/libartservice/service/javatests/com/android/server/art/SecondaryDexopterTest.java b/libartservice/service/javatests/com/android/server/art/SecondaryDexopterTest.java
index 35e256ac3c..85d298393a 100644
--- a/libartservice/service/javatests/com/android/server/art/SecondaryDexopterTest.java
+++ b/libartservice/service/javatests/com/android/server/art/SecondaryDexopterTest.java
@@ -165,21 +165,13 @@ public class SecondaryDexopterTest {
.comparingElementsUsing(TestingUtils.<DexContainerFileDexoptResult>deepEquality())
.containsExactly(
DexContainerFileDexoptResult.create(DEX_1, true /* isPrimaryAbi */,
- "arm64-v8a", "speed-profile", DexoptResult.DEXOPT_PERFORMED,
- 0 /* dex2oatWallTimeMillis */, 0 /* dex2oatCpuTimeMillis */,
- 0 /* sizeBytes */, 0 /* sizeBeforeBytes */, 0 /* extraStatus */),
+ "arm64-v8a", "speed-profile", DexoptResult.DEXOPT_PERFORMED),
DexContainerFileDexoptResult.create(DEX_2, true /* isPrimaryAbi */,
- "arm64-v8a", "speed", DexoptResult.DEXOPT_PERFORMED,
- 0 /* dex2oatWallTimeMillis */, 0 /* dex2oatCpuTimeMillis */,
- 0 /* sizeBytes */, 0 /* sizeBeforeBytes */, 0 /* extraStatus */),
+ "arm64-v8a", "speed", DexoptResult.DEXOPT_PERFORMED),
DexContainerFileDexoptResult.create(DEX_2, false /* isPrimaryAbi */,
- "armeabi-v7a", "speed", DexoptResult.DEXOPT_PERFORMED,
- 0 /* dex2oatWallTimeMillis */, 0 /* dex2oatCpuTimeMillis */,
- 0 /* sizeBytes */, 0 /* sizeBeforeBytes */, 0 /* extraStatus */),
+ "armeabi-v7a", "speed", DexoptResult.DEXOPT_PERFORMED),
DexContainerFileDexoptResult.create(DEX_3, true /* isPrimaryAbi */,
- "arm64-v8a", "verify", DexoptResult.DEXOPT_PERFORMED,
- 0 /* dex2oatWallTimeMillis */, 0 /* dex2oatCpuTimeMillis */,
- 0 /* sizeBytes */, 0 /* sizeBeforeBytes */, 0 /* extraStatus */));
+ "arm64-v8a", "verify", DexoptResult.DEXOPT_PERFORMED));
// It should use profile for dex 1.
diff --git a/libartservice/service/javatests/com/android/server/art/model/DexoptParamsTest.java b/libartservice/service/javatests/com/android/server/art/model/DexoptParamsTest.java
index 60986414fa..d98340ee17 100644
--- a/libartservice/service/javatests/com/android/server/art/model/DexoptParamsTest.java
+++ b/libartservice/service/javatests/com/android/server/art/model/DexoptParamsTest.java
@@ -16,12 +16,19 @@
package com.android.server.art.model;
+import static com.google.common.truth.Truth.assertThat;
+
import androidx.test.filters.SmallTest;
import androidx.test.runner.AndroidJUnit4;
import org.junit.Test;
import org.junit.runner.RunWith;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.util.Arrays;
+import java.util.stream.Collectors;
+
@SmallTest
@RunWith(AndroidJUnit4.class)
public class DexoptParamsTest {
@@ -106,4 +113,31 @@ public class DexoptParamsTest {
.setSplitName("split_0")
.build();
}
+
+ @Test
+ public void testToBuilder() {
+ // Update this test with new fields if this assertion fails.
+ assertThat(Arrays.stream(DexoptParams.class.getDeclaredFields())
+ .filter(field -> !Modifier.isStatic(field.getModifiers()))
+ .map(Field::getName)
+ .collect(Collectors.toList()))
+ .containsExactly(
+ "mFlags", "mCompilerFilter", "mPriorityClass", "mReason", "mSplitName");
+
+ DexoptParams params1 =
+ new DexoptParams.Builder("install")
+ .setFlags(ArtFlags.FLAG_FOR_PRIMARY_DEX | ArtFlags.FLAG_FOR_SINGLE_SPLIT)
+ .setCompilerFilter("speed")
+ .setPriorityClass(90)
+ .setSplitName("split_0")
+ .build();
+
+ DexoptParams params2 = params1.toBuilder().build();
+
+ assertThat(params1.getFlags()).isEqualTo(params2.getFlags());
+ assertThat(params1.getCompilerFilter()).isEqualTo(params2.getCompilerFilter());
+ assertThat(params1.getPriorityClass()).isEqualTo(params2.getPriorityClass());
+ assertThat(params1.getReason()).isEqualTo(params2.getReason());
+ assertThat(params1.getSplitName()).isEqualTo(params2.getSplitName());
+ }
}
diff --git a/libartservice/service/javatests/com/android/server/art/testing/TestingUtils.java b/libartservice/service/javatests/com/android/server/art/testing/TestingUtils.java
index 5ee0a57bd7..ee55170f05 100644
--- a/libartservice/service/javatests/com/android/server/art/testing/TestingUtils.java
+++ b/libartservice/service/javatests/com/android/server/art/testing/TestingUtils.java
@@ -22,6 +22,8 @@ import android.annotation.NonNull;
import android.annotation.Nullable;
import android.util.Log;
+import com.android.server.art.CopyAndRewriteProfileResult;
+
import com.google.common.truth.Correspondence;
import com.google.common.truth.Truth;
@@ -159,6 +161,26 @@ public final class TestingUtils {
});
}
+ public static CopyAndRewriteProfileResult createCopyAndRewriteProfileSuccess() {
+ var result = new CopyAndRewriteProfileResult();
+ result.status = CopyAndRewriteProfileResult.Status.SUCCESS;
+ return result;
+ }
+
+ public static CopyAndRewriteProfileResult createCopyAndRewriteProfileNoProfile() {
+ var result = new CopyAndRewriteProfileResult();
+ result.status = CopyAndRewriteProfileResult.Status.NO_PROFILE;
+ return result;
+ }
+
+ public static CopyAndRewriteProfileResult createCopyAndRewriteProfileBadProfile(
+ String errorMsg) {
+ var result = new CopyAndRewriteProfileResult();
+ result.status = CopyAndRewriteProfileResult.Status.BAD_PROFILE;
+ result.errorMsg = errorMsg;
+ return result;
+ }
+
private static boolean listDeepEquals(
@NonNull List<?> a, @NonNull List<?> b, @NonNull StringBuilder errorMsg) {
if (a.size() != b.size()) {
diff --git a/libarttools/Android.bp b/libarttools/Android.bp
index d38ef45d6d..5c0f1f21c4 100644
--- a/libarttools/Android.bp
+++ b/libarttools/Android.bp
@@ -34,6 +34,7 @@ cc_defaults {
"tools/tools.cc",
],
export_include_dirs: ["."],
+ header_libs: ["art_libartbase_headers"],
shared_libs: [
"libbase",
],
diff --git a/libarttools/tools/art_exec.cc b/libarttools/tools/art_exec.cc
index 1806ed4514..8f3365885b 100644
--- a/libarttools/tools/art_exec.cc
+++ b/libarttools/tools/art_exec.cc
@@ -36,7 +36,6 @@
#include "android-base/strings.h"
#include "base/macros.h"
#include "base/scoped_cap.h"
-#include "fmt/format.h"
#include "palette/palette.h"
#include "system/thread_defs.h"
@@ -48,8 +47,6 @@ using ::android::base::ParseInt;
using ::android::base::Result;
using ::android::base::Split;
-using ::fmt::literals::operator""_format; // NOLINT
-
constexpr const char* kUsage =
R"(A wrapper binary that configures the process and executes a command.
@@ -170,7 +167,7 @@ Result<void> CloseFds(const std::unordered_set<int>& keep_fds) {
if (keep_fds.find(fd) == keep_fds.end()) {
if (close(fd) != 0) {
Result<void> error = ErrnoErrorf("Failed to close FD {}", fd);
- if (std::filesystem::exists("/proc/self/fd/{}"_format(fd))) {
+ if (std::filesystem::exists(ART_FORMAT("/proc/self/fd/{}", fd))) {
return error;
}
}
diff --git a/libarttools/tools/art_exec_test.cc b/libarttools/tools/art_exec_test.cc
index 9e8b0de3ad..a5a0b01ead 100644
--- a/libarttools/tools/art_exec_test.cc
+++ b/libarttools/tools/art_exec_test.cc
@@ -37,7 +37,6 @@
#include "base/os.h"
#include "base/scoped_cap.h"
#include "exec_utils.h"
-#include "fmt/format.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "system/thread_defs.h"
@@ -57,10 +56,6 @@ using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::Not;
-// clang-tidy incorrectly complaints about the using declaration while the user-defined literal is
-// actually being used.
-using ::fmt::literals::operator""_format; // NOLINT
-
constexpr uid_t kRoot = 0;
constexpr uid_t kNobody = 9999;
@@ -73,9 +68,13 @@ constexpr uid_t kNobody = 9999;
// TODO(b/247108425): Remove this when ART gtests no longer use LD_LIBRARY_PATH.
constexpr const char* kEmptyLdLibraryPath = "--env=LD_LIBRARY_PATH=";
-std::string GetArtBin(const std::string& name) { return "{}/bin/{}"_format(GetArtRoot(), name); }
+std::string GetArtBin(const std::string& name) {
+ return ART_FORMAT("{}/bin/{}", GetArtRoot(), name);
+}
-std::string GetBin(const std::string& name) { return "{}/bin/{}"_format(GetAndroidRoot(), name); }
+std::string GetBin(const std::string& name) {
+ return ART_FORMAT("{}/bin/{}", GetAndroidRoot(), name);
+}
// Executes the command, waits for it to finish, and keeps it in a waitable state until the current
// scope exits.
@@ -217,16 +216,20 @@ TEST_F(ArtExecTest, CloseFds) {
ASSERT_GE(scratch_file.GetFd(), 0);
std::vector<std::string> args{art_exec_bin_,
- "--keep-fds={}:{}"_format(file3->Fd(), file2->Fd()),
+ ART_FORMAT("--keep-fds={}:{}", file3->Fd(), file2->Fd()),
kEmptyLdLibraryPath,
"--",
GetBin("sh"),
"-c",
- "("
- "readlink /proc/self/fd/{} || echo;"
- "readlink /proc/self/fd/{} || echo;"
- "readlink /proc/self/fd/{} || echo;"
- ") > {}"_format(file1->Fd(), file2->Fd(), file3->Fd(), filename)};
+ ART_FORMAT("("
+ "readlink /proc/self/fd/{} || echo;"
+ "readlink /proc/self/fd/{} || echo;"
+ "readlink /proc/self/fd/{} || echo;"
+ ") > {}",
+ file1->Fd(),
+ file2->Fd(),
+ file3->Fd(),
+ filename)};
ScopedExecAndWait(args);
diff --git a/libarttools/tools/tools.cc b/libarttools/tools/tools.cc
index 3d5301ad7f..4ec9d9a750 100644
--- a/libarttools/tools/tools.cc
+++ b/libarttools/tools/tools.cc
@@ -28,7 +28,7 @@
#include <vector>
#include "android-base/logging.h"
-#include "fmt/format.h"
+#include "base/macros.h"
namespace art {
namespace tools {
@@ -37,8 +37,6 @@ namespace {
using ::std::placeholders::_1;
-using ::fmt::literals::operator""_format; // NOLINT
-
// Returns true if `path_prefix` matches `pattern` or can be a prefix of a path that matches
// `pattern` (i.e., `path_prefix` represents a directory that may contain a file whose path matches
// `pattern`).
@@ -117,13 +115,13 @@ void MatchGlobRecursive(const std::vector<std::filesystem::path>& patterns,
// It's expected that we don't have permission to stat some dirs/files, and we don't care
// about them.
if (ec2.value() != EACCES) {
- LOG(ERROR) << "Unable to lstat '{}': {}"_format(entry.path().string(), ec2.message());
+ LOG(ERROR) << ART_FORMAT("Unable to lstat '{}': {}", entry.path().string(), ec2.message());
}
continue;
}
}
if (ec) {
- LOG(ERROR) << "Unable to walk through '{}': {}"_format(root_dir.string(), ec.message());
+ LOG(ERROR) << ART_FORMAT("Unable to walk through '{}': {}", root_dir.string(), ec.message());
}
}
diff --git a/libnativeloader/library_namespaces.cpp b/libnativeloader/library_namespaces.cpp
index 9aeebf38ad..1e29f4e457 100644
--- a/libnativeloader/library_namespaces.cpp
+++ b/libnativeloader/library_namespaces.cpp
@@ -255,7 +255,7 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
// Different name is useful for debugging
namespace_name = kVendorClassloaderNamespaceName;
- } else if (apk_origin == APK_ORIGIN_PRODUCT && is_product_vndk_version_defined()) {
+ } else if (apk_origin == APK_ORIGIN_PRODUCT && is_product_treblelized()) {
unbundled_app_origin = APK_ORIGIN_PRODUCT;
apk_origin_msg = "unbundled product apk";
@@ -406,8 +406,7 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
product_public_libraries());
if (!product_libs.empty()) {
auto target_ns = system_ns;
- if (is_product_vndk_version_defined()) {
- // If ro.product.vndk.version is defined, product namespace provides the product libraries.
+ if (is_product_treblelized()) {
target_ns = NativeLoaderNamespace::GetExportedNamespace(kProductNamespaceName, is_bridged);
}
if (target_ns.ok()) {
diff --git a/libnativeloader/native_loader_test.cpp b/libnativeloader/native_loader_test.cpp
index 6c0c8b17a7..72348ed364 100644
--- a/libnativeloader/native_loader_test.cpp
+++ b/libnativeloader/native_loader_test.cpp
@@ -18,9 +18,9 @@
#include "native_loader_test.h"
-#include <dlfcn.h>
-
+#include <android-base/properties.h>
#include <android-base/strings.h>
+#include <dlfcn.h>
#include <gtest/gtest.h>
#include "nativehelper/scoped_utf_chars.h"
@@ -348,7 +348,9 @@ TEST_P(NativeLoaderTest_Create, UnbundledVendorApp) {
expected_permitted_path = expected_permitted_path + ":/vendor/" LIB_DIR;
expected_shared_libs_to_platform_ns =
default_public_libraries() + ":" + llndk_libraries_vendor();
- expected_link_with_vndk_ns = true;
+ if (android::base::GetProperty("ro.vndk.version", "") != "") {
+ expected_link_with_vndk_ns = true;
+ }
SetExpectations();
RunTest();
}
@@ -378,15 +380,19 @@ TEST_P(NativeLoaderTest_Create, UnbundledProductApp) {
dex_path = "/product/app/foo/foo.apk";
is_shared = false;
- if (is_product_vndk_version_defined()) {
+ if (is_product_treblelized()) {
expected_namespace_prefix = "product-clns";
- expected_library_path = expected_library_path + ":/product/" LIB_DIR ":/system/product/" LIB_DIR;
+ expected_library_path =
+ expected_library_path + ":/product/" LIB_DIR ":/system/product/" LIB_DIR;
expected_permitted_path =
expected_permitted_path + ":/product/" LIB_DIR ":/system/product/" LIB_DIR;
expected_shared_libs_to_platform_ns =
append_extended_libraries(default_public_libraries() + ":" + llndk_libraries_product());
- expected_link_with_vndk_product_ns = true;
+ if (android::base::GetProperty("ro.product.vndk.version", "") != "") {
+ expected_link_with_vndk_product_ns = true;
+ }
}
+
SetExpectations();
RunTest();
}
diff --git a/libnativeloader/public_libraries.cpp b/libnativeloader/public_libraries.cpp
index 896c5c7106..87210c8f14 100644
--- a/libnativeloader/public_libraries.cpp
+++ b/libnativeloader/public_libraries.cpp
@@ -55,6 +55,7 @@ constexpr const char* kExtendedPublicLibrariesFileSuffix = ".txt";
constexpr const char* kApexLibrariesConfigFile = "/linkerconfig/apex.libraries.config.txt";
constexpr const char* kVendorPublicLibrariesFile = "/vendor/etc/public.libraries.txt";
constexpr const char* kLlndkLibrariesFile = "/apex/com.android.vndk.v{}/etc/llndk.libraries.{}.txt";
+constexpr const char* kLlndkLibrariesNoVndkFile = "/system/etc/llndk.libraries.txt";
constexpr const char* kVndkLibrariesFile = "/apex/com.android.vndk.v{}/etc/vndksp.libraries.{}.txt";
@@ -200,7 +201,7 @@ static std::string InitVendorPublicLibraries() {
// contains the extended public libraries that are loaded from the system namespace.
static std::string InitProductPublicLibraries() {
std::vector<std::string> sonames;
- if (is_product_vndk_version_defined()) {
+ if (is_product_treblelized()) {
ReadExtensionLibraries("/product/etc", &sonames);
}
std::string libs = android::base::Join(sonames, ':');
@@ -217,7 +218,7 @@ static std::string InitExtendedPublicLibraries() {
std::vector<std::string> sonames;
ReadExtensionLibraries("/system/etc", &sonames);
ReadExtensionLibraries("/system_ext/etc", &sonames);
- if (!is_product_vndk_version_defined()) {
+ if (!is_product_treblelized()) {
ReadExtensionLibraries("/product/etc", &sonames);
}
std::string libs = android::base::Join(sonames, ':');
@@ -225,9 +226,30 @@ static std::string InitExtendedPublicLibraries() {
return libs;
}
+bool IsVendorVndkEnabled() {
+#if defined(ART_TARGET_ANDROID)
+ return android::base::GetProperty("ro.vndk.version", "") != "";
+#else
+ return true;
+#endif
+}
+
+bool IsProductVndkEnabled() {
+#if defined(ART_TARGET_ANDROID)
+ return android::base::GetProperty("ro.product.vndk.version", "") != "";
+#else
+ return true;
+#endif
+}
+
static std::string InitLlndkLibrariesVendor() {
- std::string config_file = kLlndkLibrariesFile;
- InsertVndkVersionStr(&config_file, false);
+ std::string config_file;
+ if (IsVendorVndkEnabled()) {
+ config_file = kLlndkLibrariesFile;
+ InsertVndkVersionStr(&config_file, false);
+ } else {
+ config_file = kLlndkLibrariesNoVndkFile;
+ }
auto sonames = ReadConfig(config_file, always_true);
if (!sonames.ok()) {
LOG_ALWAYS_FATAL("%s: %s", config_file.c_str(), sonames.error().message().c_str());
@@ -239,12 +261,17 @@ static std::string InitLlndkLibrariesVendor() {
}
static std::string InitLlndkLibrariesProduct() {
- if (!is_product_vndk_version_defined()) {
- ALOGD("InitLlndkLibrariesProduct: No product VNDK version defined");
+ if (!is_product_treblelized()) {
+ ALOGD("InitLlndkLibrariesProduct: Product is not treblelized");
return "";
}
- std::string config_file = kLlndkLibrariesFile;
- InsertVndkVersionStr(&config_file, true);
+ std::string config_file;
+ if (IsProductVndkEnabled()) {
+ config_file = kLlndkLibrariesFile;
+ InsertVndkVersionStr(&config_file, true);
+ } else {
+ config_file = kLlndkLibrariesNoVndkFile;
+ }
auto sonames = ReadConfig(config_file, always_true);
if (!sonames.ok()) {
LOG_ALWAYS_FATAL("%s: %s", config_file.c_str(), sonames.error().message().c_str());
@@ -256,6 +283,11 @@ static std::string InitLlndkLibrariesProduct() {
}
static std::string InitVndkspLibrariesVendor() {
+ if (!IsVendorVndkEnabled()) {
+ ALOGD("InitVndkspLibrariesVendor: VNDK is deprecated with vendor");
+ return "";
+ }
+
std::string config_file = kVndkLibrariesFile;
InsertVndkVersionStr(&config_file, false);
auto sonames = ReadConfig(config_file, always_true);
@@ -269,8 +301,8 @@ static std::string InitVndkspLibrariesVendor() {
}
static std::string InitVndkspLibrariesProduct() {
- if (!is_product_vndk_version_defined()) {
- ALOGD("InitVndkspLibrariesProduct: No product VNDK version defined");
+ if (!IsProductVndkEnabled()) {
+ ALOGD("InitVndkspLibrariesProduct: VNDK is deprecated with product");
return "";
}
std::string config_file = kVndkLibrariesFile;
@@ -393,9 +425,14 @@ const std::map<std::string, std::string>& apex_public_libraries() {
return public_libraries;
}
-bool is_product_vndk_version_defined() {
+bool is_product_treblelized() {
#if defined(ART_TARGET_ANDROID)
- return android::sysprop::VndkProperties::product_vndk_version().has_value();
+ // Product is not treblelized iff launching version is prior to R and
+ // ro.product.vndk.version is not defined
+ static bool product_treblelized =
+ !(android::base::GetIntProperty("ro.product.first_api_level", 0) < __ANDROID_API_R__ &&
+ !android::sysprop::VndkProperties::product_vndk_version().has_value());
+ return product_treblelized;
#else
return false;
#endif
diff --git a/libnativeloader/public_libraries.h b/libnativeloader/public_libraries.h
index 6f5a13c9b3..1830824704 100644
--- a/libnativeloader/public_libraries.h
+++ b/libnativeloader/public_libraries.h
@@ -47,12 +47,14 @@ const std::string& apex_jni_libraries(const std::string& apex_name);
// but provided by com.android.foo APEX.
const std::map<std::string, std::string>& apex_public_libraries();
-// Returns true if libnativeloader is running on devices and the device has
-// ro.product.vndk.version property. It returns false for host.
-bool is_product_vndk_version_defined();
-
std::string get_vndk_version(bool is_product_vndk);
+// Returns true if libnativeloader is running on devices and the device has a
+// treblelized product partition. It returns false for host.
+// TODO: Remove this function and assume it is always true once Mainline no longer supports any
+// devices launched with Q or below.
+bool is_product_treblelized();
+
// These are exported for testing
namespace internal {
diff --git a/libprofile/profile/profile_compilation_info.cc b/libprofile/profile/profile_compilation_info.cc
index 4341a1d49a..1dc63a0ef9 100644
--- a/libprofile/profile/profile_compilation_info.cc
+++ b/libprofile/profile/profile_compilation_info.cc
@@ -32,11 +32,14 @@
#include <numeric>
#include <random>
#include <string>
+#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include "android-base/file.h"
#include "android-base/properties.h"
#include "android-base/scopeguard.h"
+#include "android-base/strings.h"
#include "android-base/unique_fd.h"
#include "base/arena_allocator.h"
#include "base/bit_utils.h"
@@ -2478,31 +2481,68 @@ bool ProfileCompilationInfo::IsProfileFile(int fd) {
bool ProfileCompilationInfo::UpdateProfileKeys(
const std::vector<std::unique_ptr<const DexFile>>& dex_files, /*out*/ bool* matched) {
- *matched = false;
- for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
- for (const std::unique_ptr<DexFileData>& dex_data : info_) {
+ // This check aligns with when dex2oat falls back from "speed-profile" to "verify".
+ //
+ // ART Service relies on the exit code of profman, which is determined by the value of `matched`,
+ // to judge whether it should re-dexopt for "speed-profile". Therefore, a misalignment will cause
+ // repeated dexopt.
+ if (IsEmpty()) {
+ *matched = false;
+ return true;
+ }
+ DCHECK(!info_.empty());
+
+ *matched = true;
+
+ // A map from the old base key to the new base key.
+ std::unordered_map<std::string, std::string> old_key_to_new_key;
+
+ // A map from the new base key to all matching old base keys (an invert of the map above), for
+ // detecting duplicate keys.
+ std::unordered_map<std::string, std::unordered_set<std::string>> new_key_to_old_keys;
+
+ for (const std::unique_ptr<DexFileData>& dex_data : info_) {
+ std::string old_base_key = GetBaseKeyFromAugmentedKey(dex_data->profile_key);
+ bool found = false;
+ for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
if (dex_data->checksum == dex_file->GetLocationChecksum() &&
dex_data->num_type_ids == dex_file->NumTypeIds() &&
dex_data->num_method_ids == dex_file->NumMethodIds()) {
- std::string new_profile_key = GetProfileDexFileBaseKey(dex_file->GetLocation());
- std::string dex_data_base_key = GetBaseKeyFromAugmentedKey(dex_data->profile_key);
- if (dex_data_base_key != new_profile_key) {
- if (profile_key_map_.find(new_profile_key) != profile_key_map_.end()) {
- // We can't update the key if the new key belongs to a different dex file.
- LOG(ERROR) << "Cannot update profile key to " << new_profile_key
- << " because the new key belongs to another dex file.";
- return false;
- }
- profile_key_map_.erase(dex_data->profile_key);
- // Retain the annotation (if any) during the renaming by re-attaching the info
- // form the old key.
- dex_data->profile_key = MigrateAnnotationInfo(new_profile_key, dex_data->profile_key);
- profile_key_map_.Put(dex_data->profile_key, dex_data->profile_index);
- }
- *matched = true;
+ std::string new_base_key = GetProfileDexFileBaseKey(dex_file->GetLocation());
+ old_key_to_new_key[old_base_key] = new_base_key;
+ new_key_to_old_keys[new_base_key].insert(old_base_key);
+ found = true;
+ break;
}
}
+ if (!found) {
+ *matched = false;
+ // Keep the old key.
+ old_key_to_new_key[old_base_key] = old_base_key;
+ new_key_to_old_keys[old_base_key].insert(old_base_key);
+ }
+ }
+
+ for (const auto& [new_key, old_keys] : new_key_to_old_keys) {
+ if (old_keys.size() > 1) {
+ LOG(ERROR) << "Cannot update multiple profile keys [" << android::base::Join(old_keys, ", ")
+ << "] to the same new key '" << new_key << "'";
+ return false;
+ }
}
+
+ // Check passed. Now perform the actual mutation.
+ profile_key_map_.clear();
+
+ for (const std::unique_ptr<DexFileData>& dex_data : info_) {
+ std::string old_base_key = GetBaseKeyFromAugmentedKey(dex_data->profile_key);
+ const std::string& new_base_key = old_key_to_new_key[old_base_key];
+ DCHECK(!new_base_key.empty());
+ // Retain the annotation (if any) during the renaming by re-attaching the info from the old key.
+ dex_data->profile_key = MigrateAnnotationInfo(new_base_key, dex_data->profile_key);
+ profile_key_map_.Put(dex_data->profile_key, dex_data->profile_index);
+ }
+
return true;
}
diff --git a/libprofile/profile/profile_compilation_info.h b/libprofile/profile/profile_compilation_info.h
index 68177629b0..2da33a0c02 100644
--- a/libprofile/profile/profile_compilation_info.h
+++ b/libprofile/profile/profile_compilation_info.h
@@ -655,7 +655,7 @@ class ProfileCompilationInfo {
// If the new profile key would collide with an existing key (for a different dex)
// the method returns false. Otherwise it returns true.
//
- // `matched` is set to true if any profile has matched any input dex file.
+ // `matched` is set to true if all profiles have matched input dex files.
bool UpdateProfileKeys(const std::vector<std::unique_ptr<const DexFile>>& dex_files,
/*out*/ bool* matched);
diff --git a/libprofile/profile/profile_compilation_info_test.cc b/libprofile/profile/profile_compilation_info_test.cc
index 81680041b5..627d05d7dd 100644
--- a/libprofile/profile/profile_compilation_info_test.cc
+++ b/libprofile/profile/profile_compilation_info_test.cc
@@ -1029,28 +1029,50 @@ TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyOkMatchedButNoUpdate) {
TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyOkButNoMatch) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
- dex_files.push_back(std::unique_ptr<const DexFile>(dex1));
+ dex_files.push_back(std::unique_ptr<const DexFile>(dex1_renamed));
+ dex_files.push_back(std::unique_ptr<const DexFile>(dex2_renamed));
+ // This is a partial match: `dex1` matches `dex1_renamed`, but `dex3` matches nothing. It should
+ // be treated as a match failure.
ProfileCompilationInfo info;
- AddMethod(&info, dex2, /*method_idx=*/ 0);
+ AddMethod(&info, dex1, /*method_idx=*/0);
+ AddMethod(&info, dex3, /*method_idx=*/0);
// Update the profile keys based on the original dex files.
bool matched = false;
ASSERT_TRUE(info.UpdateProfileKeys(dex_files, &matched));
ASSERT_FALSE(matched);
- // Verify that we did not perform any update and that we cannot find anything with the new
- // location.
- for (const std::unique_ptr<const DexFile>& dex : dex_files) {
- ProfileCompilationInfo::MethodHotness loaded_hotness =
- GetMethod(info, dex.get(), /*method_idx=*/ 0);
- ASSERT_FALSE(loaded_hotness.IsHot());
+ // Verify that the unmatched entry is kept.
+ ProfileCompilationInfo::MethodHotness loaded_hotness = GetMethod(info, dex3, /*method_idx=*/0);
+ ASSERT_TRUE(loaded_hotness.IsHot());
+
+ // Verify that we can find the updated entry.
+ ProfileCompilationInfo::MethodHotness loaded_hotness_2 =
+ GetMethod(info, dex1_renamed, /*method_idx=*/0);
+ ASSERT_TRUE(loaded_hotness_2.IsHot());
+
+ // Release the ownership as this is held by the test class;
+ for (std::unique_ptr<const DexFile>& dex : dex_files) {
+ UNUSED(dex.release());
}
+}
- // Verify that we can find the original entry.
- ProfileCompilationInfo::MethodHotness loaded_hotness =
- GetMethod(info, dex2, /*method_idx=*/ 0);
- ASSERT_TRUE(loaded_hotness.IsHot());
+TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyOkButEmpty) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files.push_back(std::unique_ptr<const DexFile>(dex1_renamed));
+ dex_files.push_back(std::unique_ptr<const DexFile>(dex2_renamed));
+
+ // Empty profile.
+ ProfileCompilationInfo info;
+
+ // Update the profile keys based on the original dex files.
+ bool matched = false;
+ ASSERT_TRUE(info.UpdateProfileKeys(dex_files, &matched));
+ ASSERT_FALSE(matched);
+
+ // Verify that the updated profile is still empty.
+ EXPECT_TRUE(info.IsEmpty());
// Release the ownership as this is held by the test class;
for (std::unique_ptr<const DexFile>& dex : dex_files) {
@@ -1071,7 +1093,7 @@ TEST_F(ProfileCompilationInfoTest, UpdateProfileKeyFail) {
bool matched = false;
ASSERT_FALSE(info.UpdateProfileKeys(dex_files, &matched));
- ASSERT_FALSE(matched);
+ ASSERT_TRUE(matched);
// Release the ownership as this is held by the test class;
for (std::unique_ptr<const DexFile>& dex : dex_files) {
diff --git a/odrefresh/odr_common.cc b/odrefresh/odr_common.cc
index ec18884a05..191defc125 100644
--- a/odrefresh/odr_common.cc
+++ b/odrefresh/odr_common.cc
@@ -28,19 +28,16 @@
#include "android-base/logging.h"
#include "android-base/parseint.h"
#include "android-base/result.h"
-#include "fmt/format.h"
+#include "base/macros.h"
namespace art {
namespace odrefresh {
namespace {
-
using ::android::base::Result;
-
-using ::fmt::literals::operator""_format; // NOLINT
}
-std::string QuotePath(std::string_view path) { return "'{}'"_format(path); }
+std::string QuotePath(std::string_view path) { return ART_FORMAT("'{}'", path); }
Result<int> ParseSecurityPatchStr(const std::string& security_patch_str) {
std::regex security_patch_regex(R"re((\d{4})-(\d{2})-(\d{2}))re");
@@ -91,5 +88,12 @@ void SystemPropertyForeach(std::function<void(const char* name, const char* valu
&action);
}
+bool CheckBuildUserfaultFdGc(bool build_enable_uffd_gc,
+ bool is_at_least_t,
+ bool kernel_supports_uffd) {
+ bool runtime_uses_uffd_gc = (build_enable_uffd_gc || is_at_least_t) && kernel_supports_uffd;
+ return build_enable_uffd_gc == runtime_uses_uffd_gc;
+}
+
} // namespace odrefresh
} // namespace art
diff --git a/odrefresh/odr_common.h b/odrefresh/odr_common.h
index e55258ff1c..d887bf0449 100644
--- a/odrefresh/odr_common.h
+++ b/odrefresh/odr_common.h
@@ -44,6 +44,11 @@ bool ShouldDisableRefresh(const std::string& sdk_version_str);
// Passes the name and the value for each system property to the provided callback.
void SystemPropertyForeach(std::function<void(const char* name, const char* value)> action);
+// Returns true if the build-time UFFD GC matches the runtime's choice.
+bool CheckBuildUserfaultFdGc(bool build_enable_uffd_gc,
+ bool is_at_least_t,
+ bool kernel_supports_uffd);
+
} // namespace odrefresh
} // namespace art
diff --git a/odrefresh/odr_common_test.cc b/odrefresh/odr_common_test.cc
index 150f57bf89..204c04bc53 100644
--- a/odrefresh/odr_common_test.cc
+++ b/odrefresh/odr_common_test.cc
@@ -55,5 +55,24 @@ TEST(OdrCommonTest, ShouldDisableRefresh) {
EXPECT_FALSE(ShouldDisableRefresh("invalid"));
}
+TEST(OdrCommonTest, CheckBuildUserfaultFdGc) {
+ EXPECT_TRUE(CheckBuildUserfaultFdGc(
+ /*build_enable_uffd_gc=*/false, /*is_at_least_t=*/false, /*kernel_supports_uffd=*/false));
+ EXPECT_FALSE(CheckBuildUserfaultFdGc(
+ /*build_enable_uffd_gc=*/true, /*is_at_least_t=*/false, /*kernel_supports_uffd=*/false));
+ EXPECT_TRUE(CheckBuildUserfaultFdGc(
+ /*build_enable_uffd_gc=*/false, /*is_at_least_t=*/true, /*kernel_supports_uffd=*/false));
+ EXPECT_FALSE(CheckBuildUserfaultFdGc(
+ /*build_enable_uffd_gc=*/true, /*is_at_least_t=*/true, /*kernel_supports_uffd=*/false));
+ EXPECT_TRUE(CheckBuildUserfaultFdGc(
+ /*build_enable_uffd_gc=*/false, /*is_at_least_t=*/false, /*kernel_supports_uffd=*/true));
+ EXPECT_TRUE(CheckBuildUserfaultFdGc(
+ /*build_enable_uffd_gc=*/true, /*is_at_least_t=*/false, /*kernel_supports_uffd=*/true));
+ EXPECT_FALSE(CheckBuildUserfaultFdGc(
+ /*build_enable_uffd_gc=*/false, /*is_at_least_t=*/true, /*kernel_supports_uffd=*/true));
+ EXPECT_TRUE(CheckBuildUserfaultFdGc(
+ /*build_enable_uffd_gc=*/true, /*is_at_least_t=*/true, /*kernel_supports_uffd=*/true));
+}
+
} // namespace odrefresh
} // namespace art
diff --git a/odrefresh/odr_config.h b/odrefresh/odr_config.h
index e87acc03f2..2a42a5e4e0 100644
--- a/odrefresh/odr_config.h
+++ b/odrefresh/odr_config.h
@@ -33,6 +33,7 @@
#include "log/log.h"
#include "odr_common.h"
#include "odrefresh/odrefresh.h"
+#include "tools/system_properties.h"
namespace art {
namespace odrefresh {
@@ -73,9 +74,14 @@ struct SystemPropertyConfig {
const android::base::NoDestructor<std::vector<SystemPropertyConfig>> kSystemProperties{
{SystemPropertyConfig{.name = "persist.device_config.runtime_native_boot.enable_uffd_gc_2",
.default_value = "false"},
+ SystemPropertyConfig{.name = "persist.device_config.runtime_native_boot.force_disable_uffd_gc",
+ .default_value = "false"},
SystemPropertyConfig{.name = kPhDisableCompactDex, .default_value = "false"},
SystemPropertyConfig{.name = kSystemPropertySystemServerCompilerFilterOverride,
- .default_value = ""}}};
+ .default_value = ""},
+ // For testing only (cf. odsign_e2e_tests_full).
+ SystemPropertyConfig{.name = "persist.device_config.runtime_native_boot.odrefresh_test_toggle",
+ .default_value = "false"}}};
// An enumeration of the possible zygote configurations on Android.
enum class ZygoteKind : uint8_t {
@@ -89,6 +95,26 @@ enum class ZygoteKind : uint8_t {
kZygote64 = 3
};
+class OdrSystemProperties : public tools::SystemProperties {
+ public:
+ explicit OdrSystemProperties(
+ const std::unordered_map<std::string, std::string>* system_properties)
+ : system_properties_(system_properties) {}
+
+ // For supporting foreach loops.
+ auto begin() const { return system_properties_->begin(); }
+ auto end() const { return system_properties_->end(); }
+
+ protected:
+ std::string GetProperty(const std::string& key) const override {
+ auto it = system_properties_->find(key);
+ return it != system_properties_->end() ? it->second : "";
+ }
+
+ private:
+ const std::unordered_map<std::string, std::string>* system_properties_;
+};
+
// Configuration class for odrefresh. Exists to enable abstracting environment variables and
// system properties into a configuration class for development and testing purposes.
class OdrConfig final {
@@ -115,17 +141,20 @@ class OdrConfig final {
// The current values of system properties listed in `kSystemProperties`.
std::unordered_map<std::string, std::string> system_properties_;
+ // A helper for reading from `system_properties_`.
+ OdrSystemProperties odr_system_properties_;
+
// Staging directory for artifacts. The directory must exist and will be automatically removed
// after compilation. If empty, use the default directory.
std::string staging_dir_;
public:
explicit OdrConfig(const char* program_name)
- : dry_run_(false),
- isa_(InstructionSet::kNone),
- program_name_(android::base::Basename(program_name)),
- artifact_dir_(GetApexDataDalvikCacheDirectory(InstructionSet::kNone)) {
- }
+ : dry_run_(false),
+ isa_(InstructionSet::kNone),
+ program_name_(android::base::Basename(program_name)),
+ artifact_dir_(GetApexDataDalvikCacheDirectory(InstructionSet::kNone)),
+ odr_system_properties_(&system_properties_) {}
const std::string& GetApexInfoListFile() const { return apex_info_list_file_; }
@@ -209,9 +238,7 @@ class OdrConfig final {
}
bool GetCompilationOsMode() const { return compilation_os_mode_; }
bool GetMinimal() const { return minimal_; }
- const std::unordered_map<std::string, std::string>& GetSystemProperties() const {
- return system_properties_;
- }
+ const OdrSystemProperties& GetSystemProperties() const { return odr_system_properties_; }
void SetApexInfoListFile(const std::string& file_path) { apex_info_list_file_ = file_path; }
void SetArtBinDir(const std::string& art_bin_dir) { art_bin_dir_ = art_bin_dir; }
diff --git a/odrefresh/odrefresh.cc b/odrefresh/odrefresh.cc
index 2fed8b7a2e..7ff6aa4272 100644
--- a/odrefresh/odrefresh.cc
+++ b/odrefresh/odrefresh.cc
@@ -28,10 +28,12 @@
#include <unistd.h>
#include <algorithm>
+#include <cerrno>
#include <cstdarg>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
+#include <cstring>
#include <filesystem>
#include <fstream>
#include <functional>
@@ -57,7 +59,6 @@
#include "android-base/file.h"
#include "android-base/logging.h"
#include "android-base/macros.h"
-#include "android-base/parsebool.h"
#include "android-base/parseint.h"
#include "android-base/properties.h"
#include "android-base/result.h"
@@ -65,34 +66,27 @@
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
#include "android-modules-utils/sdk_level.h"
-#include "android/log.h"
#include "arch/instruction_set.h"
#include "base/file_utils.h"
-#include "base/globals.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/os.h"
#include "base/stl_util.h"
-#include "base/string_view_cpp20.h"
#include "base/unix_file/fd_file.h"
#include "com_android_apex.h"
#include "com_android_art.h"
#include "dex/art_dex_file_loader.h"
-#include "dexoptanalyzer.h"
#include "exec_utils.h"
-#include "fmt/format.h"
#include "gc/collector/mark_compact.h"
-#include "log/log.h"
#include "odr_artifacts.h"
#include "odr_common.h"
-#include "odr_compilation_log.h"
#include "odr_config.h"
#include "odr_fs_utils.h"
#include "odr_metrics.h"
#include "odrefresh/odrefresh.h"
#include "palette/palette.h"
#include "palette/palette_types.h"
-#include "read_barrier_config.h"
+#include "tools/cmdline_builder.h"
namespace art {
namespace odrefresh {
@@ -104,10 +98,7 @@ namespace art_apex = com::android::art;
using ::android::base::Basename;
using ::android::base::Dirname;
-using ::android::base::GetProperty;
using ::android::base::Join;
-using ::android::base::ParseBool;
-using ::android::base::ParseBoolResult;
using ::android::base::ParseInt;
using ::android::base::Result;
using ::android::base::SetProperty;
@@ -115,9 +106,9 @@ using ::android::base::Split;
using ::android::base::StartsWith;
using ::android::base::StringPrintf;
using ::android::base::Timer;
+using ::android::modules::sdklevel::IsAtLeastT;
using ::android::modules::sdklevel::IsAtLeastU;
-
-using ::fmt::literals::operator""_format; // NOLINT
+using ::art::tools::CmdlineBuilder;
// Name of cache info file in the ART Apex artifact cache.
constexpr const char* kCacheInfoFile = "cache-info.xml";
@@ -157,7 +148,7 @@ bool MoveOrEraseFiles(const std::vector<std::unique_ptr<File>>& files,
std::vector<std::unique_ptr<File>> output_files;
for (auto& file : files) {
std::string file_basename(Basename(file->GetPath()));
- std::string output_file_path = "{}/{}"_format(output_directory_path, file_basename);
+ std::string output_file_path = ART_FORMAT("{}/{}", output_directory_path, file_basename);
std::string input_file_path = file->GetPath();
output_files.emplace_back(OS::CreateEmptyFileWriteOnly(output_file_path.c_str()));
@@ -384,20 +375,20 @@ bool ArtifactsExist(const OdrArtifacts& artifacts,
return true;
}
-void AddDex2OatCommonOptions(/*inout*/ std::vector<std::string>& args) {
- args.emplace_back("--android-root=out/empty");
- args.emplace_back("--abort-on-hard-verifier-error");
- args.emplace_back("--no-abort-on-soft-verifier-error");
- args.emplace_back("--compilation-reason=boot");
- args.emplace_back("--image-format=lz4");
- args.emplace_back("--force-determinism");
- args.emplace_back("--resolve-startup-const-strings=true");
+void AddDex2OatCommonOptions(/*inout*/ CmdlineBuilder& args) {
+ args.Add("--android-root=out/empty");
+ args.Add("--abort-on-hard-verifier-error");
+ args.Add("--no-abort-on-soft-verifier-error");
+ args.Add("--compilation-reason=boot");
+ args.Add("--image-format=lz4");
+ args.Add("--force-determinism");
+ args.Add("--resolve-startup-const-strings=true");
// Avoid storing dex2oat cmdline in oat header. We want to be sure that the compiled artifacts
// are identical regardless of where the compilation happened. But some of the cmdline flags tends
// to be unstable, e.g. those contains FD numbers. To avoid the problem, the whole cmdline is not
// added to the oat header.
- args.emplace_back("--avoid-storing-invocation");
+ args.Add("--avoid-storing-invocation");
}
bool IsCpuSetSpecValid(const std::string& cpu_set) {
@@ -410,68 +401,66 @@ bool IsCpuSetSpecValid(const std::string& cpu_set) {
return true;
}
-Result<void> AddDex2OatConcurrencyArguments(/*inout*/ std::vector<std::string>& args,
- bool is_compilation_os) {
+Result<void> AddDex2OatConcurrencyArguments(/*inout*/ CmdlineBuilder& args,
+ bool is_compilation_os,
+ const OdrSystemProperties& system_properties) {
std::string threads;
if (is_compilation_os) {
- threads = GetProperty("dalvik.vm.background-dex2oat-threads", "");
- if (threads.empty()) {
- threads = GetProperty("dalvik.vm.dex2oat-threads", "");
- }
+ threads = system_properties.GetOrEmpty("dalvik.vm.background-dex2oat-threads",
+ "dalvik.vm.dex2oat-threads");
} else {
- threads = GetProperty("dalvik.vm.boot-dex2oat-threads", "");
- }
- if (!threads.empty()) {
- args.push_back("-j" + threads);
+ threads = system_properties.GetOrEmpty("dalvik.vm.boot-dex2oat-threads");
}
+ args.AddIfNonEmpty("-j%s", threads);
std::string cpu_set;
if (is_compilation_os) {
- cpu_set = GetProperty("dalvik.vm.background-dex2oat-cpu-set", "");
- if (cpu_set.empty()) {
- cpu_set = GetProperty("dalvik.vm.dex2oat-cpu-set", "");
- }
+ cpu_set = system_properties.GetOrEmpty("dalvik.vm.background-dex2oat-cpu-set",
+ "dalvik.vm.dex2oat-cpu-set");
} else {
- cpu_set = GetProperty("dalvik.vm.boot-dex2oat-cpu-set", "");
+ cpu_set = system_properties.GetOrEmpty("dalvik.vm.boot-dex2oat-cpu-set");
}
if (!cpu_set.empty()) {
if (!IsCpuSetSpecValid(cpu_set)) {
return Errorf("Invalid CPU set spec '{}'", cpu_set);
}
- args.push_back("--cpu-set=" + cpu_set);
+ args.Add("--cpu-set=%s", cpu_set);
}
return {};
}
-void AddDex2OatDebugInfo(/*inout*/ std::vector<std::string>& args) {
- args.emplace_back("--generate-mini-debug-info");
- args.emplace_back("--strip");
+void AddDex2OatDebugInfo(/*inout*/ CmdlineBuilder& args) {
+ args.Add("--generate-mini-debug-info");
+ args.Add("--strip");
}
-void AddDex2OatInstructionSet(/*inout*/ std::vector<std::string>& args, InstructionSet isa) {
+void AddDex2OatInstructionSet(/*inout*/ CmdlineBuilder& args, InstructionSet isa) {
const char* isa_str = GetInstructionSetString(isa);
- args.emplace_back(StringPrintf("--instruction-set=%s", isa_str));
+ args.Add("--instruction-set=%s", isa_str);
}
-// Returns true if any profile has been added.
-bool AddDex2OatProfile(
- /*inout*/ std::vector<std::string>& args,
+// Returns true if any profile has been added, or false if no profile exists, or error if any error
+// occurred.
+Result<bool> AddDex2OatProfile(
+ /*inout*/ CmdlineBuilder& args,
/*inout*/ std::vector<std::unique_ptr<File>>& output_files,
const std::vector<std::string>& profile_paths) {
bool has_any_profile = false;
- for (auto& path : profile_paths) {
+ for (const std::string& path : profile_paths) {
std::unique_ptr<File> profile_file(OS::OpenFileForReading(path.c_str()));
- if (profile_file && profile_file->IsOpened()) {
- args.emplace_back(StringPrintf("--profile-file-fd=%d", profile_file->Fd()));
+ if (profile_file != nullptr) {
+ args.Add("--profile-file-fd=%d", profile_file->Fd());
output_files.emplace_back(std::move(profile_file));
has_any_profile = true;
+ } else if (errno != ENOENT) {
+ return ErrnoErrorf("Failed to open profile file '{}'", path);
}
}
return has_any_profile;
}
-Result<void> AddBootClasspathFds(/*inout*/ std::vector<std::string>& args,
+Result<void> AddBootClasspathFds(/*inout*/ CmdlineBuilder& args,
/*inout*/ std::vector<std::unique_ptr<File>>& output_files,
const std::vector<std::string>& bcp_jars) {
std::vector<std::string> bcp_fds;
@@ -484,19 +473,18 @@ Result<void> AddBootClasspathFds(/*inout*/ std::vector<std::string>& args,
} else {
std::string actual_path = RewriteParentDirectoryIfNeeded(jar);
std::unique_ptr<File> jar_file(OS::OpenFileForReading(actual_path.c_str()));
- if (!jar_file || !jar_file->IsValid()) {
- return Errorf("Failed to open a BCP jar '{}'", actual_path);
+ if (jar_file == nullptr) {
+ return ErrnoErrorf("Failed to open a BCP jar '{}'", actual_path);
}
bcp_fds.push_back(std::to_string(jar_file->Fd()));
output_files.push_back(std::move(jar_file));
}
}
- args.emplace_back("--runtime-arg");
- args.emplace_back("-Xbootclasspathfds:" + Join(bcp_fds, ':'));
+ args.AddRuntime("-Xbootclasspathfds:%s", Join(bcp_fds, ':'));
return {};
}
-Result<void> AddCacheInfoFd(/*inout*/ std::vector<std::string>& args,
+Result<void> AddCacheInfoFd(/*inout*/ CmdlineBuilder& args,
/*inout*/ std::vector<std::unique_ptr<File>>& readonly_files_raii,
const std::string& cache_info_filename) {
std::unique_ptr<File> cache_info_file(OS::OpenFileForReading(cache_info_filename.c_str()));
@@ -504,7 +492,7 @@ Result<void> AddCacheInfoFd(/*inout*/ std::vector<std::string>& args,
return ErrnoErrorf("Failed to open a cache info file '{}'", cache_info_file);
}
- args.emplace_back("--cache-info-fd=" + std::to_string(cache_info_file->Fd()));
+ args.Add("--cache-info-fd=%d", cache_info_file->Fd());
readonly_files_raii.push_back(std::move(cache_info_file));
return {};
}
@@ -517,8 +505,8 @@ std::string GetBootImageComponentBasename(const std::string& jar_path, bool is_f
return "boot-" + ReplaceFileExtension(jar_name, "art");
}
-void AddCompiledBootClasspathFdsIfAny(
- /*inout*/ std::vector<std::string>& args,
+Result<void> AddCompiledBootClasspathFdsIfAny(
+ /*inout*/ CmdlineBuilder& args,
/*inout*/ std::vector<std::unique_ptr<File>>& output_files,
const std::vector<std::string>& bcp_jars,
InstructionSet isa,
@@ -544,45 +532,50 @@ void AddCompiledBootClasspathFdsIfAny(
std::string image_path = artifact_dir + "/" + basename;
image_path = GetSystemImageFilename(image_path.c_str(), isa);
std::unique_ptr<File> image_file(OS::OpenFileForReading(image_path.c_str()));
- if (image_file && image_file->IsValid()) {
+ if (image_file != nullptr) {
bcp_image_fds.push_back(std::to_string(image_file->Fd()));
opened_files.push_back(std::move(image_file));
added_any = true;
- } else {
+ } else if (errno == ENOENT) {
bcp_image_fds.push_back("-1");
+ } else {
+ return ErrnoErrorf("Failed to open boot image file '{}'", image_path);
}
std::string oat_path = ReplaceFileExtension(image_path, "oat");
std::unique_ptr<File> oat_file(OS::OpenFileForReading(oat_path.c_str()));
- if (oat_file && oat_file->IsValid()) {
+ if (oat_file != nullptr) {
bcp_oat_fds.push_back(std::to_string(oat_file->Fd()));
opened_files.push_back(std::move(oat_file));
added_any = true;
- } else {
+ } else if (errno == ENOENT) {
bcp_oat_fds.push_back("-1");
+ } else {
+ return ErrnoErrorf("Failed to open boot image file '{}'", oat_path);
}
std::string vdex_path = ReplaceFileExtension(image_path, "vdex");
std::unique_ptr<File> vdex_file(OS::OpenFileForReading(vdex_path.c_str()));
- if (vdex_file && vdex_file->IsValid()) {
+ if (vdex_file != nullptr) {
bcp_vdex_fds.push_back(std::to_string(vdex_file->Fd()));
opened_files.push_back(std::move(vdex_file));
added_any = true;
- } else {
+ } else if (errno == ENOENT) {
bcp_vdex_fds.push_back("-1");
+ } else {
+ return ErrnoErrorf("Failed to open boot image file '{}'", vdex_path);
}
}
// Add same amount of FDs as BCP JARs, or none.
if (added_any) {
std::move(opened_files.begin(), opened_files.end(), std::back_inserter(output_files));
- args.emplace_back("--runtime-arg");
- args.emplace_back("-Xbootclasspathimagefds:" + Join(bcp_image_fds, ':'));
- args.emplace_back("--runtime-arg");
- args.emplace_back("-Xbootclasspathoatfds:" + Join(bcp_oat_fds, ':'));
- args.emplace_back("--runtime-arg");
- args.emplace_back("-Xbootclasspathvdexfds:" + Join(bcp_vdex_fds, ':'));
+ args.AddRuntime("-Xbootclasspathimagefds:%s", Join(bcp_image_fds, ':'));
+ args.AddRuntime("-Xbootclasspathoatfds:%s", Join(bcp_oat_fds, ':'));
+ args.AddRuntime("-Xbootclasspathvdexfds:%s", Join(bcp_vdex_fds, ':'));
}
+
+ return {};
}
std::string GetStagingLocation(const std::string& staging_dir, const std::string& path) {
@@ -753,7 +746,7 @@ Result<art_apex::CacheInfo> OnDeviceRefresh::ReadCacheInfo() const {
Result<void> OnDeviceRefresh::WriteCacheInfo() const {
if (OS::FileExists(cache_info_filename_.c_str())) {
if (unlink(cache_info_filename_.c_str()) != 0) {
- return ErrnoErrorf("Failed to unlink() file {}", QuotePath(cache_info_filename_));
+ return ErrnoErrorf("Failed to unlink file {}", QuotePath(cache_info_filename_));
}
}
@@ -789,7 +782,7 @@ Result<void> OnDeviceRefresh::WriteCacheInfo() const {
std::ofstream out(cache_info_filename_.c_str());
if (out.fail()) {
- return Errorf("Cannot open {} for writing.", QuotePath(cache_info_filename_));
+ return ErrnoErrorf("Could not create cache info file {}", QuotePath(cache_info_filename_));
}
std::unique_ptr<art_apex::CacheInfo> info(new art_apex::CacheInfo(
@@ -804,7 +797,7 @@ Result<void> OnDeviceRefresh::WriteCacheInfo() const {
art_apex::write(out, *info);
out.close();
if (out.fail()) {
- return Errorf("Cannot write to {}", QuotePath(cache_info_filename_));
+ return ErrnoErrorf("Could not write cache info file {}", QuotePath(cache_info_filename_));
}
return {};
@@ -896,7 +889,7 @@ std::string OnDeviceRefresh::GetSystemBootImageFrameworkExtension() const {
std::string basename =
GetBootImageComponentBasename(framework_bcp_jars[0], /*is_first_jar=*/false);
// Typically "/system/framework/boot-framework.art".
- return "{}/framework/{}"_format(GetAndroidRoot(), basename);
+ return ART_FORMAT("{}/framework/{}", GetAndroidRoot(), basename);
}
std::string OnDeviceRefresh::GetSystemBootImageFrameworkExtensionPath(InstructionSet isa) const {
@@ -910,10 +903,10 @@ std::string OnDeviceRefresh::GetBootImageMainlineExtension(bool on_system) const
GetBootImageComponentBasename(mainline_bcp_jars[0], /*is_first_jar=*/false);
if (on_system) {
// Typically "/system/framework/boot-framework-adservices.art".
- return "{}/framework/{}"_format(GetAndroidRoot(), basename);
+ return ART_FORMAT("{}/framework/{}", GetAndroidRoot(), basename);
} else {
// Typically "/data/misc/apexdata/com.android.art/dalvik-cache/boot-framework-adservices.art".
- return "{}/{}"_format(config_.GetArtifactDirectory(), basename);
+ return ART_FORMAT("{}/{}", config_.GetArtifactDirectory(), basename);
}
}
@@ -964,7 +957,7 @@ std::string OnDeviceRefresh::GetSystemServerImagePath(bool on_system,
std::string image_name = ReplaceFileExtension(jar_name, "art");
const char* isa_str = GetInstructionSetString(config_.GetSystemServerIsa());
// Typically "/system/framework/oat/<isa>/services.art".
- return "{}/oat/{}/{}"_format(Dirname(jar_path), isa_str, image_name);
+ return ART_FORMAT("{}/oat/{}/{}", Dirname(jar_path), isa_str, image_name);
} else {
// Typically
// "/data/misc/apexdata/.../dalvik-cache/<isa>/system@framework@services.jar@classes.art".
@@ -1043,16 +1036,15 @@ WARN_UNUSED bool OnDeviceRefresh::CheckSystemPropertiesAreDefault() const {
std::end(kCheckedSystemPropertyPrefixes),
[](const char* prefix) { return StartsWith(prefix, "persist."); }));
- const std::unordered_map<std::string, std::string>& system_properties =
- config_.GetSystemProperties();
+ const OdrSystemProperties& system_properties = config_.GetSystemProperties();
for (const SystemPropertyConfig& system_property_config : *kSystemProperties.get()) {
- auto property = system_properties.find(system_property_config.name);
- DCHECK(property != system_properties.end());
+ std::string property = system_properties.GetOrEmpty(system_property_config.name);
+ DCHECK_NE(property, "");
- if (property->second != system_property_config.default_value) {
+ if (property != system_property_config.default_value) {
LOG(INFO) << "System property " << system_property_config.name << " has a non-default value ("
- << property->second << ").";
+ << property << ").";
return false;
}
}
@@ -1078,16 +1070,14 @@ WARN_UNUSED bool OnDeviceRefresh::CheckSystemPropertiesHaveNotChanged(
checked_properties.insert(pair.getK());
}
- const std::unordered_map<std::string, std::string>& system_properties =
- config_.GetSystemProperties();
+ const OdrSystemProperties& system_properties = config_.GetSystemProperties();
for (const auto& [key, value] : system_properties) {
checked_properties.insert(key);
}
for (const std::string& name : checked_properties) {
- auto property_it = system_properties.find(name);
- std::string property = property_it != system_properties.end() ? property_it->second : "";
+ std::string property = system_properties.GetOrEmpty(name);
std::string cached_property = cached_system_properties[name];
if (property != cached_property) {
@@ -1101,16 +1091,22 @@ WARN_UNUSED bool OnDeviceRefresh::CheckSystemPropertiesHaveNotChanged(
}
WARN_UNUSED bool OnDeviceRefresh::CheckBuildUserfaultFdGc() const {
- auto it = config_.GetSystemProperties().find("ro.dalvik.vm.enable_uffd_gc");
- bool build_enable_uffd_gc = it != config_.GetSystemProperties().end() ?
- ParseBool(it->second) == ParseBoolResult::kTrue :
- false;
+ bool build_enable_uffd_gc =
+ config_.GetSystemProperties().GetBool("ro.dalvik.vm.enable_uffd_gc", /*default_value=*/false);
+ bool is_at_least_t = IsAtLeastT();
bool kernel_supports_uffd = KernelSupportsUffd();
- if (build_enable_uffd_gc && !kernel_supports_uffd) {
- // Normally, this should not happen. If this happens, the system image was probably built with a
- // wrong PRODUCT_ENABLE_UFFD_GC flag.
- LOG(WARNING) << "Userfaultfd GC check failed (build-time: {}, runtime: {})."_format(
- build_enable_uffd_gc, kernel_supports_uffd);
+ if (!art::odrefresh::CheckBuildUserfaultFdGc(
+ build_enable_uffd_gc, is_at_least_t, kernel_supports_uffd)) {
+ // Assuming the system property reflects how the dexpreopted boot image was
+ // compiled, and it doesn't agree with runtime support, we need to recompile
+ // it. This happens if we're running on S, T or U, or if the system image
+ // was built with a wrong PRODUCT_ENABLE_UFFD_GC flag.
+ LOG(INFO) << ART_FORMAT(
+ "Userfaultfd GC check failed (build_enable_uffd_gc: {}, is_at_least_t: {}, "
+ "kernel_supports_uffd: {}).",
+ build_enable_uffd_gc,
+ is_at_least_t,
+ kernel_supports_uffd);
return false;
}
return true;
@@ -1152,14 +1148,18 @@ WARN_UNUSED PreconditionCheckResult OnDeviceRefresh::CheckPreconditionForSystem(
WARN_UNUSED static bool CheckModuleInfo(const art_apex::ModuleInfo& cached_info,
const apex::ApexInfo& current_info) {
if (cached_info.getVersionCode() != current_info.getVersionCode()) {
- LOG(INFO) << "APEX ({}) version code mismatch (before: {}, now: {})"_format(
- current_info.getModuleName(), cached_info.getVersionCode(), current_info.getVersionCode());
+ LOG(INFO) << ART_FORMAT("APEX ({}) version code mismatch (before: {}, now: {})",
+ current_info.getModuleName(),
+ cached_info.getVersionCode(),
+ current_info.getVersionCode());
return false;
}
if (cached_info.getVersionName() != current_info.getVersionName()) {
- LOG(INFO) << "APEX ({}) version name mismatch (before: {}, now: {})"_format(
- current_info.getModuleName(), cached_info.getVersionName(), current_info.getVersionName());
+ LOG(INFO) << ART_FORMAT("APEX ({}) version name mismatch (before: {}, now: {})",
+ current_info.getModuleName(),
+ cached_info.getVersionName(),
+ current_info.getVersionName());
return false;
}
@@ -1169,10 +1169,10 @@ WARN_UNUSED static bool CheckModuleInfo(const art_apex::ModuleInfo& cached_info,
const int64_t cached_last_update_millis =
cached_info.hasLastUpdateMillis() ? cached_info.getLastUpdateMillis() : -1;
if (cached_last_update_millis != current_info.getLastUpdateMillis()) {
- LOG(INFO) << "APEX ({}) last update time mismatch (before: {}, now: {})"_format(
- current_info.getModuleName(),
- cached_info.getLastUpdateMillis(),
- current_info.getLastUpdateMillis());
+ LOG(INFO) << ART_FORMAT("APEX ({}) last update time mismatch (before: {}, now: {})",
+ current_info.getModuleName(),
+ cached_info.getLastUpdateMillis(),
+ current_info.getLastUpdateMillis());
return false;
}
@@ -1356,12 +1356,12 @@ WARN_UNUSED BootImages OnDeviceRefresh::CheckBootClasspathArtifactsAreUpToDate(
}
if (boot_images_on_system.Count() == BootImages::kMaxCount) {
- LOG(INFO) << "Boot images on /system OK ({})"_format(isa_str);
+ LOG(INFO) << ART_FORMAT("Boot images on /system OK ({})", isa_str);
// Nothing to compile.
return BootImages{.primary_boot_image = false, .boot_image_mainline_extension = false};
}
- LOG(INFO) << "Checking boot images /data ({})"_format(isa_str);
+ LOG(INFO) << ART_FORMAT("Checking boot images /data ({})", isa_str);
BootImages boot_images_on_data{.primary_boot_image = false,
.boot_image_mainline_extension = false};
@@ -1378,7 +1378,7 @@ WARN_UNUSED BootImages OnDeviceRefresh::CheckBootClasspathArtifactsAreUpToDate(
// attempt to generate a full boot image even if the minimal one exists.
if (PrimaryBootImageExist(
/*on_system=*/false, /*minimal=*/true, isa, &error_msg, checked_artifacts)) {
- LOG(INFO) << "Found minimal primary boot image ({})"_format(isa_str);
+ LOG(INFO) << ART_FORMAT("Found minimal primary boot image ({})", isa_str);
}
}
} else {
@@ -1408,7 +1408,7 @@ WARN_UNUSED BootImages OnDeviceRefresh::CheckBootClasspathArtifactsAreUpToDate(
};
if (boot_images_to_generate.Count() == 0) {
- LOG(INFO) << "Boot images on /data OK ({})"_format(isa_str);
+ LOG(INFO) << ART_FORMAT("Boot images on /data OK ({})", isa_str);
}
return boot_images_to_generate;
@@ -1653,15 +1653,16 @@ WARN_UNUSED CompilationResult OnDeviceRefresh::RunDex2oat(
const std::vector<std::string>& boot_classpath,
const std::vector<std::string>& input_boot_images,
const OdrArtifacts& artifacts,
- const std::vector<std::string>& extra_args,
+ CmdlineBuilder&& extra_args,
/*inout*/ std::vector<std::unique_ptr<File>>& readonly_files_raii) const {
- std::vector<std::string> args;
- args.push_back(config_.GetDex2Oat());
+ CmdlineBuilder args;
+ args.Add(config_.GetDex2Oat());
AddDex2OatCommonOptions(args);
AddDex2OatDebugInfo(args);
AddDex2OatInstructionSet(args, isa);
- Result<void> result = AddDex2OatConcurrencyArguments(args, config_.GetCompilationOsMode());
+ Result<void> result = AddDex2OatConcurrencyArguments(
+ args, config_.GetCompilationOsMode(), config_.GetSystemProperties());
if (!result.ok()) {
return CompilationResult::Error(OdrMetrics::Status::kUnknown, result.error().message());
}
@@ -1674,26 +1675,33 @@ WARN_UNUSED CompilationResult OnDeviceRefresh::RunDex2oat(
for (const std::string& dex_file : dex_files) {
std::string actual_path = RewriteParentDirectoryIfNeeded(dex_file);
- args.emplace_back("--dex-file=" + dex_file);
+ args.Add("--dex-file=%s", dex_file);
std::unique_ptr<File> file(OS::OpenFileForReading(actual_path.c_str()));
- args.emplace_back(StringPrintf("--dex-fd=%d", file->Fd()));
+ if (file == nullptr) {
+ return CompilationResult::Error(
+ OdrMetrics::Status::kIoError,
+ ART_FORMAT("Failed to open dex file '{}': {}", actual_path, strerror(errno)));
+ }
+ args.Add("--dex-fd=%d", file->Fd());
readonly_files_raii.push_back(std::move(file));
}
- args.emplace_back("--runtime-arg");
- args.emplace_back("-Xbootclasspath:" + Join(boot_classpath, ":"));
+ args.AddRuntime("-Xbootclasspath:%s", Join(boot_classpath, ":"));
result = AddBootClasspathFds(args, readonly_files_raii, boot_classpath);
if (!result.ok()) {
return CompilationResult::Error(OdrMetrics::Status::kIoError, result.error().message());
}
if (!input_boot_images.empty()) {
- args.emplace_back("--boot-image=" + Join(input_boot_images, ':'));
- AddCompiledBootClasspathFdsIfAny(
+ args.Add("--boot-image=%s", Join(input_boot_images, ':'));
+ result = AddCompiledBootClasspathFdsIfAny(
args, readonly_files_raii, boot_classpath, isa, input_boot_images);
+ if (!result.ok()) {
+ return CompilationResult::Error(OdrMetrics::Status::kIoError, result.error().message());
+ }
}
- args.emplace_back("--oat-location=" + artifacts.OatPath());
+ args.Add("--oat-location=%s", artifacts.OatPath());
std::pair<std::string, const char*> location_kind_pairs[] = {
std::make_pair(artifacts.ImagePath(), artifacts.ImageKind()),
std::make_pair(artifacts.OatPath(), "oat"),
@@ -1705,12 +1713,12 @@ WARN_UNUSED CompilationResult OnDeviceRefresh::RunDex2oat(
if (staging_file == nullptr) {
return CompilationResult::Error(
OdrMetrics::Status::kIoError,
- "Failed to create {} file '{}'"_format(kind, staging_location));
+ ART_FORMAT("Failed to create {} file '{}': {}", kind, staging_location, strerror(errno)));
}
// Don't check the state of the staging file. It doesn't need to be flushed because it's removed
// after the compilation regardless of success or failure.
staging_file->MarkUnchecked();
- args.emplace_back(StringPrintf("--%s-fd=%d", kind, staging_file->Fd()));
+ args.Add(StringPrintf("--%s-fd=%d", kind, staging_file->Fd()));
staging_files.emplace_back(std::move(staging_file));
}
@@ -1718,35 +1726,36 @@ WARN_UNUSED CompilationResult OnDeviceRefresh::RunDex2oat(
if (!EnsureDirectoryExists(install_location)) {
return CompilationResult::Error(
OdrMetrics::Status::kIoError,
- "Error encountered when preparing directory '{}'"_format(install_location));
+ ART_FORMAT("Error encountered when preparing directory '{}'", install_location));
}
- std::copy(extra_args.begin(), extra_args.end(), std::back_inserter(args));
+ args.Concat(std::move(extra_args));
Timer timer;
time_t timeout = GetSubprocessTimeout();
- std::string cmd_line = Join(args, ' ');
- LOG(INFO) << "{}: {} [timeout {}s]"_format(debug_message, cmd_line, timeout);
+ std::string cmd_line = Join(args.Get(), ' ');
+ LOG(INFO) << ART_FORMAT("{}: {} [timeout {}s]", debug_message, cmd_line, timeout);
if (config_.GetDryRun()) {
LOG(INFO) << "Compilation skipped (dry-run).";
return CompilationResult::Ok();
}
std::string error_msg;
- ExecResult dex2oat_result = exec_utils_->ExecAndReturnResult(args, timeout, &error_msg);
+ ExecResult dex2oat_result = exec_utils_->ExecAndReturnResult(args.Get(), timeout, &error_msg);
if (dex2oat_result.exit_code != 0) {
return CompilationResult::Dex2oatError(
dex2oat_result.exit_code < 0 ?
error_msg :
- "dex2oat returned an unexpected code: {}"_format(dex2oat_result.exit_code),
+ ART_FORMAT("dex2oat returned an unexpected code: {}", dex2oat_result.exit_code),
timer.duration().count(),
dex2oat_result);
}
if (!MoveOrEraseFiles(staging_files, install_location)) {
- return CompilationResult::Error(OdrMetrics::Status::kIoError,
- "Failed to commit artifacts to '{}'"_format(install_location));
+ return CompilationResult::Error(
+ OdrMetrics::Status::kIoError,
+ ART_FORMAT("Failed to commit artifacts to '{}'", install_location));
}
return CompilationResult::Dex2oatOk(timer.duration().count(), dex2oat_result);
@@ -1760,61 +1769,75 @@ OnDeviceRefresh::RunDex2oatForBootClasspath(const std::string& staging_dir,
const std::vector<std::string>& boot_classpath,
const std::vector<std::string>& input_boot_images,
const std::string& output_path) const {
- std::vector<std::string> args;
+ CmdlineBuilder args;
std::vector<std::unique_ptr<File>> readonly_files_raii;
// Compile as a single image for fewer files and slightly less memory overhead.
- args.emplace_back("--single-image");
+ args.Add("--single-image");
if (input_boot_images.empty()) {
// Primary boot image.
std::string art_boot_profile_file = GetArtRoot() + "/etc/boot-image.prof";
std::string framework_boot_profile_file = GetAndroidRoot() + "/etc/boot-image.prof";
- bool has_any_profile = AddDex2OatProfile(
+ Result<bool> has_any_profile = AddDex2OatProfile(
args, readonly_files_raii, {art_boot_profile_file, framework_boot_profile_file});
- if (!has_any_profile) {
+ if (!has_any_profile.ok()) {
+ return CompilationResult::Error(OdrMetrics::Status::kIoError,
+ has_any_profile.error().message());
+ }
+ if (!*has_any_profile) {
return CompilationResult::Error(OdrMetrics::Status::kIoError, "Missing boot image profile");
}
const std::string& compiler_filter = config_.GetBootImageCompilerFilter();
if (!compiler_filter.empty()) {
- args.emplace_back("--compiler-filter=" + compiler_filter);
+ args.Add("--compiler-filter=%s", compiler_filter);
} else {
- args.emplace_back(StringPrintf("--compiler-filter=%s", kPrimaryCompilerFilter));
+ args.Add("--compiler-filter=%s", kPrimaryCompilerFilter);
}
- args.emplace_back(StringPrintf("--base=0x%08x", ART_BASE_ADDRESS));
+ args.Add(StringPrintf("--base=0x%08x", ART_BASE_ADDRESS));
std::string dirty_image_objects_file(GetAndroidRoot() + "/etc/dirty-image-objects");
- if (OS::FileExists(dirty_image_objects_file.c_str())) {
- std::unique_ptr<File> file(OS::OpenFileForReading(dirty_image_objects_file.c_str()));
- args.emplace_back(StringPrintf("--dirty-image-objects-fd=%d", file->Fd()));
+ std::unique_ptr<File> file(OS::OpenFileForReading(dirty_image_objects_file.c_str()));
+ if (file != nullptr) {
+ args.Add("--dirty-image-objects-fd=%d", file->Fd());
readonly_files_raii.push_back(std::move(file));
+ } else if (errno == ENOENT) {
+ LOG(WARNING) << ART_FORMAT("Missing dirty objects file '{}'", dirty_image_objects_file);
} else {
- LOG(WARNING) << "Missing dirty objects file: '{}'"_format(dirty_image_objects_file);
+ return CompilationResult::Error(OdrMetrics::Status::kIoError,
+ ART_FORMAT("Failed to open dirty objects file '{}': {}",
+ dirty_image_objects_file,
+ strerror(errno)));
}
std::string preloaded_classes_file(GetAndroidRoot() + "/etc/preloaded-classes");
- if (OS::FileExists(preloaded_classes_file.c_str())) {
- std::unique_ptr<File> file(OS::OpenFileForReading(preloaded_classes_file.c_str()));
- args.emplace_back(StringPrintf("--preloaded-classes-fds=%d", file->Fd()));
+ file.reset(OS::OpenFileForReading(preloaded_classes_file.c_str()));
+ if (file != nullptr) {
+ args.Add("--preloaded-classes-fds=%d", file->Fd());
readonly_files_raii.push_back(std::move(file));
+ } else if (errno == ENOENT) {
+ LOG(WARNING) << ART_FORMAT("Missing preloaded classes file '{}'", preloaded_classes_file);
} else {
- LOG(WARNING) << "Missing preloaded classes file: '{}'"_format(preloaded_classes_file);
+ return CompilationResult::Error(OdrMetrics::Status::kIoError,
+ ART_FORMAT("Failed to open preloaded classes file '{}': {}",
+ preloaded_classes_file,
+ strerror(errno)));
}
} else {
// Mainline extension.
- args.emplace_back(StringPrintf("--compiler-filter=%s", kMainlineCompilerFilter));
+ args.Add("--compiler-filter=%s", kMainlineCompilerFilter);
}
return RunDex2oat(
staging_dir,
- "Compiling boot classpath ({}, {})"_format(GetInstructionSetString(isa), debug_name),
+ ART_FORMAT("Compiling boot classpath ({}, {})", GetInstructionSetString(isa), debug_name),
isa,
dex_files,
boot_classpath,
input_boot_images,
OdrArtifacts::ForBootImage(output_path),
- args,
+ std::move(args),
readonly_files_raii);
}
@@ -1918,7 +1941,7 @@ WARN_UNUSED CompilationResult OnDeviceRefresh::RunDex2oatForSystemServer(
const std::string& staging_dir,
const std::string& dex_file,
const std::vector<std::string>& classloader_context) const {
- std::vector<std::string> args;
+ CmdlineBuilder args;
std::vector<std::unique_ptr<File>> readonly_files_raii;
InstructionSet isa = config_.GetSystemServerIsa();
std::string output_path = GetSystemServerImagePath(/*on_system=*/false, dex_file);
@@ -1927,46 +1950,54 @@ WARN_UNUSED CompilationResult OnDeviceRefresh::RunDex2oatForSystemServer(
std::string profile = actual_jar_path + ".prof";
const std::string& compiler_filter = config_.GetSystemServerCompilerFilter();
bool maybe_add_profile = !compiler_filter.empty() || HasVettedDeviceSystemServerProfiles();
- bool has_added_profile =
- maybe_add_profile && AddDex2OatProfile(args, readonly_files_raii, {profile});
+ bool has_added_profile = false;
+ if (maybe_add_profile) {
+ Result<bool> has_any_profile = AddDex2OatProfile(args, readonly_files_raii, {profile});
+ if (!has_any_profile.ok()) {
+ return CompilationResult::Error(OdrMetrics::Status::kIoError,
+ has_any_profile.error().message());
+ }
+ has_added_profile = *has_any_profile;
+ }
if (!compiler_filter.empty()) {
- args.emplace_back("--compiler-filter=" + compiler_filter);
+ args.Add("--compiler-filter=%s", compiler_filter);
} else if (has_added_profile) {
- args.emplace_back("--compiler-filter=speed-profile");
+ args.Add("--compiler-filter=speed-profile");
} else {
- args.emplace_back("--compiler-filter=speed");
+ args.Add("--compiler-filter=speed");
}
std::string context_path = Join(classloader_context, ':');
if (art::ContainsElement(systemserver_classpath_jars_, dex_file)) {
- args.emplace_back("--class-loader-context=PCL[" + context_path + "]");
+ args.Add("--class-loader-context=PCL[%s]", context_path);
} else {
- args.emplace_back("--class-loader-context=PCL[];PCL[" + context_path + "]");
+ args.Add("--class-loader-context=PCL[];PCL[%s]", context_path);
}
if (!classloader_context.empty()) {
std::vector<int> fds;
for (const std::string& path : classloader_context) {
std::string actual_path = RewriteParentDirectoryIfNeeded(path);
std::unique_ptr<File> file(OS::OpenFileForReading(actual_path.c_str()));
- if (!file->IsValid()) {
+ if (file == nullptr) {
return CompilationResult::Error(
OdrMetrics::Status::kIoError,
- "Failed to open classloader context '{}': {}"_format(actual_path, strerror(errno)));
+ ART_FORMAT(
+ "Failed to open classloader context '{}': {}", actual_path, strerror(errno)));
}
fds.emplace_back(file->Fd());
readonly_files_raii.emplace_back(std::move(file));
}
- args.emplace_back("--class-loader-context-fds=" + Join(fds, ':'));
+ args.Add("--class-loader-context-fds=%s", Join(fds, ':'));
}
return RunDex2oat(staging_dir,
- "Compiling {}"_format(Basename(dex_file)),
+ ART_FORMAT("Compiling {}", Basename(dex_file)),
isa,
{dex_file},
boot_classpath_jars_,
GetBestBootImages(isa, /*include_mainline_extension=*/true),
OdrArtifacts::ForSystemServer(output_path),
- args,
+ std::move(args),
readonly_files_raii);
}
@@ -1993,7 +2024,7 @@ OnDeviceRefresh::CompileSystemServer(const std::string& staging_dir,
if (current_result.IsOk()) {
on_dex2oat_success();
} else {
- LOG(ERROR) << "Compilation of {} failed: {}"_format(Basename(jar), result.error_msg);
+ LOG(ERROR) << ART_FORMAT("Compilation of {} failed: {}", Basename(jar), result.error_msg);
}
}
diff --git a/odrefresh/odrefresh.h b/odrefresh/odrefresh.h
index 93812eb020..9496afedcc 100644
--- a/odrefresh/odrefresh.h
+++ b/odrefresh/odrefresh.h
@@ -35,6 +35,7 @@
#include "odr_config.h"
#include "odr_metrics.h"
#include "odrefresh/odrefresh.h"
+#include "tools/cmdline_builder.h"
namespace art {
namespace odrefresh {
@@ -331,7 +332,7 @@ class OnDeviceRefresh final {
const std::vector<std::string>& boot_classpath,
const std::vector<std::string>& input_boot_images,
const OdrArtifacts& artifacts,
- const std::vector<std::string>& extra_args,
+ tools::CmdlineBuilder&& extra_args,
/*inout*/ std::vector<std::unique_ptr<File>>& readonly_files_raii) const;
WARN_UNUSED CompilationResult
diff --git a/odrefresh/odrefresh_test.cc b/odrefresh/odrefresh_test.cc
index 8ebeeab6bb..5ee92a737c 100644
--- a/odrefresh/odrefresh_test.cc
+++ b/odrefresh/odrefresh_test.cc
@@ -33,9 +33,9 @@
#include "arch/instruction_set.h"
#include "base/common_art_test.h"
#include "base/file_utils.h"
+#include "base/macros.h"
#include "base/stl_util.h"
#include "exec_utils.h"
-#include "fmt/format.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "odr_artifacts.h"
@@ -58,8 +58,6 @@ using ::testing::Not;
using ::testing::ResultOf;
using ::testing::Return;
-using ::fmt::literals::operator""_format; // NOLINT
-
constexpr int kReplace = 1;
void CreateEmptyFile(const std::string& name) {
@@ -438,18 +436,18 @@ TEST_F(OdRefreshTest, AllSystemServerJars) {
.WillOnce(Return(0));
EXPECT_CALL(
*mock_exec_utils_,
- DoExecAndReturnCode(
- AllOf(Contains(Flag("--dex-file=", services_jar_)),
- Contains(Flag("--class-loader-context=", "PCL[{}]"_format(location_provider_jar_))),
- Contains(Flag("--class-loader-context-fds=", FdOf(location_provider_jar_))),
- Contains(Flag("--cache-info-fd=", FdOf(cache_info_xml_))))))
+ DoExecAndReturnCode(AllOf(
+ Contains(Flag("--dex-file=", services_jar_)),
+ Contains(Flag("--class-loader-context=", ART_FORMAT("PCL[{}]", location_provider_jar_))),
+ Contains(Flag("--class-loader-context-fds=", FdOf(location_provider_jar_))),
+ Contains(Flag("--cache-info-fd=", FdOf(cache_info_xml_))))))
.WillOnce(Return(0));
EXPECT_CALL(
*mock_exec_utils_,
DoExecAndReturnCode(AllOf(
Contains(Flag("--dex-file=", services_foo_jar_)),
Contains(Flag("--class-loader-context=",
- "PCL[];PCL[{}:{}]"_format(location_provider_jar_, services_jar_))),
+ ART_FORMAT("PCL[];PCL[{}:{}]", location_provider_jar_, services_jar_))),
Contains(ListFlag("--class-loader-context-fds=",
ElementsAre(FdOf(location_provider_jar_), FdOf(services_jar_)))),
Contains(Flag("--cache-info-fd=", FdOf(cache_info_xml_))))))
@@ -459,7 +457,7 @@ TEST_F(OdRefreshTest, AllSystemServerJars) {
DoExecAndReturnCode(AllOf(
Contains(Flag("--dex-file=", services_bar_jar_)),
Contains(Flag("--class-loader-context=",
- "PCL[];PCL[{}:{}]"_format(location_provider_jar_, services_jar_))),
+ ART_FORMAT("PCL[];PCL[{}:{}]", location_provider_jar_, services_jar_))),
Contains(ListFlag("--class-loader-context-fds=",
ElementsAre(FdOf(location_provider_jar_), FdOf(services_jar_)))),
Contains(Flag("--cache-info-fd=", FdOf(cache_info_xml_))))))
@@ -476,17 +474,17 @@ TEST_F(OdRefreshTest, AllSystemServerJars) {
TEST_F(OdRefreshTest, PartialSystemServerJars) {
EXPECT_CALL(
*mock_exec_utils_,
- DoExecAndReturnCode(
- AllOf(Contains(Flag("--dex-file=", services_jar_)),
- Contains(Flag("--class-loader-context=", "PCL[{}]"_format(location_provider_jar_))),
- Contains(Flag("--class-loader-context-fds=", FdOf(location_provider_jar_))))))
+ DoExecAndReturnCode(AllOf(
+ Contains(Flag("--dex-file=", services_jar_)),
+ Contains(Flag("--class-loader-context=", ART_FORMAT("PCL[{}]", location_provider_jar_))),
+ Contains(Flag("--class-loader-context-fds=", FdOf(location_provider_jar_))))))
.WillOnce(Return(0));
EXPECT_CALL(
*mock_exec_utils_,
DoExecAndReturnCode(AllOf(
Contains(Flag("--dex-file=", services_bar_jar_)),
Contains(Flag("--class-loader-context=",
- "PCL[];PCL[{}:{}]"_format(location_provider_jar_, services_jar_))),
+ ART_FORMAT("PCL[];PCL[{}:{}]", location_provider_jar_, services_jar_))),
Contains(ListFlag("--class-loader-context-fds=",
ElementsAre(FdOf(location_provider_jar_), FdOf(services_jar_)))))))
.WillOnce(Return(0));
diff --git a/perfetto_hprof/perfetto_hprof.cc b/perfetto_hprof/perfetto_hprof.cc
index 906362ab29..e805cf37f1 100644
--- a/perfetto_hprof/perfetto_hprof.cc
+++ b/perfetto_hprof/perfetto_hprof.cc
@@ -507,8 +507,10 @@ perfetto::protos::pbzero::HeapGraphType::Kind ProtoClassKind(uint32_t class_flag
using perfetto::protos::pbzero::HeapGraphType;
switch (class_flags) {
case art::mirror::kClassFlagNormal:
+ case art::mirror::kClassFlagRecord:
return HeapGraphType::KIND_NORMAL;
case art::mirror::kClassFlagNoReferenceFields:
+ case art::mirror::kClassFlagNoReferenceFields | art::mirror::kClassFlagRecord:
return HeapGraphType::KIND_NOREFERENCES;
case art::mirror::kClassFlagString | art::mirror::kClassFlagNoReferenceFields:
return HeapGraphType::KIND_STRING;
diff --git a/profman/include/profman/profman_result.h b/profman/include/profman/profman_result.h
index 9c9aca9e05..c4ca98828d 100644
--- a/profman/include/profman/profman_result.h
+++ b/profman/include/profman/profman_result.h
@@ -40,9 +40,13 @@ class ProfmanResult {
kSuccess = 0,
// A merge has been performed, meaning the reference profile has been changed.
kCompile = 1,
- // `--profile-file(-fd)` is not specified, or the specified profiles are outdated (i.e., APK
- // filename or checksum mismatch), empty, or don't contain enough number of new classes and
- // methods that meets the threshold to trigger a merge.
+ // One of the following conditions is met:
+ // - `--profile-file(-fd)` is not specified.
+ // - The specified profiles are outdated (i.e., APK filename or checksum mismatch).
+ // - The specified profiles are empty.
+ // - The specified profiles don't contain any new class or method.
  //   - The specified profiles don't contain a large enough number of new classes and methods to
  //     meet the threshold to trigger a merge, and `--force-merge-and-analyze` is not set.
kSkipCompilationSmallDelta = 2,
// All the input profiles (including the reference profile) are either outdated (i.e., APK
// filename or checksum mismatch) or empty.
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index abbde2d527..4d98e4927d 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -54,7 +54,7 @@ ProfmanResult::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
ProfileCompilationInfo cur_info(options.IsBootImageMerge());
if (!cur_info.Load(profile_files[i]->Fd(), /*merge_classes=*/ true, filter_fn)) {
LOG(WARNING) << "Could not load profile file at index " << i;
- if (options.IsForceMerge()) {
+ if (options.IsForceMerge() || options.IsForceMergeAndAnalyze()) {
// If we have to merge forcefully, ignore load failures.
// This is useful for boot image profiles to ignore stale profiles which are
// cleared lazily.
@@ -79,17 +79,30 @@ ProfmanResult::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
if (info.IsEmpty()) {
return ProfmanResult::kSkipCompilationEmptyProfiles;
}
- uint32_t min_change_in_methods_for_compilation = std::max(
- (options.GetMinNewMethodsPercentChangeForCompilation() * number_of_methods) / 100,
- kMinNewMethodsForCompilation);
- uint32_t min_change_in_classes_for_compilation = std::max(
- (options.GetMinNewClassesPercentChangeForCompilation() * number_of_classes) / 100,
- kMinNewClassesForCompilation);
- // Check if there is enough new information added by the current profiles.
- if (((info.GetNumberOfMethods() - number_of_methods) < min_change_in_methods_for_compilation) &&
- ((info.GetNumberOfResolvedClasses() - number_of_classes)
- < min_change_in_classes_for_compilation)) {
- return ProfmanResult::kSkipCompilationSmallDelta;
+
+ if (options.IsForceMergeAndAnalyze()) {
+ // When we force merge and analyze, we want to always recompile unless there is absolutely no
+ // difference between before and after the merge (i.e., the classes and methods in the
+ // reference profile were already a superset of those in all current profiles before the
+ // merge.)
+ if (info.GetNumberOfMethods() == number_of_methods &&
+ info.GetNumberOfResolvedClasses() == number_of_classes) {
+ return ProfmanResult::kSkipCompilationSmallDelta;
+ }
+ } else {
+ uint32_t min_change_in_methods_for_compilation = std::max(
+ (options.GetMinNewMethodsPercentChangeForCompilation() * number_of_methods) / 100,
+ kMinNewMethodsForCompilation);
+ uint32_t min_change_in_classes_for_compilation = std::max(
+ (options.GetMinNewClassesPercentChangeForCompilation() * number_of_classes) / 100,
+ kMinNewClassesForCompilation);
+ // Check if there is enough new information added by the current profiles.
+ if (((info.GetNumberOfMethods() - number_of_methods) <
+ min_change_in_methods_for_compilation) &&
+ ((info.GetNumberOfResolvedClasses() - number_of_classes) <
+ min_change_in_classes_for_compilation)) {
+ return ProfmanResult::kSkipCompilationSmallDelta;
+ }
}
}
diff --git a/profman/profile_assistant.h b/profman/profile_assistant.h
index 6b7a7a67f5..4caf86b3e2 100644
--- a/profman/profile_assistant.h
+++ b/profman/profile_assistant.h
@@ -32,11 +32,12 @@ class ProfileAssistant {
public:
static constexpr bool kForceMergeDefault = false;
static constexpr bool kBootImageMergeDefault = false;
- static constexpr uint32_t kMinNewMethodsPercentChangeForCompilation = 20;
- static constexpr uint32_t kMinNewClassesPercentChangeForCompilation = 20;
+ static constexpr uint32_t kMinNewMethodsPercentChangeForCompilation = 2;
+ static constexpr uint32_t kMinNewClassesPercentChangeForCompilation = 2;
Options()
: force_merge_(kForceMergeDefault),
+ force_merge_and_analyze_(kForceMergeDefault),
boot_image_merge_(kBootImageMergeDefault),
min_new_methods_percent_change_for_compilation_(
kMinNewMethodsPercentChangeForCompilation),
@@ -44,7 +45,9 @@ class ProfileAssistant {
kMinNewClassesPercentChangeForCompilation) {
}
+ // Only for S and T uses. U+ should use `IsForceMergeAndAnalyze`.
bool IsForceMerge() const { return force_merge_; }
+ bool IsForceMergeAndAnalyze() const { return force_merge_and_analyze_; }
bool IsBootImageMerge() const { return boot_image_merge_; }
uint32_t GetMinNewMethodsPercentChangeForCompilation() const {
return min_new_methods_percent_change_for_compilation_;
@@ -54,6 +57,7 @@ class ProfileAssistant {
}
void SetForceMerge(bool value) { force_merge_ = value; }
+ void SetForceMergeAndAnalyze(bool value) { force_merge_and_analyze_ = value; }
void SetBootImageMerge(bool value) { boot_image_merge_ = value; }
void SetMinNewMethodsPercentChangeForCompilation(uint32_t value) {
min_new_methods_percent_change_for_compilation_ = value;
@@ -63,10 +67,15 @@ class ProfileAssistant {
}
private:
- // If true, performs a forced merge, without analyzing if there is a
- // significant difference between the current profile and the reference profile.
+ // If true, performs a forced merge, without analyzing if there is a significant difference
+ // between before and after the merge.
// See ProfileAssistant#ProcessProfile.
+ // Only for S and T uses. U+ should use `force_merge_and_analyze_`.
bool force_merge_;
+ // If true, performs a forced merge and analyzes if there is any difference between before and
+ // after the merge.
+ // See ProfileAssistant#ProcessProfile.
+ bool force_merge_and_analyze_;
// Signals that the merge is for boot image profiles. It will ignore differences
// in profile versions (instead of aborting).
bool boot_image_merge_;
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index f7c4255071..93766e5d52 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -684,16 +684,6 @@ TEST_F(ProfileAssistantTest, ShouldAdviseCompilationMethodPercentage) {
kNumberOfMethodsInCurProfile, kNumberOfMethodsInRefProfile, extra_args));
}
-TEST_F(ProfileAssistantTest, DoNotAdviseCompilationMethodPercentageWithNewMin) {
- const uint16_t kNumberOfMethodsInRefProfile = 6000;
- const uint16_t kNumberOfMethodsInCurProfile = 6200; // Threshold is 20%.
-
- // We should not advise compilation.
- ASSERT_EQ(ProfmanResult::kSkipCompilationSmallDelta,
- CheckCompilationMethodPercentChange(kNumberOfMethodsInCurProfile,
- kNumberOfMethodsInRefProfile));
-}
-
TEST_F(ProfileAssistantTest, DoNotAdviseCompilationClassPercentage) {
const uint16_t kNumberOfClassesInRefProfile = 6000;
const uint16_t kNumberOfClassesInCurProfile = 6110; // Threshold is 2%.
@@ -716,16 +706,6 @@ TEST_F(ProfileAssistantTest, ShouldAdviseCompilationClassPercentage) {
kNumberOfClassesInCurProfile, kNumberOfClassesInRefProfile, extra_args));
}
-TEST_F(ProfileAssistantTest, DoNotAdviseCompilationClassPercentageWithNewMin) {
- const uint16_t kNumberOfClassesInRefProfile = 6000;
- const uint16_t kNumberOfClassesInCurProfile = 6200; // Threshold is 20%.
-
- // We should not advise compilation.
- ASSERT_EQ(ProfmanResult::kSkipCompilationSmallDelta,
- CheckCompilationClassPercentChange(kNumberOfClassesInCurProfile,
- kNumberOfClassesInRefProfile));
-}
-
TEST_F(ProfileAssistantTest, FailProcessingBecauseOfProfiles) {
ScratchFile profile1;
ScratchFile profile2;
@@ -2078,38 +2058,60 @@ TEST_F(ProfileAssistantTest, CopyAndUpdateProfileKeyNoUpdate) {
}
TEST_F(ProfileAssistantTest, BootImageMerge) {
- ScratchFile profile;
- ScratchFile reference_profile;
- std::vector<int> profile_fds({GetFd(profile)});
- int reference_profile_fd = GetFd(reference_profile);
- std::vector<uint32_t> hot_methods_cur;
- std::vector<uint32_t> hot_methods_ref;
- std::vector<uint32_t> empty_vector;
- size_t num_methods = 100;
- for (size_t i = 0; i < num_methods; ++i) {
- hot_methods_cur.push_back(i);
+ ScratchFile profile1;
+ ScratchFile profile2;
+ ScratchFile profile3;
+ ScratchFile output_profile;
+ std::vector<uint32_t> hot_methods_1;
+ std::vector<uint32_t> hot_methods_2;
+ std::vector<uint32_t> hot_methods_3;
+ for (size_t i = 0; i < 100; ++i) {
+ hot_methods_1.push_back(i);
+ }
+ for (size_t i = 50; i < 150; ++i) {
+ hot_methods_2.push_back(i);
}
- for (size_t i = 0; i < num_methods; ++i) {
- hot_methods_ref.push_back(i);
+ for (size_t i = 100; i < 200; ++i) {
+ hot_methods_3.push_back(i);
}
- ProfileCompilationInfo info1(/*for_boot_image=*/ true);
- SetupBasicProfile(dex1, hot_methods_cur, empty_vector, empty_vector,
- profile, &info1);
+ ProfileCompilationInfo info1(/*for_boot_image=*/false);
+ SetupBasicProfile(
+ dex1, hot_methods_1, /*startup_methods=*/{}, /*post_startup_methods=*/{}, profile1, &info1);
ProfileCompilationInfo info2(/*for_boot_image=*/true);
- SetupBasicProfile(dex1, hot_methods_ref, empty_vector, empty_vector,
- reference_profile, &info2);
+ SetupBasicProfile(
+ dex1, hot_methods_2, /*startup_methods=*/{}, /*post_startup_methods=*/{}, profile2, &info2);
+ ProfileCompilationInfo info3(/*for_boot_image=*/true);
+ SetupBasicProfile(
+ dex1, hot_methods_3, /*startup_methods=*/{}, /*post_startup_methods=*/{}, profile3, &info3);
- std::vector<const std::string> extra_args({"--force-merge", "--boot-image-merge"});
-
- int return_code = ProcessProfiles(profile_fds, reference_profile_fd, extra_args);
-
- ASSERT_EQ(return_code, ProfmanResult::kSuccess);
+ {
+ int return_code = ProcessProfiles({profile1.GetFd(), profile2.GetFd(), profile3.GetFd()},
+ output_profile.GetFd(),
+ {"--force-merge-and-analyze", "--boot-image-merge"});
+ ASSERT_EQ(return_code, ProfmanResult::kCompile);
+
+ // Verify the result: it should be equal to info2 union info3 since info1 is a regular profile
+ // and should be ignored.
+ ProfileCompilationInfo result(/*for_boot_image=*/true);
+ ASSERT_TRUE(result.Load(output_profile.GetFd()));
+ ASSERT_TRUE(info2.MergeWith(info3));
+ ASSERT_TRUE(result.Equals(info2));
+ }
- // Verify the result: it should be equal to info2 since info1 is a regular profile
- // and should be ignored.
- ProfileCompilationInfo result(/*for_boot_image=*/ true);
- ASSERT_TRUE(result.Load(reference_profile.GetFd()));
- ASSERT_TRUE(result.Equals(info2));
+ // Same for the legacy force merge mode.
+ {
+ int return_code = ProcessProfiles({profile1.GetFd(), profile2.GetFd(), profile3.GetFd()},
+ output_profile.GetFd(),
+ {"--force-merge", "--boot-image-merge"});
+ ASSERT_EQ(return_code, ProfmanResult::kSuccess);
+
+ // Verify the result: it should be equal to info2 union info3 since info1 is a regular profile
+ // and should be ignored.
+ ProfileCompilationInfo result(/*for_boot_image=*/true);
+ ASSERT_TRUE(result.Load(output_profile.GetFd()));
+ ASSERT_TRUE(info2.MergeWith(info3));
+ ASSERT_TRUE(result.Equals(info2));
+ }
}
// Under default behaviour we should not advise compilation
@@ -2154,6 +2156,82 @@ TEST_F(ProfileAssistantTest, ForceMerge) {
ASSERT_TRUE(result.Equals(info1));
}
+TEST_F(ProfileAssistantTest, ForceMergeAndAnalyze) {
+ const uint16_t kNumberOfMethodsInRefProfile = 600;
+ const uint16_t kNumberOfMethodsInCurProfile = 601;
+
+ ScratchFile ref_profile;
+ ScratchFile cur_profile;
+
+ ProfileCompilationInfo ref_info;
+ SetupProfile(
+ dex1, dex2, kNumberOfMethodsInRefProfile, /*number_of_classes=*/0, ref_profile, &ref_info);
+ ProfileCompilationInfo cur_info;
+ SetupProfile(
+ dex1, dex2, kNumberOfMethodsInCurProfile, /*number_of_classes=*/0, cur_profile, &cur_info);
+
+ std::vector<const std::string> extra_args({"--force-merge-and-analyze"});
+ int return_code = ProcessProfiles({cur_profile.GetFd()}, ref_profile.GetFd(), extra_args);
+
+ ASSERT_EQ(return_code, ProfmanResult::kCompile);
+
+ // Check that the result is the aggregation.
+ ProfileCompilationInfo result;
+ ASSERT_TRUE(result.Load(ref_profile.GetFd()));
+ ASSERT_TRUE(ref_info.MergeWith(cur_info));
+ ASSERT_TRUE(result.Equals(ref_info));
+}
+
+TEST_F(ProfileAssistantTest, ForceMergeAndAnalyzeNoDelta) {
+ const uint16_t kNumberOfMethodsInRefProfile = 600;
+ const uint16_t kNumberOfMethodsInCurProfile = 600;
+
+ ScratchFile ref_profile;
+ ScratchFile cur_profile;
+
+ ProfileCompilationInfo ref_info;
+ SetupProfile(
+ dex1, dex2, kNumberOfMethodsInRefProfile, /*number_of_classes=*/0, ref_profile, &ref_info);
+ ProfileCompilationInfo cur_info;
+ SetupProfile(
+ dex1, dex2, kNumberOfMethodsInCurProfile, /*number_of_classes=*/0, cur_profile, &cur_info);
+
+ std::vector<const std::string> extra_args({"--force-merge-and-analyze"});
+ int return_code = ProcessProfiles({cur_profile.GetFd()}, ref_profile.GetFd(), extra_args);
+
+ ASSERT_EQ(return_code, ProfmanResult::kSkipCompilationSmallDelta);
+
+ // Check that the reference profile is unchanged.
+ ProfileCompilationInfo result;
+ ASSERT_TRUE(result.Load(ref_profile.GetFd()));
+ ASSERT_TRUE(result.Equals(ref_info));
+}
+
+TEST_F(ProfileAssistantTest, ForceMergeAndAnalyzeEmptyProfiles) {
+ const uint16_t kNumberOfMethodsInRefProfile = 0;
+ const uint16_t kNumberOfMethodsInCurProfile = 0;
+
+ ScratchFile ref_profile;
+ ScratchFile cur_profile;
+
+ ProfileCompilationInfo ref_info;
+ SetupProfile(
+ dex1, dex2, kNumberOfMethodsInRefProfile, /*number_of_classes=*/0, ref_profile, &ref_info);
+ ProfileCompilationInfo cur_info;
+ SetupProfile(
+ dex1, dex2, kNumberOfMethodsInCurProfile, /*number_of_classes=*/0, cur_profile, &cur_info);
+
+ std::vector<const std::string> extra_args({"--force-merge-and-analyze"});
+ int return_code = ProcessProfiles({cur_profile.GetFd()}, ref_profile.GetFd(), extra_args);
+
+ ASSERT_EQ(return_code, ProfmanResult::kSkipCompilationEmptyProfiles);
+
+ // Check that the reference profile is unchanged.
+ ProfileCompilationInfo result;
+ ASSERT_TRUE(result.Load(ref_profile.GetFd()));
+ ASSERT_TRUE(result.Equals(ref_info));
+}
+
// Test that we consider the annotations when we merge boot image profiles.
TEST_F(ProfileAssistantTest, BootImageMergeWithAnnotations) {
ScratchFile profile;
@@ -2236,25 +2314,40 @@ TEST_F(ProfileAssistantTest, ForceMergeIgnoreProfilesItCannotLoad) {
std::string content = "giberish";
ASSERT_TRUE(profile1.GetFile()->WriteFully(content.c_str(), content.length()));
- ProfileCompilationInfo info2(/*for_boot_image=*/ true);
+ ProfileCompilationInfo info2(/*for_boot_image=*/true);
info2.Save(profile2.GetFd());
- std::vector<int> profile_fds({ GetFd(profile1)});
+ std::vector<int> profile_fds({GetFd(profile1)});
int reference_profile_fd = GetFd(profile2);
// With force-merge we should merge successfully.
- std::vector<const std::string> extra_args({"--force-merge", "--boot-image-merge"});
- ASSERT_EQ(ProcessProfiles(profile_fds, reference_profile_fd, extra_args),
- ProfmanResult::kSuccess);
+ {
+ ASSERT_EQ(
+ ProcessProfiles(
+ profile_fds, reference_profile_fd, {"--force-merge-and-analyze", "--boot-image-merge"}),
+ ProfmanResult::kSkipCompilationEmptyProfiles);
+
+ ProfileCompilationInfo result(/*for_boot_image=*/true);
+ ASSERT_TRUE(result.Load(reference_profile_fd));
+ ASSERT_TRUE(info2.Equals(result));
+ }
- ProfileCompilationInfo result(/*for_boot_image=*/ true);
- ASSERT_TRUE(result.Load(reference_profile_fd));
- ASSERT_TRUE(info2.Equals(result));
+ // Same for the legacy force merge mode.
+ {
+ ASSERT_EQ(
+ ProcessProfiles(profile_fds, reference_profile_fd, {"--force-merge", "--boot-image-merge"}),
+ ProfmanResult::kSuccess);
+
+ ProfileCompilationInfo result(/*for_boot_image=*/true);
+ ASSERT_TRUE(result.Load(reference_profile_fd));
+ ASSERT_TRUE(info2.Equals(result));
+ }
// Without force-merge we should fail.
- std::vector<const std::string> extra_args2({"--boot-image-merge"});
- ASSERT_EQ(ProcessProfiles(profile_fds, reference_profile_fd, extra_args2),
- ProfmanResult::kErrorBadProfiles);
+ {
+ ASSERT_EQ(ProcessProfiles(profile_fds, reference_profile_fd, {"--boot-image-merge"}),
+ ProfmanResult::kErrorBadProfiles);
+ }
}
} // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index 375a489821..0958f4b01c 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -190,10 +190,13 @@ NO_RETURN static void Usage(const char *fmt, ...) {
UsageError(" --boot-image-merge: indicates that this merge is for a boot image profile.");
UsageError(" In this case, the reference profile must have a boot profile version.");
UsageError(" --force-merge: performs a forced merge, without analyzing if there is a");
- UsageError(" significant difference between the current profile and the reference profile.");
- UsageError(" --min-new-methods-percent-change=percentage between 0 and 100 (default 20)");
+ UsageError(" significant difference between before and after the merge.");
+ UsageError(" Deprecated. Use --force-merge-and-analyze instead.");
+ UsageError(" --force-merge-and-analyze: performs a forced merge and analyzes if there is any");
+ UsageError(" difference between before and after the merge.");
+ UsageError(" --min-new-methods-percent-change=percentage between 0 and 100 (default 2)");
UsageError(" the min percent of new methods to trigger a compilation.");
- UsageError(" --min-new-classes-percent-change=percentage between 0 and 100 (default 20)");
+ UsageError(" --min-new-classes-percent-change=percentage between 0 and 100 (default 2)");
UsageError(" the min percent of new classes to trigger a compilation.");
UsageError("");
@@ -469,7 +472,11 @@ class ProfMan final {
} else if (option == "--boot-image-merge") {
profile_assistant_options_.SetBootImageMerge(true);
} else if (option == "--force-merge") {
+ // For backward compatibility only.
+ // TODO(jiakaiz): Remove this when S and T are no longer supported.
profile_assistant_options_.SetForceMerge(true);
+ } else if (option == "--force-merge-and-analyze") {
+ profile_assistant_options_.SetForceMergeAndAnalyze(true);
} else {
Usage("Unknown argument '%s'", raw_option);
}
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 1c4b8714e1..3c3b9f8681 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -449,6 +449,7 @@ libart_cc_defaults {
"heapprofd_client_api",
],
static_libs: [
+ "libmodules-utils-build",
"libstatslog_art",
],
generated_sources: [
diff --git a/runtime/base/gc_visited_arena_pool.cc b/runtime/base/gc_visited_arena_pool.cc
index 52b3829401..4b1494fa51 100644
--- a/runtime/base/gc_visited_arena_pool.cc
+++ b/runtime/base/gc_visited_arena_pool.cc
@@ -27,43 +27,67 @@
namespace art {
-TrackedArena::TrackedArena(uint8_t* start, size_t size, bool pre_zygote_fork)
- : Arena(), first_obj_array_(nullptr), pre_zygote_fork_(pre_zygote_fork) {
+TrackedArena::TrackedArena(uint8_t* start, size_t size, bool pre_zygote_fork, bool single_obj_arena)
+ : Arena(),
+ first_obj_array_(nullptr),
+ pre_zygote_fork_(pre_zygote_fork),
+ waiting_for_deletion_(false) {
static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,
"Arena should not need stronger alignment than kPageSize.");
- DCHECK_ALIGNED(size, kPageSize);
- DCHECK_ALIGNED(start, kPageSize);
memory_ = start;
size_ = size;
- size_t arr_size = size / kPageSize;
- first_obj_array_.reset(new uint8_t*[arr_size]);
- std::fill_n(first_obj_array_.get(), arr_size, nullptr);
+ if (single_obj_arena) {
+ // We have only one object in this arena and it is expected to consume the
+ // entire arena.
+ bytes_allocated_ = size;
+ } else {
+ DCHECK_ALIGNED(size, kPageSize);
+ DCHECK_ALIGNED(start, kPageSize);
+ size_t arr_size = size / kPageSize;
+ first_obj_array_.reset(new uint8_t*[arr_size]);
+ std::fill_n(first_obj_array_.get(), arr_size, nullptr);
+ }
+}
+
+void TrackedArena::ReleasePages(uint8_t* begin, size_t size, bool pre_zygote_fork) {
+ DCHECK_ALIGNED(begin, kPageSize);
+ // Userfaultfd GC uses MAP_SHARED mappings for linear-alloc and therefore
+ // MADV_DONTNEED will not free the pages from page cache. Therefore use
+ // MADV_REMOVE instead, which is meant for this purpose.
+ // Arenas allocated pre-zygote fork are private anonymous and hence must be
+ // released using MADV_DONTNEED.
+ if (!gUseUserfaultfd || pre_zygote_fork ||
+ (madvise(begin, size, MADV_REMOVE) == -1 && errno == EINVAL)) {
+ // MADV_REMOVE fails if invoked on anonymous mapping, which could happen
+ // if the arena is released before userfaultfd-GC starts using memfd. So
+ // use MADV_DONTNEED.
+ ZeroAndReleasePages(begin, size);
+ }
}
void TrackedArena::Release() {
if (bytes_allocated_ > 0) {
- // Userfaultfd GC uses MAP_SHARED mappings for linear-alloc and therefore
- // MADV_DONTNEED will not free the pages from page cache. Therefore use
- // MADV_REMOVE instead, which is meant for this purpose.
- // Arenas allocated pre-zygote fork are private anonymous and hence must be
- // released using MADV_DONTNEED.
- if (!gUseUserfaultfd || pre_zygote_fork_ ||
- (madvise(Begin(), Size(), MADV_REMOVE) == -1 && errno == EINVAL)) {
- // MADV_REMOVE fails if invoked on anonymous mapping, which could happen
- // if the arena is released before userfaultfd-GC starts using memfd. So
- // use MADV_DONTNEED.
- ZeroAndReleasePages(Begin(), Size());
+ ReleasePages(Begin(), Size(), pre_zygote_fork_);
+ if (first_obj_array_.get() != nullptr) {
+ std::fill_n(first_obj_array_.get(), Size() / kPageSize, nullptr);
}
- std::fill_n(first_obj_array_.get(), Size() / kPageSize, nullptr);
bytes_allocated_ = 0;
}
}
void TrackedArena::SetFirstObject(uint8_t* obj_begin, uint8_t* obj_end) {
+ DCHECK(first_obj_array_.get() != nullptr);
DCHECK_LE(static_cast<void*>(Begin()), static_cast<void*>(obj_end));
DCHECK_LT(static_cast<void*>(obj_begin), static_cast<void*>(obj_end));
+ GcVisitedArenaPool* arena_pool =
+ static_cast<GcVisitedArenaPool*>(Runtime::Current()->GetLinearAllocArenaPool());
size_t idx = static_cast<size_t>(obj_begin - Begin()) / kPageSize;
size_t last_byte_idx = static_cast<size_t>(obj_end - 1 - Begin()) / kPageSize;
+ // Do the update below with arena-pool's lock in shared-mode to serialize with
+ // the compaction-pause wherein we acquire it exclusively. This is to ensure
+  // that the last byte read there doesn't change between the read and the
+ // userfaultfd registration.
+ ReaderMutexLock rmu(Thread::Current(), arena_pool->GetLock());
// If the addr is at the beginning of a page, then we set it for that page too.
if (IsAligned<kPageSize>(obj_begin)) {
first_obj_array_[idx] = obj_begin;
@@ -106,7 +130,13 @@ uint8_t* GcVisitedArenaPool::AddMap(size_t min_size) {
}
GcVisitedArenaPool::GcVisitedArenaPool(bool low_4gb, bool is_zygote, const char* name)
- : bytes_allocated_(0), name_(name), low_4gb_(low_4gb), pre_zygote_fork_(is_zygote) {}
+ : lock_("gc-visited arena-pool", kGenericBottomLock),
+ bytes_allocated_(0),
+ unused_arenas_(nullptr),
+ name_(name),
+ defer_arena_freeing_(false),
+ low_4gb_(low_4gb),
+ pre_zygote_fork_(is_zygote) {}
GcVisitedArenaPool::~GcVisitedArenaPool() {
for (Chunk* chunk : free_chunks_) {
@@ -117,13 +147,12 @@ GcVisitedArenaPool::~GcVisitedArenaPool() {
}
size_t GcVisitedArenaPool::GetBytesAllocated() const {
- std::lock_guard<std::mutex> lock(lock_);
+ ReaderMutexLock rmu(Thread::Current(), lock_);
return bytes_allocated_;
}
uint8_t* GcVisitedArenaPool::AddPreZygoteForkMap(size_t size) {
DCHECK(pre_zygote_fork_);
- DCHECK(Runtime::Current()->IsZygote());
std::string pre_fork_name = "Pre-zygote-";
pre_fork_name += name_;
std::string err_msg;
@@ -137,18 +166,67 @@ uint8_t* GcVisitedArenaPool::AddPreZygoteForkMap(size_t size) {
return map.Begin();
}
-Arena* GcVisitedArenaPool::AllocArena(size_t size) {
+uint8_t* GcVisitedArenaPool::AllocSingleObjArena(size_t size) {
+ WriterMutexLock wmu(Thread::Current(), lock_);
+ Arena* arena;
+ DCHECK(gUseUserfaultfd);
+ // To minimize private dirty, all class and intern table allocations are
+ // done outside LinearAlloc range so they are untouched during GC.
+ if (pre_zygote_fork_) {
+ uint8_t* begin = static_cast<uint8_t*>(malloc(size));
+ auto insert_result = allocated_arenas_.insert(
+ new TrackedArena(begin, size, /*pre_zygote_fork=*/true, /*single_obj_arena=*/true));
+ arena = *insert_result.first;
+ } else {
+ arena = AllocArena(size, /*single_obj_arena=*/true);
+ }
+ return arena->Begin();
+}
+
+void GcVisitedArenaPool::FreeSingleObjArena(uint8_t* addr) {
+ Thread* self = Thread::Current();
+ size_t size;
+ bool zygote_arena;
+ {
+ TrackedArena temp_arena(addr);
+ WriterMutexLock wmu(self, lock_);
+ auto iter = allocated_arenas_.find(&temp_arena);
+ DCHECK(iter != allocated_arenas_.end());
+ TrackedArena* arena = *iter;
+ size = arena->Size();
+ zygote_arena = arena->IsPreZygoteForkArena();
+ DCHECK_EQ(arena->Begin(), addr);
+ DCHECK(arena->IsSingleObjectArena());
+ allocated_arenas_.erase(iter);
+ if (defer_arena_freeing_) {
+ arena->SetupForDeferredDeletion(unused_arenas_);
+ unused_arenas_ = arena;
+ } else {
+ delete arena;
+ }
+ }
+ // Refer to the comment in FreeArenaChain() for why the pages are released
+ // after deleting the arena.
+ if (zygote_arena) {
+ free(addr);
+ } else {
+ TrackedArena::ReleasePages(addr, size, /*pre_zygote_fork=*/false);
+ WriterMutexLock wmu(self, lock_);
+ FreeRangeLocked(addr, size);
+ }
+}
+
+Arena* GcVisitedArenaPool::AllocArena(size_t size, bool single_obj_arena) {
// Return only page aligned sizes so that madvise can be leveraged.
size = RoundUp(size, kPageSize);
- std::lock_guard<std::mutex> lock(lock_);
-
if (pre_zygote_fork_) {
// The first fork out of zygote hasn't happened yet. Allocate arena in a
// private-anonymous mapping to retain clean pages across fork.
- DCHECK(Runtime::Current()->IsZygote());
uint8_t* addr = AddPreZygoteForkMap(size);
- auto emplace_result = allocated_arenas_.emplace(addr, size, /*pre_zygote_fork=*/true);
- return const_cast<TrackedArena*>(&(*emplace_result.first));
+ auto insert_result = allocated_arenas_.insert(
+ new TrackedArena(addr, size, /*pre_zygote_fork=*/true, single_obj_arena));
+ DCHECK(insert_result.second);
+ return *insert_result.first;
}
Chunk temp_chunk(nullptr, size);
@@ -165,19 +243,21 @@ Arena* GcVisitedArenaPool::AllocArena(size_t size) {
// if the best-fit chunk < 2x the requested size, then give the whole chunk.
if (chunk->size_ < 2 * size) {
DCHECK_GE(chunk->size_, size);
- auto emplace_result = allocated_arenas_.emplace(chunk->addr_,
- chunk->size_,
- /*pre_zygote_fork=*/false);
- DCHECK(emplace_result.second);
+ auto insert_result = allocated_arenas_.insert(new TrackedArena(chunk->addr_,
+ chunk->size_,
+ /*pre_zygote_fork=*/false,
+ single_obj_arena));
+ DCHECK(insert_result.second);
free_chunks_.erase(free_chunks_iter);
best_fit_allocs_.erase(best_fit_iter);
delete chunk;
- return const_cast<TrackedArena*>(&(*emplace_result.first));
+ return *insert_result.first;
} else {
- auto emplace_result = allocated_arenas_.emplace(chunk->addr_,
- size,
- /*pre_zygote_fork=*/false);
- DCHECK(emplace_result.second);
+ auto insert_result = allocated_arenas_.insert(new TrackedArena(chunk->addr_,
+ size,
+ /*pre_zygote_fork=*/false,
+ single_obj_arena));
+ DCHECK(insert_result.second);
// Compute next iterators for faster insert later.
auto next_best_fit_iter = best_fit_iter;
next_best_fit_iter++;
@@ -190,7 +270,7 @@ Arena* GcVisitedArenaPool::AllocArena(size_t size) {
DCHECK_EQ(free_chunks_nh.value()->addr_, chunk->addr_);
best_fit_allocs_.insert(next_best_fit_iter, std::move(best_fit_nh));
free_chunks_.insert(next_free_chunks_iter, std::move(free_chunks_nh));
- return const_cast<TrackedArena*>(&(*emplace_result.first));
+ return *insert_result.first;
}
}
@@ -266,27 +346,79 @@ void GcVisitedArenaPool::FreeArenaChain(Arena* first) {
// TODO: Handle the case when arena_allocator::kArenaAllocatorPreciseTracking
// is true. See MemMapArenaPool::FreeArenaChain() for example.
CHECK(!arena_allocator::kArenaAllocatorPreciseTracking);
+ Thread* self = Thread::Current();
+ // vector of arena ranges to be freed and whether they are pre-zygote-fork.
+ std::vector<std::tuple<uint8_t*, size_t, bool>> free_ranges;
+
+ {
+ WriterMutexLock wmu(self, lock_);
+ while (first != nullptr) {
+ TrackedArena* temp = down_cast<TrackedArena*>(first);
+ DCHECK(!temp->IsSingleObjectArena());
+ first = first->Next();
+ free_ranges.emplace_back(temp->Begin(), temp->Size(), temp->IsPreZygoteForkArena());
+ // In other implementations of ArenaPool this is calculated when asked for,
+ // thanks to the list of free arenas that is kept around. But in this case,
+ // we release the freed arena back to the pool and therefore need to
+ // calculate here.
+ bytes_allocated_ += temp->GetBytesAllocated();
+ auto iter = allocated_arenas_.find(temp);
+ DCHECK(iter != allocated_arenas_.end());
+ allocated_arenas_.erase(iter);
+ if (defer_arena_freeing_) {
+ temp->SetupForDeferredDeletion(unused_arenas_);
+ unused_arenas_ = temp;
+ } else {
+ delete temp;
+ }
+ }
+ }
- // madvise the arenas before acquiring lock for scalability
- for (Arena* temp = first; temp != nullptr; temp = temp->Next()) {
- temp->Release();
+ // madvise of arenas must be done after the above loop which serializes with
+ // MarkCompact::ProcessLinearAlloc() so that if it finds an arena to be not
+ // 'waiting-for-deletion' then it finishes the arena's processing before
+ // clearing here. Otherwise, we could have a situation wherein arena-pool
+ // assumes the memory range of the arena(s) to be zero'ed (by madvise),
+ // whereas GC maps stale arena pages.
+ for (auto& iter : free_ranges) {
+ // No need to madvise pre-zygote-fork arenas as they will be munmapped below.
+ if (!std::get<2>(iter)) {
+ TrackedArena::ReleasePages(std::get<0>(iter), std::get<1>(iter), /*pre_zygote_fork=*/false);
+ }
}
- std::lock_guard<std::mutex> lock(lock_);
- arenas_freed_ = true;
- while (first != nullptr) {
- FreeRangeLocked(first->Begin(), first->Size());
- // In other implementations of ArenaPool this is calculated when asked for,
- // thanks to the list of free arenas that is kept around. But in this case,
- // we release the freed arena back to the pool and therefore need to
- // calculate here.
- bytes_allocated_ += first->GetBytesAllocated();
- TrackedArena* temp = down_cast<TrackedArena*>(first);
- // TODO: Add logic to unmap the maps corresponding to pre-zygote-fork
- // arenas, which are expected to be released only during shutdown.
- first = first->Next();
- size_t erase_count = allocated_arenas_.erase(*temp);
- DCHECK_EQ(erase_count, 1u);
+ WriterMutexLock wmu(self, lock_);
+ for (auto& iter : free_ranges) {
+ if (UNLIKELY(std::get<2>(iter))) {
+ bool found = false;
+ for (auto map_iter = maps_.begin(); map_iter != maps_.end(); map_iter++) {
+ if (map_iter->Begin() == std::get<0>(iter)) {
+ // erase will destruct the MemMap and thereby munmap. But this happens
+ // very rarely so it's ok to do it with lock acquired.
+ maps_.erase(map_iter);
+ found = true;
+ break;
+ }
+ }
+ CHECK(found);
+ } else {
+ FreeRangeLocked(std::get<0>(iter), std::get<1>(iter));
+ }
+ }
+}
+
+void GcVisitedArenaPool::DeleteUnusedArenas() {
+ TrackedArena* arena;
+ {
+ WriterMutexLock wmu(Thread::Current(), lock_);
+ defer_arena_freeing_ = false;
+ arena = unused_arenas_;
+ unused_arenas_ = nullptr;
+ }
+ while (arena != nullptr) {
+ TrackedArena* temp = down_cast<TrackedArena*>(arena->Next());
+ delete arena;
+ arena = temp;
}
}
diff --git a/runtime/base/gc_visited_arena_pool.h b/runtime/base/gc_visited_arena_pool.h
index e307147c9e..390da55c43 100644
--- a/runtime/base/gc_visited_arena_pool.h
+++ b/runtime/base/gc_visited_arena_pool.h
@@ -17,12 +17,16 @@
#ifndef ART_RUNTIME_BASE_GC_VISITED_ARENA_POOL_H_
#define ART_RUNTIME_BASE_GC_VISITED_ARENA_POOL_H_
-#include "base/casts.h"
+#include <set>
+
+#include "base/allocator.h"
#include "base/arena_allocator.h"
+#include "base/casts.h"
+#include "base/hash_set.h"
#include "base/locks.h"
#include "base/mem_map.h"
-
-#include <set>
+#include "read_barrier_config.h"
+#include "runtime.h"
namespace art {
@@ -34,27 +38,45 @@ class TrackedArena final : public Arena {
public:
// Used for searching in maps. Only arena's starting address is relevant.
explicit TrackedArena(uint8_t* addr) : pre_zygote_fork_(false) { memory_ = addr; }
- TrackedArena(uint8_t* start, size_t size, bool pre_zygote_fork);
+ TrackedArena(uint8_t* start, size_t size, bool pre_zygote_fork, bool single_obj_arena);
template <typename PageVisitor>
void VisitRoots(PageVisitor& visitor) const REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK_ALIGNED(Size(), kPageSize);
- DCHECK_ALIGNED(Begin(), kPageSize);
- int nr_pages = Size() / kPageSize;
uint8_t* page_begin = Begin();
- for (int i = 0; i < nr_pages && first_obj_array_[i] != nullptr; i++, page_begin += kPageSize) {
- visitor(page_begin, first_obj_array_[i]);
+ if (first_obj_array_.get() != nullptr) {
+ DCHECK_ALIGNED(Size(), kPageSize);
+ DCHECK_ALIGNED(Begin(), kPageSize);
+ for (int i = 0, nr_pages = Size() / kPageSize; i < nr_pages; i++, page_begin += kPageSize) {
+ uint8_t* first = first_obj_array_[i];
+ if (first != nullptr) {
+ visitor(page_begin, first, kPageSize);
+ } else {
+ break;
+ }
+ }
+ } else {
+ size_t page_size = Size();
+ while (page_size > kPageSize) {
+ visitor(page_begin, nullptr, kPageSize);
+ page_begin += kPageSize;
+ page_size -= kPageSize;
+ }
+ visitor(page_begin, nullptr, page_size);
}
}
// Return the page addr of the first page with first_obj set to nullptr.
uint8_t* GetLastUsedByte() const REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK_ALIGNED(Begin(), kPageSize);
- DCHECK_ALIGNED(End(), kPageSize);
// Jump past bytes-allocated for arenas which are not currently being used
// by arena-allocator. This helps in reducing loop iterations below.
uint8_t* last_byte = AlignUp(Begin() + GetBytesAllocated(), kPageSize);
- DCHECK_LE(last_byte, End());
+ if (first_obj_array_.get() != nullptr) {
+ DCHECK_ALIGNED(Begin(), kPageSize);
+ DCHECK_ALIGNED(End(), kPageSize);
+ DCHECK_LE(last_byte, End());
+ } else {
+ DCHECK_EQ(last_byte, End());
+ }
for (size_t i = (last_byte - Begin()) / kPageSize;
last_byte < End() && first_obj_array_[i] != nullptr;
last_byte += kPageSize, i++) {
@@ -66,21 +88,43 @@ class TrackedArena final : public Arena {
uint8_t* GetFirstObject(uint8_t* addr) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_LE(Begin(), addr);
DCHECK_GT(End(), addr);
- return first_obj_array_[(addr - Begin()) / kPageSize];
+ if (first_obj_array_.get() != nullptr) {
+ return first_obj_array_[(addr - Begin()) / kPageSize];
+ } else {
+ // The pages of this arena contain array of GC-roots. So we don't need
+ // first-object of any given page of the arena.
+ // Returning null helps distinguish which visitor is to be called.
+ return nullptr;
+ }
}
// Set 'obj_begin' in first_obj_array_ in every element for which it's the
// first object.
void SetFirstObject(uint8_t* obj_begin, uint8_t* obj_end);
+ // Setup the arena for deferred deletion.
+ void SetupForDeferredDeletion(TrackedArena* next_arena) {
+ DCHECK(next_arena == nullptr || next_arena->waiting_for_deletion_);
+ DCHECK(!waiting_for_deletion_);
+ waiting_for_deletion_ = true;
+ next_ = next_arena;
+ }
+ bool IsWaitingForDeletion() const { return waiting_for_deletion_; }
+ // Madvise the pages in the given range. 'begin' is expected to be page
+ // aligned.
+ // TODO: Remove this once we remove the shmem (minor-fault) code in
+ // userfaultfd GC and directly use ZeroAndReleaseMemory().
+ static void ReleasePages(uint8_t* begin, size_t size, bool pre_zygote_fork);
void Release() override;
bool IsPreZygoteForkArena() const { return pre_zygote_fork_; }
+ bool IsSingleObjectArena() const { return first_obj_array_.get() == nullptr; }
private:
// first_obj_array_[i] is the object that overlaps with the ith page's
// beginning, i.e. first_obj_array_[i] <= ith page_begin.
std::unique_ptr<uint8_t*[]> first_obj_array_;
const bool pre_zygote_fork_;
+ bool waiting_for_deletion_;
};
// An arena-pool wherein allocations can be tracked so that the GC can visit all
@@ -101,15 +145,24 @@ class GcVisitedArenaPool final : public ArenaPool {
bool is_zygote = false,
const char* name = "LinearAlloc");
virtual ~GcVisitedArenaPool();
- Arena* AllocArena(size_t size) override;
- void FreeArenaChain(Arena* first) override;
- size_t GetBytesAllocated() const override;
+
+ Arena* AllocArena(size_t size, bool need_first_obj_arr) REQUIRES(lock_);
+ // Used by the arena allocator.
+ Arena* AllocArena(size_t size) override REQUIRES(!lock_) {
+ WriterMutexLock wmu(Thread::Current(), lock_);
+ return AllocArena(size, /*single_obj_arena=*/false);
+ }
+ void FreeArenaChain(Arena* first) override REQUIRES(!lock_);
+ size_t GetBytesAllocated() const override REQUIRES(!lock_);
void ReclaimMemory() override {}
void LockReclaimMemory() override {}
void TrimMaps() override {}
- bool Contains(void* ptr) {
- std::lock_guard<std::mutex> lock(lock_);
+ uint8_t* AllocSingleObjArena(size_t size) REQUIRES(!lock_);
+ void FreeSingleObjArena(uint8_t* addr) REQUIRES(!lock_);
+
+ bool Contains(void* ptr) REQUIRES(!lock_) {
+ ReaderMutexLock rmu(Thread::Current(), lock_);
for (auto& map : maps_) {
if (map.HasAddress(ptr)) {
return true;
@@ -119,51 +172,43 @@ class GcVisitedArenaPool final : public ArenaPool {
}
template <typename PageVisitor>
- void VisitRoots(PageVisitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
- std::lock_guard<std::mutex> lock(lock_);
+ void VisitRoots(PageVisitor& visitor) REQUIRES_SHARED(Locks::mutator_lock_, lock_) {
for (auto& arena : allocated_arenas_) {
- arena.VisitRoots(visitor);
+ arena->VisitRoots(visitor);
}
}
template <typename Callback>
- void ForEachAllocatedArena(Callback cb) REQUIRES_SHARED(Locks::mutator_lock_) {
- std::lock_guard<std::mutex> lock(lock_);
+ void ForEachAllocatedArena(Callback cb) REQUIRES_SHARED(Locks::mutator_lock_, lock_) {
+ // We should not have any unused arenas when calling this function.
+ CHECK(unused_arenas_ == nullptr);
for (auto& arena : allocated_arenas_) {
- cb(arena);
+ cb(*arena);
}
}
// Called in Heap::PreZygoteFork(). All allocations after this are done in
// arena-pool which is visited by userfaultfd.
- void SetupPostZygoteMode() {
- std::lock_guard<std::mutex> lock(lock_);
+ void SetupPostZygoteMode() REQUIRES(!lock_) {
+ WriterMutexLock wmu(Thread::Current(), lock_);
DCHECK(pre_zygote_fork_);
pre_zygote_fork_ = false;
}
// For userfaultfd GC to be able to acquire the lock to avoid concurrent
// release of arenas when it is visiting them.
- std::mutex& GetLock() { return lock_; }
-
- // Find the given arena in allocated_arenas_. The function is called with
- // lock_ acquired.
- bool FindAllocatedArena(const TrackedArena* arena) const NO_THREAD_SAFETY_ANALYSIS {
- for (auto& allocated_arena : allocated_arenas_) {
- if (arena == &allocated_arena) {
- return true;
- }
- }
- return false;
- }
+ ReaderWriterMutex& GetLock() const RETURN_CAPABILITY(lock_) { return lock_; }
- void ClearArenasFreed() {
- std::lock_guard<std::mutex> lock(lock_);
- arenas_freed_ = false;
+ // Called in the compaction pause to indicate that all arenas that will be
+ // freed until compaction is done shouldn't delete the TrackedArena object to
+ // avoid the ABA problem. Called with lock_ acquired.
+ void DeferArenaFreeing() REQUIRES(lock_) {
+ CHECK(unused_arenas_ == nullptr);
+ defer_arena_freeing_ = true;
}
- // The function is called with lock_ acquired.
- bool AreArenasFreed() const NO_THREAD_SAFETY_ANALYSIS { return arenas_freed_; }
+ // Clear defer_arena_freeing_ and delete all unused arenas.
+ void DeleteUnusedArenas() REQUIRES(!lock_);
private:
void FreeRangeLocked(uint8_t* range_begin, size_t range_size) REQUIRES(lock_);
@@ -197,31 +242,40 @@ class GcVisitedArenaPool final : public ArenaPool {
}
};
- class LessByArenaAddr {
+ class TrackedArenaEquals {
public:
- bool operator()(const TrackedArena& a, const TrackedArena& b) const {
- return std::less<uint8_t*>{}(a.Begin(), b.Begin());
+ bool operator()(const TrackedArena* a, const TrackedArena* b) const {
+ return std::equal_to<uint8_t*>{}(a->Begin(), b->Begin());
}
};
- // Use a std::mutex here as Arenas are second-from-the-bottom when using MemMaps, and MemMap
- // itself uses std::mutex scoped to within an allocate/free only.
- mutable std::mutex lock_;
+ class TrackedArenaHash {
+ public:
+ size_t operator()(const TrackedArena* arena) const {
+ return std::hash<size_t>{}(reinterpret_cast<uintptr_t>(arena->Begin()) / kPageSize);
+ }
+ };
+ using AllocatedArenaSet =
+ HashSet<TrackedArena*, DefaultEmptyFn<TrackedArena*>, TrackedArenaHash, TrackedArenaEquals>;
+
+ mutable ReaderWriterMutex lock_;
std::vector<MemMap> maps_ GUARDED_BY(lock_);
std::set<Chunk*, LessByChunkSize> best_fit_allocs_ GUARDED_BY(lock_);
std::set<Chunk*, LessByChunkAddr> free_chunks_ GUARDED_BY(lock_);
// Set of allocated arenas. It's required to be able to find the arena
// corresponding to a given address.
- // TODO: consider using HashSet, which is more memory efficient.
- std::set<TrackedArena, LessByArenaAddr> allocated_arenas_ GUARDED_BY(lock_);
+ AllocatedArenaSet allocated_arenas_ GUARDED_BY(lock_);
// Number of bytes allocated so far.
size_t bytes_allocated_ GUARDED_BY(lock_);
+ // To hold arenas that are freed while GC is happening. These are kept until
+ // the end of GC to avoid the ABA problem.
+ TrackedArena* unused_arenas_ GUARDED_BY(lock_);
const char* name_;
// Flag to indicate that some arenas have been freed. This flag is used as an
// optimization by GC to know if it needs to find if the arena being visited
// has been freed or not. The flag is cleared in the compaction pause and read
// when linear-alloc space is concurrently visited updated to update GC roots.
- bool arenas_freed_ GUARDED_BY(lock_);
+ bool defer_arena_freeing_ GUARDED_BY(lock_);
const bool low_4gb_;
// Set to true in zygote process so that all linear-alloc allocations are in
// private-anonymous mappings and not on userfaultfd visited pages. At
@@ -232,6 +286,55 @@ class GcVisitedArenaPool final : public ArenaPool {
DISALLOW_COPY_AND_ASSIGN(GcVisitedArenaPool);
};
+// Allocator for class-table and intern-table hash-sets. It enables updating the
+// roots concurrently page-by-page.
+template <class T, AllocatorTag kTag>
+class GcRootArenaAllocator : public TrackingAllocator<T, kTag> {
+ public:
+ using value_type = typename TrackingAllocator<T, kTag>::value_type;
+ using size_type = typename TrackingAllocator<T, kTag>::size_type;
+ using difference_type = typename TrackingAllocator<T, kTag>::difference_type;
+ using pointer = typename TrackingAllocator<T, kTag>::pointer;
+ using const_pointer = typename TrackingAllocator<T, kTag>::const_pointer;
+ using reference = typename TrackingAllocator<T, kTag>::reference;
+ using const_reference = typename TrackingAllocator<T, kTag>::const_reference;
+
+ // Used internally by STL data structures.
+ template <class U>
+ explicit GcRootArenaAllocator(
+ [[maybe_unused]] const GcRootArenaAllocator<U, kTag>& alloc) noexcept {}
+ // Used internally by STL data structures.
+ GcRootArenaAllocator() noexcept : TrackingAllocator<T, kTag>() {}
+
+ // Enables an allocator for objects of one type to allocate storage for objects of another type.
+ // Used internally by STL data structures.
+ template <class U>
+ struct rebind {
+ using other = GcRootArenaAllocator<U, kTag>;
+ };
+
+ pointer allocate(size_type n, [[maybe_unused]] const_pointer hint = 0) {
+ if (!gUseUserfaultfd) {
+ return TrackingAllocator<T, kTag>::allocate(n);
+ }
+ size_t size = n * sizeof(T);
+ GcVisitedArenaPool* pool =
+ down_cast<GcVisitedArenaPool*>(Runtime::Current()->GetLinearAllocArenaPool());
+ return reinterpret_cast<pointer>(pool->AllocSingleObjArena(size));
+ }
+
+ template <typename PT>
+ void deallocate(PT p, size_type n) {
+ if (!gUseUserfaultfd) {
+ TrackingAllocator<T, kTag>::deallocate(p, n);
+ return;
+ }
+ GcVisitedArenaPool* pool =
+ down_cast<GcVisitedArenaPool*>(Runtime::Current()->GetLinearAllocArenaPool());
+ pool->FreeSingleObjArena(reinterpret_cast<uint8_t*>(p));
+ }
+};
+
} // namespace art
#endif // ART_RUNTIME_BASE_GC_VISITED_ARENA_POOL_H_
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 9c1d6445cd..d53a78e2e2 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2420,17 +2420,9 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
boot_class_table_->VisitRoots(root_visitor);
// If tracing is enabled, then mark all the class loaders to prevent unloading.
if ((flags & kVisitRootFlagClassLoader) != 0 || tracing_enabled) {
- gc::Heap* const heap = Runtime::Current()->GetHeap();
- // Don't visit class-loaders if compacting with userfaultfd GC as these
- // weaks are updated using Runtime::SweepSystemWeaks() and the GC doesn't
- // tolerate double updates.
- if (!heap->IsPerformingUffdCompaction()) {
- for (const ClassLoaderData& data : class_loaders_) {
- GcRoot<mirror::Object> root(GcRoot<mirror::Object>(self->DecodeJObject(data.weak_root)));
- root.VisitRoot(visitor, RootInfo(kRootVMInternal));
- }
- } else {
- DCHECK_EQ(heap->CurrentCollectorType(), gc::CollectorType::kCollectorTypeCMC);
+ for (const ClassLoaderData& data : class_loaders_) {
+ GcRoot<mirror::Object> root(GcRoot<mirror::Object>(self->DecodeJObject(data.weak_root)));
+ root.VisitRoot(visitor, RootInfo(kRootVMInternal));
}
}
} else if (!gUseReadBarrier && (flags & kVisitRootFlagNewRoots) != 0) {
@@ -2470,9 +2462,11 @@ void ClassLinker::VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags) {
// Keep in sync with InitCallback. Anything we visit, we need to
// reinit references to when reinitializing a ClassLinker from a
// mapped image.
-void ClassLinker::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
+void ClassLinker::VisitRoots(RootVisitor* visitor, VisitRootFlags flags, bool visit_class_roots) {
class_roots_.VisitRootIfNonNull(visitor, RootInfo(kRootVMInternal));
- VisitClassRoots(visitor, flags);
+ if (visit_class_roots) {
+ VisitClassRoots(visitor, flags);
+ }
// Instead of visiting the find_array_class_cache_ drop it so that it doesn't prevent class
// unloading if we are marking roots.
DropFindArrayClassCache();
@@ -2625,7 +2619,11 @@ ClassLinker::~ClassLinker() {
for (const ClassLoaderData& data : class_loaders_) {
// CHA unloading analysis is not needed. No negative consequences are expected because
// all the classloaders are deleted at the same time.
- DeleteClassLoader(self, data, /*cleanup_cha=*/ false);
+ PrepareToDeleteClassLoader(self, data, /*cleanup_cha=*/false);
+ }
+ for (const ClassLoaderData& data : class_loaders_) {
+ delete data.allocator;
+ delete data.class_table;
}
class_loaders_.clear();
while (!running_visibly_initialized_callbacks_.empty()) {
@@ -2635,7 +2633,9 @@ ClassLinker::~ClassLinker() {
}
}
-void ClassLinker::DeleteClassLoader(Thread* self, const ClassLoaderData& data, bool cleanup_cha) {
+void ClassLinker::PrepareToDeleteClassLoader(Thread* self,
+ const ClassLoaderData& data,
+ bool cleanup_cha) {
Runtime* const runtime = Runtime::Current();
JavaVMExt* const vm = runtime->GetJavaVM();
vm->DeleteWeakGlobalRef(self, data.weak_root);
@@ -2666,9 +2666,6 @@ void ClassLinker::DeleteClassLoader(Thread* self, const ClassLoaderData& data, b
}
}
}
-
- delete data.allocator;
- delete data.class_table;
}
ObjPtr<mirror::PointerArray> ClassLinker::AllocPointerArray(Thread* self, size_t length) {
@@ -7917,9 +7914,9 @@ void ClassLinker::LinkMethodsHelper<kPointerSize>::ReallocMethods(ObjPtr<mirror:
kMethodSize,
kMethodAlignment);
const size_t old_methods_ptr_size = (old_methods != nullptr) ? old_size : 0;
- auto* methods = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(
- class_linker_->GetAllocatorForClassLoader(klass->GetClassLoader())->Realloc(
- self_, old_methods, old_methods_ptr_size, new_size, LinearAllocKind::kArtMethodArray));
+ LinearAlloc* allocator = class_linker_->GetAllocatorForClassLoader(klass->GetClassLoader());
+ auto* methods = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(allocator->Realloc(
+ self_, old_methods, old_methods_ptr_size, new_size, LinearAllocKind::kArtMethodArray));
CHECK(methods != nullptr); // Native allocation failure aborts.
if (methods != old_methods) {
@@ -7932,12 +7929,9 @@ void ClassLinker::LinkMethodsHelper<kPointerSize>::ReallocMethods(ObjPtr<mirror:
++out;
}
} else if (gUseUserfaultfd) {
- // Clear the declaring class of the old dangling method array so that GC doesn't
- // try to update them, which could cause crashes in userfaultfd GC due to
- // checks in post-compact address computation.
- for (auto& m : klass->GetMethods(kPointerSize)) {
- m.SetDeclaringClass(nullptr);
- }
+ // In order to make compaction code skip updating the declaring_class_ in
+ // old_methods, convert it into a 'no GC-root' array.
+ allocator->ConvertToNoGcRoots(old_methods, LinearAllocKind::kArtMethodArray);
}
}
@@ -10847,9 +10841,13 @@ void ClassLinker::CleanupClassLoaders() {
ScopedDebugDisallowReadBarriers sddrb(self);
for (ClassLoaderData& data : to_delete) {
// CHA unloading analysis and SingleImplementaion cleanups are required.
- DeleteClassLoader(self, data, /*cleanup_cha=*/ true);
+ PrepareToDeleteClassLoader(self, data, /*cleanup_cha=*/true);
}
}
+ for (const ClassLoaderData& data : to_delete) {
+ delete data.allocator;
+ delete data.class_table;
+ }
Runtime* runtime = Runtime::Current();
if (!unregistered_oat_files.empty()) {
for (const OatFile* oat_file : unregistered_oat_files) {
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index ae7921d850..cdb92b2543 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -514,7 +514,7 @@ class ClassLinker {
void VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
+ void VisitRoots(RootVisitor* visitor, VisitRootFlags flags, bool visit_class_roots = true)
REQUIRES(!Locks::dex_lock_, !Locks::classlinker_classes_lock_, !Locks::trace_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Visits all dex-files accessible by any class-loader or the BCP.
@@ -866,6 +866,9 @@ class ClassLinker {
// Enable or disable public sdk checks.
virtual void SetEnablePublicSdkChecks(bool enabled);
void RemoveDexFromCaches(const DexFile& dex_file);
+ ClassTable* GetBootClassTable() REQUIRES_SHARED(Locks::classlinker_classes_lock_) {
+ return boot_class_table_.get();
+ }
protected:
virtual bool InitializeClass(Thread* self,
@@ -912,7 +915,11 @@ class ClassLinker {
REQUIRES(!Locks::dex_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- void DeleteClassLoader(Thread* self, const ClassLoaderData& data, bool cleanup_cha)
+ // Prepare by removing dependencies on things allocated in data.allocator.
+ // Please note that the allocator and class_table are not deleted in this
+ // function. They are to be deleted after preparing all the class-loaders that
+ // are to be deleted (see b/298575095).
+ void PrepareToDeleteClassLoader(Thread* self, const ClassLoaderData& data, bool cleanup_cha)
REQUIRES_SHARED(Locks::mutator_lock_);
void VisitClassesInternal(ClassVisitor* visitor)
diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h
index ecc8a0a620..4ee59a79f7 100644
--- a/runtime/class_table-inl.h
+++ b/runtime/class_table-inl.h
@@ -68,12 +68,14 @@ inline bool ClassTable::ClassDescriptorEquals::operator()(const TableSlot& a,
return a.Read<kWithoutReadBarrier>()->DescriptorEquals(b.first);
}
-template<class Visitor>
-void ClassTable::VisitRoots(Visitor& visitor) {
+template <class Visitor>
+void ClassTable::VisitRoots(Visitor& visitor, bool skip_classes) {
ReaderMutexLock mu(Thread::Current(), lock_);
- for (ClassSet& class_set : classes_) {
- for (TableSlot& table_slot : class_set) {
- table_slot.VisitRoot(visitor);
+ if (!skip_classes) {
+ for (ClassSet& class_set : classes_) {
+ for (TableSlot& table_slot : class_set) {
+ table_slot.VisitRoot(visitor);
+ }
}
}
for (GcRoot<mirror::Object>& root : strong_roots_) {
@@ -86,12 +88,14 @@ void ClassTable::VisitRoots(Visitor& visitor) {
}
}
-template<class Visitor>
-void ClassTable::VisitRoots(const Visitor& visitor) {
+template <class Visitor>
+void ClassTable::VisitRoots(const Visitor& visitor, bool skip_classes) {
ReaderMutexLock mu(Thread::Current(), lock_);
- for (ClassSet& class_set : classes_) {
- for (TableSlot& table_slot : class_set) {
- table_slot.VisitRoot(visitor);
+ if (!skip_classes) {
+ for (ClassSet& class_set : classes_) {
+ for (TableSlot& table_slot : class_set) {
+ table_slot.VisitRoot(visitor);
+ }
}
}
for (GcRoot<mirror::Object>& root : strong_roots_) {
@@ -104,6 +108,18 @@ void ClassTable::VisitRoots(const Visitor& visitor) {
}
}
+template <class Condition, class Visitor>
+void ClassTable::VisitClassesIfConditionMet(Condition& cond, Visitor& visitor) {
+ ReaderMutexLock mu(Thread::Current(), lock_);
+ for (ClassSet& class_set : classes_) {
+ if (cond(class_set)) {
+ for (TableSlot& table_slot : class_set) {
+ table_slot.VisitRoot(visitor);
+ }
+ }
+ }
+}
+
template <typename Visitor>
class ClassTable::TableSlot::ClassAndRootVisitor {
public:
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 7e263737c3..54e066a18c 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -21,7 +21,7 @@
#include <utility>
#include <vector>
-#include "base/allocator.h"
+#include "base/gc_visited_arena_pool.h"
#include "base/hash_set.h"
#include "base/macros.h"
#include "base/mutex.h"
@@ -151,7 +151,7 @@ class ClassTable {
TableSlotEmptyFn,
ClassDescriptorHash,
ClassDescriptorEquals,
- TrackingAllocator<TableSlot, kAllocatorTagClassTable>>;
+ GcRootArenaAllocator<TableSlot, kAllocatorTagClassTable>>;
ClassTable();
@@ -181,7 +181,7 @@ class ClassTable {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Returns the number of classes in the class table.
+ // Returns the number of class-sets in the class table.
size_t Size() const
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -194,17 +194,13 @@ class ClassTable {
REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS for object marking requiring heap bitmap lock.
- template<class Visitor>
- void VisitRoots(Visitor& visitor)
- NO_THREAD_SAFETY_ANALYSIS
- REQUIRES(!lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ template <class Visitor>
+ void VisitRoots(Visitor& visitor, bool skip_classes = false) NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!lock_) REQUIRES_SHARED(Locks::mutator_lock_);
- template<class Visitor>
- void VisitRoots(const Visitor& visitor)
- NO_THREAD_SAFETY_ANALYSIS
- REQUIRES(!lock_)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ template <class Visitor>
+ void VisitRoots(const Visitor& visitor, bool skip_classes = false) NO_THREAD_SAFETY_ANALYSIS
+ REQUIRES(!lock_) REQUIRES_SHARED(Locks::mutator_lock_);
template<class Visitor>
void VisitClassesAndRoots(Visitor& visitor)
@@ -212,6 +208,10 @@ class ClassTable {
REQUIRES(!lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Visit classes in those class-sets which satisfy 'cond'.
+ template <class Condition, class Visitor>
+ void VisitClassesIfConditionMet(Condition& cond, Visitor& visitor) REQUIRES(!lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Stops visit if the visitor returns false.
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
bool Visit(Visitor& visitor)
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index bd10958496..4e4109d1ac 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -33,12 +33,12 @@ Bitmap* Bitmap::CreateFromMemMap(MemMap&& mem_map, size_t num_bits) {
return new Bitmap(std::move(mem_map), num_bits);
}
-Bitmap::Bitmap(MemMap&& mem_map, size_t bitmap_size)
+Bitmap::Bitmap(MemMap&& mem_map, size_t num_bits)
: mem_map_(std::move(mem_map)),
bitmap_begin_(reinterpret_cast<uintptr_t*>(mem_map_.Begin())),
- bitmap_size_(bitmap_size) {
+ bitmap_numbits_(num_bits) {
CHECK(bitmap_begin_ != nullptr);
- CHECK_NE(bitmap_size, 0U);
+ CHECK_NE(num_bits, 0U);
}
Bitmap::~Bitmap() {
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index 06398d6b10..f413243094 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -19,10 +19,12 @@
#include <limits.h>
#include <stdint.h>
+
#include <memory>
#include <set>
#include <vector>
+#include "base/bit_utils.h"
#include "base/locks.h"
#include "base/mem_map.h"
#include "runtime_globals.h"
@@ -86,9 +88,7 @@ class Bitmap {
}
// Size of our bitmap in bits.
- size_t BitmapSize() const {
- return bitmap_size_;
- }
+ size_t BitmapSize() const { return bitmap_numbits_; }
// Check that a bit index is valid with a DCHECK.
ALWAYS_INLINE void CheckValidBitIndex(size_t bit_index) const {
@@ -118,7 +118,7 @@ class Bitmap {
uintptr_t* const bitmap_begin_;
// Number of bits in the bitmap.
- const size_t bitmap_size_;
+ size_t bitmap_numbits_;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Bitmap);
@@ -133,6 +133,14 @@ class MemoryRangeBitmap : public Bitmap {
static MemoryRangeBitmap* CreateFromMemMap(
MemMap&& mem_map, uintptr_t cover_begin, size_t num_bits);
+ void SetBitmapSize(size_t bytes) {
+ CHECK_ALIGNED(bytes, kAlignment);
+ bitmap_numbits_ = bytes / kAlignment;
+ size_t rounded_size =
+ RoundUp(bitmap_numbits_, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t);
+ mem_map_.SetSize(rounded_size);
+ }
+
// Beginning of the memory range that the bitmap covers.
ALWAYS_INLINE uintptr_t CoverBegin() const {
return cover_begin_;
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index e3189331c4..be3ccba25d 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -179,10 +179,12 @@ class SpaceBitmap {
}
void SetHeapSize(size_t bytes) {
- // TODO: Un-map the end of the mem map.
heap_limit_ = heap_begin_ + bytes;
- bitmap_size_ = OffsetToIndex(bytes) * sizeof(intptr_t);
+ bitmap_size_ = ComputeBitmapSize(bytes);
CHECK_EQ(HeapSize(), bytes);
+ if (mem_map_.IsValid()) {
+ mem_map_.SetSize(bitmap_size_);
+ }
}
uintptr_t HeapBegin() const {
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 1c2f040805..6af30feab8 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -923,9 +923,11 @@ class ConcurrentCopying::ImmuneSpaceScanObjVisitor {
// Only need to scan gray objects.
if (obj->GetReadBarrierState() == ReadBarrier::GrayState()) {
collector_->ScanImmuneObject(obj);
- // Done scanning the object, go back to black (non-gray).
- bool success = obj->AtomicSetReadBarrierState(ReadBarrier::GrayState(),
- ReadBarrier::NonGrayState());
+ // Done scanning the object, go back to black (non-gray). Release order
+ // required to ensure that stores of to-space references done by
+ // ScanImmuneObject() are visible before state change.
+ bool success = obj->AtomicSetReadBarrierState(
+ ReadBarrier::GrayState(), ReadBarrier::NonGrayState(), std::memory_order_release);
CHECK(success)
<< Runtime::Current()->GetHeap()->GetVerification()->DumpObjectInfo(obj, "failed CAS");
}
@@ -2378,9 +2380,8 @@ inline void ConcurrentCopying::ProcessMarkStackRef(mirror::Object* to_ref) {
// above IsInToSpace() evaluates to true and we change the color from gray to non-gray here in
// this else block.
if (kUseBakerReadBarrier) {
- bool success = to_ref->AtomicSetReadBarrierState<std::memory_order_release>(
- ReadBarrier::GrayState(),
- ReadBarrier::NonGrayState());
+ bool success = to_ref->AtomicSetReadBarrierState(
+ ReadBarrier::GrayState(), ReadBarrier::NonGrayState(), std::memory_order_release);
DCHECK(success) << "Must succeed as we won the race.";
}
}
diff --git a/runtime/gc/collector/mark_compact-inl.h b/runtime/gc/collector/mark_compact-inl.h
index c9b792e8f6..fe67906d4b 100644
--- a/runtime/gc/collector/mark_compact-inl.h
+++ b/runtime/gc/collector/mark_compact-inl.h
@@ -30,7 +30,7 @@ inline void MarkCompact::UpdateClassAfterObjectMap(mirror::Object* obj) {
// Track a class if it needs walking super-classes for visiting references or
// if it's higher in address order than its objects and is in moving space.
if (UNLIKELY(
- (std::less<mirror::Object*>{}(obj, klass) && bump_pointer_space_->HasAddress(klass)) ||
+ (std::less<mirror::Object*>{}(obj, klass) && HasAddress(klass)) ||
(klass->GetReferenceInstanceOffsets<kVerifyNone>() == mirror::Class::kClassWalkSuper &&
walk_super_class_cache_ != klass))) {
// Since this function gets invoked in the compaction pause as well, it is
@@ -42,12 +42,12 @@ inline void MarkCompact::UpdateClassAfterObjectMap(mirror::Object* obj) {
if (klass->GetReferenceInstanceOffsets<kVerifyNone>() == mirror::Class::kClassWalkSuper) {
// In this case we require traversing through the super class hierarchy
// and find the super class at the highest address order.
- mirror::Class* highest_klass = bump_pointer_space_->HasAddress(klass) ? klass : nullptr;
+ mirror::Class* highest_klass = HasAddress(klass) ? klass : nullptr;
for (ObjPtr<mirror::Class> k = klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>();
k != nullptr;
k = k->GetSuperClass<kVerifyNone, kWithoutReadBarrier>()) {
// TODO: Can we break once we encounter a super class outside the moving space?
- if (bump_pointer_space_->HasAddress(k.Ptr())) {
+ if (HasAddress(k.Ptr())) {
highest_klass = std::max(highest_klass, k.Ptr(), std::less<mirror::Class*>());
}
}
@@ -220,13 +220,16 @@ uint32_t MarkCompact::LiveWordsBitmap<kAlignment>::FindNthLiveWordOffset(size_t
UNREACHABLE();
}
-inline void MarkCompact::UpdateRef(mirror::Object* obj, MemberOffset offset) {
+inline void MarkCompact::UpdateRef(mirror::Object* obj,
+ MemberOffset offset,
+ uint8_t* begin,
+ uint8_t* end) {
mirror::Object* old_ref = obj->GetFieldObject<
mirror::Object, kVerifyNone, kWithoutReadBarrier, /*kIsVolatile*/false>(offset);
if (kIsDebugBuild) {
- if (live_words_bitmap_->HasAddress(old_ref)
- && reinterpret_cast<uint8_t*>(old_ref) < black_allocations_begin_
- && !moving_space_bitmap_->Test(old_ref)) {
+ if (HasAddress(old_ref) &&
+ reinterpret_cast<uint8_t*>(old_ref) < black_allocations_begin_ &&
+ !moving_space_bitmap_->Test(old_ref)) {
mirror::Object* from_ref = GetFromSpaceAddr(old_ref);
std::ostringstream oss;
heap_->DumpSpaces(oss);
@@ -249,7 +252,7 @@ inline void MarkCompact::UpdateRef(mirror::Object* obj, MemberOffset offset) {
<< " maps\n" << oss.str();
}
}
- mirror::Object* new_ref = PostCompactAddress(old_ref);
+ mirror::Object* new_ref = PostCompactAddress(old_ref, begin, end);
if (new_ref != old_ref) {
obj->SetFieldObjectWithoutWriteBarrier<
/*kTransactionActive*/false, /*kCheckTransaction*/false, kVerifyNone, /*kIsVolatile*/false>(
@@ -267,7 +270,7 @@ inline bool MarkCompact::VerifyRootSingleUpdate(void* root,
if (kIsDebugBuild && !kMemoryToolIsAvailable) {
void* stack_low_addr = stack_low_addr_;
void* stack_high_addr = stack_high_addr_;
- if (!live_words_bitmap_->HasAddress(old_ref)) {
+ if (!HasAddress(old_ref)) {
return false;
}
Thread* self = Thread::Current();
@@ -291,21 +294,26 @@ inline bool MarkCompact::VerifyRootSingleUpdate(void* root,
}
inline void MarkCompact::UpdateRoot(mirror::CompressedReference<mirror::Object>* root,
+ uint8_t* begin,
+ uint8_t* end,
const RootInfo& info) {
DCHECK(!root->IsNull());
mirror::Object* old_ref = root->AsMirrorPtr();
if (VerifyRootSingleUpdate(root, old_ref, info)) {
- mirror::Object* new_ref = PostCompactAddress(old_ref);
+ mirror::Object* new_ref = PostCompactAddress(old_ref, begin, end);
if (old_ref != new_ref) {
root->Assign(new_ref);
}
}
}
-inline void MarkCompact::UpdateRoot(mirror::Object** root, const RootInfo& info) {
+inline void MarkCompact::UpdateRoot(mirror::Object** root,
+ uint8_t* begin,
+ uint8_t* end,
+ const RootInfo& info) {
mirror::Object* old_ref = *root;
if (VerifyRootSingleUpdate(root, old_ref, info)) {
- mirror::Object* new_ref = PostCompactAddress(old_ref);
+ mirror::Object* new_ref = PostCompactAddress(old_ref, begin, end);
if (old_ref != new_ref) {
*root = new_ref;
}
@@ -378,10 +386,10 @@ inline mirror::Object* MarkCompact::PostCompactAddressUnchecked(mirror::Object*
return PostCompactOldObjAddr(old_ref);
}
-inline mirror::Object* MarkCompact::PostCompactAddress(mirror::Object* old_ref) const {
- // TODO: To further speedup the check, maybe we should consider caching heap
- // start/end in this object.
- if (LIKELY(live_words_bitmap_->HasAddress(old_ref))) {
+inline mirror::Object* MarkCompact::PostCompactAddress(mirror::Object* old_ref,
+ uint8_t* begin,
+ uint8_t* end) const {
+ if (LIKELY(HasAddress(old_ref, begin, end))) {
return PostCompactAddressUnchecked(old_ref);
}
return old_ref;
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index cb6f844ce7..8cd92a71f2 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -58,6 +58,7 @@
#include "thread_list.h"
#ifdef ART_TARGET_ANDROID
+#include "android-modules-utils/sdk_level.h"
#include "com_android_art.h"
#endif
@@ -83,13 +84,16 @@
#endif // __NR_userfaultfd
#endif // __BIONIC__
+#ifdef ART_TARGET_ANDROID
namespace {
using ::android::base::GetBoolProperty;
using ::android::base::ParseBool;
using ::android::base::ParseBoolResult;
+using ::android::modules::sdklevel::IsAtLeastT;
} // namespace
+#endif
namespace art {
@@ -188,7 +192,7 @@ static int GetOverrideCacheInfoFd() {
return -1;
}
-static bool GetCachedBoolProperty(const std::string& key, bool default_value) {
+static std::unordered_map<std::string, std::string> GetCachedProperties() {
// For simplicity, we don't handle multiple calls because otherwise we would have to reset the fd.
static bool called = false;
CHECK(!called) << "GetCachedBoolProperty can be called only once";
@@ -199,7 +203,7 @@ static bool GetCachedBoolProperty(const std::string& key, bool default_value) {
if (fd >= 0) {
if (!android::base::ReadFdToString(fd, &cache_info_contents)) {
PLOG(ERROR) << "Failed to read cache-info from fd " << fd;
- return default_value;
+ return {};
}
} else {
std::string path = GetApexDataDalvikCacheDirectory(InstructionSet::kNone) + "/cache-info.xml";
@@ -210,7 +214,7 @@ static bool GetCachedBoolProperty(const std::string& key, bool default_value) {
if (errno != ENOENT) {
PLOG(ERROR) << "Failed to read cache-info from the default path";
}
- return default_value;
+ return {};
}
}
@@ -219,41 +223,55 @@ static bool GetCachedBoolProperty(const std::string& key, bool default_value) {
if (!cache_info.has_value()) {
// This should never happen.
LOG(ERROR) << "Failed to parse cache-info";
- return default_value;
+ return {};
}
const com::android::art::KeyValuePairList* list = cache_info->getFirstSystemProperties();
if (list == nullptr) {
// This should never happen.
LOG(ERROR) << "Missing system properties from cache-info";
- return default_value;
+ return {};
}
const std::vector<com::android::art::KeyValuePair>& properties = list->getItem();
+ std::unordered_map<std::string, std::string> result;
for (const com::android::art::KeyValuePair& pair : properties) {
- if (pair.getK() == key) {
- ParseBoolResult result = ParseBool(pair.getV());
- switch (result) {
- case ParseBoolResult::kTrue:
- return true;
- case ParseBoolResult::kFalse:
- return false;
- case ParseBoolResult::kError:
- return default_value;
- }
- }
+ result[pair.getK()] = pair.getV();
+ }
+ return result;
+}
+
+static bool GetCachedBoolProperty(
+ const std::unordered_map<std::string, std::string>& cached_properties,
+ const std::string& key,
+ bool default_value) {
+ auto it = cached_properties.find(key);
+ if (it == cached_properties.end()) {
+ return default_value;
+ }
+ ParseBoolResult result = ParseBool(it->second);
+ switch (result) {
+ case ParseBoolResult::kTrue:
+ return true;
+ case ParseBoolResult::kFalse:
+ return false;
+ case ParseBoolResult::kError:
+ return default_value;
}
- return default_value;
}
static bool SysPropSaysUffdGc() {
// The phenotype flag can change at time time after boot, but it shouldn't take effect until a
// reboot. Therefore, we read the phenotype flag from the cache info, which is generated on boot.
- return GetCachedBoolProperty("persist.device_config.runtime_native_boot.enable_uffd_gc_2",
- false) ||
- GetBoolProperty("ro.dalvik.vm.enable_uffd_gc", false);
+ std::unordered_map<std::string, std::string> cached_properties = GetCachedProperties();
+ bool phenotype_enable = GetCachedBoolProperty(
+ cached_properties, "persist.device_config.runtime_native_boot.enable_uffd_gc_2", false);
+ bool phenotype_force_disable = GetCachedBoolProperty(
+ cached_properties, "persist.device_config.runtime_native_boot.force_disable_uffd_gc", false);
+ bool build_enable = GetBoolProperty("ro.dalvik.vm.enable_uffd_gc", false);
+ return (phenotype_enable || build_enable || IsAtLeastT()) && !phenotype_force_disable;
}
#else
// Never called.
-static bool SysPropSaysUffdGc() { return false; }
+static bool SysPropSaysUffdGc() { return true; }
#endif
static bool ShouldUseUserfaultfd() {
@@ -289,9 +307,8 @@ static constexpr size_t kMaxNumUffdWorkers = 2;
// Number of compaction buffers reserved for mutator threads in SIGBUS feature
// case. It's extremely unlikely that we will ever have more than these number
// of mutator threads trying to access the moving-space during one compaction
-// phase. Using a lower number in debug builds to hopefully catch the issue
-// before it becomes a problem on user builds.
-static constexpr size_t kMutatorCompactionBufferCount = kIsDebugBuild ? 256 : 512;
+// phase.
+static constexpr size_t kMutatorCompactionBufferCount = 2048;
// Minimum from-space chunk to be madvised (during concurrent compaction) in one go.
static constexpr ssize_t kMinFromSpaceMadviseSize = 1 * MB;
// Concurrent compaction termination logic is different (and slightly more efficient) if the
@@ -367,12 +384,32 @@ static bool IsSigbusFeatureAvailable() {
return gUffdFeatures & UFFD_FEATURE_SIGBUS;
}
+size_t MarkCompact::InitializeInfoMap(uint8_t* p, size_t moving_space_sz) {
+ size_t nr_moving_pages = moving_space_sz / kPageSize;
+
+ chunk_info_vec_ = reinterpret_cast<uint32_t*>(p);
+ vector_length_ = moving_space_sz / kOffsetChunkSize;
+ size_t total = vector_length_ * sizeof(uint32_t);
+
+ first_objs_non_moving_space_ = reinterpret_cast<ObjReference*>(p + total);
+ total += heap_->GetNonMovingSpace()->Capacity() / kPageSize * sizeof(ObjReference);
+
+ first_objs_moving_space_ = reinterpret_cast<ObjReference*>(p + total);
+ total += nr_moving_pages * sizeof(ObjReference);
+
+ pre_compact_offset_moving_space_ = reinterpret_cast<uint32_t*>(p + total);
+ total += nr_moving_pages * sizeof(uint32_t);
+ return total;
+}
+
MarkCompact::MarkCompact(Heap* heap)
: GarbageCollector(heap, "concurrent mark compact"),
gc_barrier_(0),
lock_("mark compact lock", kGenericBottomLock),
bump_pointer_space_(heap->GetBumpPointerSpace()),
moving_space_bitmap_(bump_pointer_space_->GetMarkBitmap()),
+ moving_space_begin_(bump_pointer_space_->Begin()),
+ moving_space_end_(bump_pointer_space_->Limit()),
moving_to_space_fd_(kFdUnused),
moving_from_space_fd_(kFdUnused),
uffd_(kFdUnused),
@@ -384,7 +421,8 @@ MarkCompact::MarkCompact(Heap* heap)
uffd_minor_fault_supported_(false),
use_uffd_sigbus_(IsSigbusFeatureAvailable()),
minor_fault_initialized_(false),
- map_linear_alloc_shared_(false) {
+ map_linear_alloc_shared_(false),
+ clamp_info_map_status_(ClampInfoStatus::kClampInfoNotDone) {
if (kIsDebugBuild) {
updated_roots_.reset(new std::unordered_set<void*>());
}
@@ -422,18 +460,8 @@ MarkCompact::MarkCompact(Heap* heap)
if (UNLIKELY(!info_map_.IsValid())) {
LOG(FATAL) << "Failed to allocate concurrent mark-compact chunk-info vector: " << err_msg;
} else {
- uint8_t* p = info_map_.Begin();
- chunk_info_vec_ = reinterpret_cast<uint32_t*>(p);
- vector_length_ = chunk_info_vec_size;
-
- p += chunk_info_vec_size * sizeof(uint32_t);
- first_objs_non_moving_space_ = reinterpret_cast<ObjReference*>(p);
-
- p += nr_non_moving_pages * sizeof(ObjReference);
- first_objs_moving_space_ = reinterpret_cast<ObjReference*>(p);
-
- p += nr_moving_pages * sizeof(ObjReference);
- pre_compact_offset_moving_space_ = reinterpret_cast<uint32_t*>(p);
+ size_t total = InitializeInfoMap(info_map_.Begin(), moving_space_size);
+ DCHECK_EQ(total, info_map_.Size());
}
size_t moving_space_alignment = BestPageTableAlignment(moving_space_size);
@@ -561,6 +589,50 @@ void MarkCompact::AddLinearAllocSpaceData(uint8_t* begin, size_t len) {
is_shared);
}
+void MarkCompact::ClampGrowthLimit(size_t new_capacity) {
+ // From-space is the same size as moving-space in virtual memory.
+ // However, if it's in >4GB address space then we don't need to do it
+ // synchronously.
+#if defined(__LP64__)
+ constexpr bool kClampFromSpace = kObjPtrPoisoning;
+#else
+ constexpr bool kClampFromSpace = true;
+#endif
+ size_t old_capacity = bump_pointer_space_->Capacity();
+ new_capacity = bump_pointer_space_->ClampGrowthLimit(new_capacity);
+ if (new_capacity < old_capacity) {
+ CHECK(from_space_map_.IsValid());
+ if (kClampFromSpace) {
+ from_space_map_.SetSize(new_capacity);
+ }
+ // NOTE: We usually don't use shadow_to_space_map_ and therefore the condition will
+ // mostly be false.
+ if (shadow_to_space_map_.IsValid() && shadow_to_space_map_.Size() > new_capacity) {
+ shadow_to_space_map_.SetSize(new_capacity);
+ }
+ clamp_info_map_status_ = ClampInfoStatus::kClampInfoPending;
+ }
+ CHECK_EQ(moving_space_begin_, bump_pointer_space_->Begin());
+}
+
+void MarkCompact::MaybeClampGcStructures() {
+ size_t moving_space_size = bump_pointer_space_->Capacity();
+ DCHECK(thread_running_gc_ != nullptr);
+ if (UNLIKELY(clamp_info_map_status_ == ClampInfoStatus::kClampInfoPending)) {
+ CHECK(from_space_map_.IsValid());
+ if (from_space_map_.Size() > moving_space_size) {
+ from_space_map_.SetSize(moving_space_size);
+ }
+ // Bitmaps and other data structures
+ live_words_bitmap_->SetBitmapSize(moving_space_size);
+ size_t set_size = InitializeInfoMap(info_map_.Begin(), moving_space_size);
+ CHECK_LT(set_size, info_map_.Size());
+ info_map_.SetSize(set_size);
+
+ clamp_info_map_status_ = ClampInfoStatus::kClampInfoFinished;
+ }
+}
+
void MarkCompact::PrepareCardTableForMarking(bool clear_alloc_space_cards) {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
accounting::CardTable* const card_table = heap_->GetCardTable();
@@ -656,6 +728,8 @@ void MarkCompact::InitializePhase() {
compaction_buffer_counter_.store(1, std::memory_order_relaxed);
from_space_slide_diff_ = from_space_begin_ - bump_pointer_space_->Begin();
black_allocations_begin_ = bump_pointer_space_->Limit();
+ CHECK_EQ(moving_space_begin_, bump_pointer_space_->Begin());
+ moving_space_end_ = bump_pointer_space_->Limit();
walk_super_class_cache_ = nullptr;
// TODO: Would it suffice to read it once in the constructor, which is called
// in zygote process?
@@ -861,7 +935,7 @@ void MarkCompact::InitNonMovingSpaceFirstObjects() {
DCHECK_LT(prev_obj, reinterpret_cast<mirror::Object*>(begin));
first_objs_non_moving_space_[page_idx].Assign(prev_obj);
mirror::Class* klass = prev_obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
- if (bump_pointer_space_->HasAddress(klass)) {
+ if (HasAddress(klass)) {
LOG(WARNING) << "found inter-page object " << prev_obj
<< " in non-moving space with klass " << klass
<< " in moving space";
@@ -879,7 +953,7 @@ void MarkCompact::InitNonMovingSpaceFirstObjects() {
}
if (prev_obj_end > begin) {
mirror::Class* klass = prev_obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
- if (bump_pointer_space_->HasAddress(klass)) {
+ if (HasAddress(klass)) {
LOG(WARNING) << "found inter-page object " << prev_obj
<< " in non-moving space with klass " << klass
<< " in moving space";
@@ -985,6 +1059,8 @@ void MarkCompact::PrepareForCompaction() {
post_compact_end_ = AlignUp(space_begin + total, kPageSize);
CHECK_EQ(post_compact_end_, space_begin + moving_first_objs_count_ * kPageSize);
black_objs_slide_diff_ = black_allocations_begin_ - post_compact_end_;
+ // We shouldn't be consuming more space after compaction than pre-compaction.
+ CHECK_GE(black_objs_slide_diff_, 0);
// How do we handle compaction of heap portion used for allocations after the
// marking-pause?
// All allocations after the marking-pause are considered black (reachable)
@@ -1271,9 +1347,8 @@ void MarkCompact::MarkingPause() {
// Align-up to page boundary so that black allocations happen from next page
// onwards. Also, it ensures that 'end' is aligned for card-table's
// ClearCardRange().
- black_allocations_begin_ = bump_pointer_space_->AlignEnd(thread_running_gc_, kPageSize);
- DCHECK(IsAligned<kAlignment>(black_allocations_begin_));
- black_allocations_begin_ = AlignUp(black_allocations_begin_, kPageSize);
+ black_allocations_begin_ = bump_pointer_space_->AlignEnd(thread_running_gc_, kPageSize, heap_);
+ DCHECK_ALIGNED_PARAM(black_allocations_begin_, kPageSize);
// Re-mark root set. Doesn't include thread-roots as they are already marked
// above.
@@ -1334,11 +1409,11 @@ void MarkCompact::Sweep(bool swap_bitmaps) {
DCHECK(mark_stack_->IsEmpty());
}
for (const auto& space : GetHeap()->GetContinuousSpaces()) {
- if (space->IsContinuousMemMapAllocSpace() && space != bump_pointer_space_) {
+ if (space->IsContinuousMemMapAllocSpace() && space != bump_pointer_space_ &&
+ !immune_spaces_.ContainsSpace(space)) {
space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
- TimingLogger::ScopedTiming split(
- alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace",
- GetTimings());
+ DCHECK(!alloc_space->IsZygoteSpace());
+ TimingLogger::ScopedTiming split("SweepMallocSpace", GetTimings());
RecordFree(alloc_space->Sweep(swap_bitmaps));
}
}
@@ -1389,7 +1464,12 @@ class MarkCompact::RefsUpdateVisitor {
mirror::Object* obj,
uint8_t* begin,
uint8_t* end)
- : collector_(collector), obj_(obj), begin_(begin), end_(end) {
+ : collector_(collector),
+ moving_space_begin_(collector->moving_space_begin_),
+ moving_space_end_(collector->moving_space_end_),
+ obj_(obj),
+ begin_(begin),
+ end_(end) {
DCHECK(!kCheckBegin || begin != nullptr);
DCHECK(!kCheckEnd || end != nullptr);
}
@@ -1403,7 +1483,7 @@ class MarkCompact::RefsUpdateVisitor {
update = (!kCheckBegin || ref >= begin_) && (!kCheckEnd || ref < end_);
}
if (update) {
- collector_->UpdateRef(obj_, offset);
+ collector_->UpdateRef(obj_, offset, moving_space_begin_, moving_space_end_);
}
}
@@ -1417,7 +1497,7 @@ class MarkCompact::RefsUpdateVisitor {
bool /*is_obj_array*/)
const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
- collector_->UpdateRef(obj_, offset);
+ collector_->UpdateRef(obj_, offset, moving_space_begin_, moving_space_end_);
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
@@ -1431,11 +1511,13 @@ class MarkCompact::RefsUpdateVisitor {
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
REQUIRES_SHARED(Locks::mutator_lock_) {
- collector_->UpdateRoot(root);
+ collector_->UpdateRoot(root, moving_space_begin_, moving_space_end_);
}
private:
MarkCompact* const collector_;
+ uint8_t* const moving_space_begin_;
+ uint8_t* const moving_space_end_;
mirror::Object* const obj_;
uint8_t* const begin_;
uint8_t* const end_;
@@ -1457,7 +1539,7 @@ void MarkCompact::VerifyObject(mirror::Object* ref, Callback& callback) const {
mirror::Class* pre_compact_klass = ref->GetClass<kVerifyNone, kWithoutReadBarrier>();
mirror::Class* klass_klass = klass->GetClass<kVerifyNone, kWithFromSpaceBarrier>();
mirror::Class* klass_klass_klass = klass_klass->GetClass<kVerifyNone, kWithFromSpaceBarrier>();
- if (bump_pointer_space_->HasAddress(pre_compact_klass) &&
+ if (HasAddress(pre_compact_klass) &&
reinterpret_cast<uint8_t*>(pre_compact_klass) < black_allocations_begin_) {
CHECK(moving_space_bitmap_->Test(pre_compact_klass))
<< "ref=" << ref
@@ -2167,7 +2249,7 @@ void MarkCompact::UpdateClassAfterObjMap() {
? super_class_iter->second
: pair.first;
if (std::less<mirror::Object*>{}(pair.second.AsMirrorPtr(), key.AsMirrorPtr()) &&
- bump_pointer_space_->HasAddress(key.AsMirrorPtr())) {
+ HasAddress(key.AsMirrorPtr())) {
auto [ret_iter, success] = class_after_obj_ordered_map_.try_emplace(key, pair.second);
// It could fail only if the class 'key' has objects of its own, which are lower in
// address order, as well of some of its derived class. In this case
@@ -2315,7 +2397,7 @@ void MarkCompact::UpdateNonMovingPage(mirror::Object* first, uint8_t* page) {
}
void MarkCompact::UpdateNonMovingSpace() {
- TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ TimingLogger::ScopedTiming t("(Paused)UpdateNonMovingSpace", GetTimings());
// Iterating in reverse ensures that the class pointer in objects which span
// across more than one page gets updated in the end. This is necessary for
// VisitRefsForCompaction() to work correctly.
@@ -2350,6 +2432,9 @@ void MarkCompact::UpdateMovingSpaceBlackAllocations() {
DCHECK_LE(begin, black_allocs);
size_t consumed_blocks_count = 0;
size_t first_block_size;
+ // Needed only for debug at the end of the function. Hopefully compiler will
+ // eliminate it otherwise.
+ size_t num_blocks = 0;
// Get the list of all blocks allocated in the bump-pointer space.
std::vector<size_t>* block_sizes = bump_pointer_space_->GetBlockSizes(thread_running_gc_,
&first_block_size);
@@ -2360,6 +2445,7 @@ void MarkCompact::UpdateMovingSpaceBlackAllocations() {
uint32_t remaining_chunk_size = 0;
uint32_t first_chunk_size = 0;
mirror::Object* first_obj = nullptr;
+ num_blocks = block_sizes->size();
for (size_t block_size : *block_sizes) {
block_end += block_size;
// Skip the blocks that are prior to the black allocations. These will be
@@ -2464,6 +2550,24 @@ void MarkCompact::UpdateMovingSpaceBlackAllocations() {
bump_pointer_space_->SetBlockSizes(thread_running_gc_,
post_compact_end_ - begin,
consumed_blocks_count);
+ if (kIsDebugBuild) {
+ size_t moving_space_size = bump_pointer_space_->Size();
+ size_t los_size = 0;
+ if (heap_->GetLargeObjectsSpace()) {
+ los_size = heap_->GetLargeObjectsSpace()->GetBytesAllocated();
+ }
+ // The moving-space size is already updated to post-compact size in SetBlockSizes above.
+ // Also, bytes-allocated has already been adjusted with large-object space' freed-bytes
+ // in Sweep(), but not with moving-space freed-bytes.
+ CHECK_GE(heap_->GetBytesAllocated() - black_objs_slide_diff_, moving_space_size + los_size)
+ << " moving-space size:" << moving_space_size
+ << " moving-space bytes-freed:" << black_objs_slide_diff_
+ << " large-object-space size:" << los_size
+ << " large-object-space bytes-freed:" << GetCurrentIteration()->GetFreedLargeObjectBytes()
+ << " num-tlabs-merged:" << consumed_blocks_count
+ << " main-block-size:" << (post_compact_end_ - begin)
+ << " total-tlabs-moving-space:" << num_blocks;
+ }
}
void MarkCompact::UpdateNonMovingSpaceBlackAllocations() {
@@ -2502,21 +2606,15 @@ void MarkCompact::UpdateNonMovingSpaceBlackAllocations() {
class MarkCompact::ImmuneSpaceUpdateObjVisitor {
public:
- ImmuneSpaceUpdateObjVisitor(MarkCompact* collector, bool visit_native_roots)
- : collector_(collector), visit_native_roots_(visit_native_roots) {}
+ explicit ImmuneSpaceUpdateObjVisitor(MarkCompact* collector) : collector_(collector) {}
- ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES(Locks::mutator_lock_) {
+ void operator()(mirror::Object* obj) const ALWAYS_INLINE REQUIRES(Locks::mutator_lock_) {
RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/false> visitor(collector_,
obj,
/*begin_*/nullptr,
/*end_*/nullptr);
- if (visit_native_roots_) {
- obj->VisitRefsForCompaction</*kFetchObjSize*/ false, /*kVisitNativeRoots*/ true>(
- visitor, MemberOffset(0), MemberOffset(-1));
- } else {
- obj->VisitRefsForCompaction</*kFetchObjSize*/ false>(
- visitor, MemberOffset(0), MemberOffset(-1));
- }
+ obj->VisitRefsForCompaction</*kFetchObjSize*/ false>(
+ visitor, MemberOffset(0), MemberOffset(-1));
}
static void Callback(mirror::Object* obj, void* arg) REQUIRES(Locks::mutator_lock_) {
@@ -2525,43 +2623,55 @@ class MarkCompact::ImmuneSpaceUpdateObjVisitor {
private:
MarkCompact* const collector_;
- const bool visit_native_roots_;
};
class MarkCompact::ClassLoaderRootsUpdater : public ClassLoaderVisitor {
public:
- explicit ClassLoaderRootsUpdater(MarkCompact* collector) : collector_(collector) {}
+ explicit ClassLoaderRootsUpdater(MarkCompact* collector)
+ : collector_(collector),
+ moving_space_begin_(collector->moving_space_begin_),
+ moving_space_end_(collector->moving_space_end_) {}
void Visit(ObjPtr<mirror::ClassLoader> class_loader) override
REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) {
ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
- class_table->VisitRoots(*this);
+ // Classes are updated concurrently.
+ class_table->VisitRoots(*this, /*skip_classes=*/true);
}
}
- void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const ALWAYS_INLINE
REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
- void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const ALWAYS_INLINE
REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
- collector_->VisitRoots(&root, 1, RootInfo(RootType::kRootVMInternal));
+ collector_->UpdateRoot(
+ root, moving_space_begin_, moving_space_end_, RootInfo(RootType::kRootVMInternal));
}
private:
MarkCompact* collector_;
+ uint8_t* const moving_space_begin_;
+ uint8_t* const moving_space_end_;
};
class MarkCompact::LinearAllocPageUpdater {
public:
- explicit LinearAllocPageUpdater(MarkCompact* collector) : collector_(collector) {}
-
- void operator()(uint8_t* page_begin, uint8_t* first_obj) ALWAYS_INLINE
+ explicit LinearAllocPageUpdater(MarkCompact* collector)
+ : collector_(collector),
+ moving_space_begin_(collector->moving_space_begin_),
+ moving_space_end_(collector->moving_space_end_),
+ last_page_touched_(false) {}
+
+ // Update a page in multi-object arena.
+ void MultiObjectArena(uint8_t* page_begin, uint8_t* first_obj)
REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(first_obj != nullptr);
DCHECK_ALIGNED(page_begin, kPageSize);
uint8_t* page_end = page_begin + kPageSize;
uint32_t obj_size;
@@ -2591,6 +2701,28 @@ class MarkCompact::LinearAllocPageUpdater {
last_page_touched_ = true;
}
+ // This version is only used for cases where the entire page is filled with
+ // GC-roots. For example, class-table and intern-table.
+ void SingleObjectArena(uint8_t* page_begin, size_t page_size)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ static_assert(sizeof(uint32_t) == sizeof(GcRoot<mirror::Object>));
+ DCHECK_ALIGNED(page_begin, kAlignment);
+ // Least significant bits are used by class-table.
+ static constexpr uint32_t kMask = kObjectAlignment - 1;
+ size_t num_roots = page_size / sizeof(GcRoot<mirror::Object>);
+ uint32_t* root_ptr = reinterpret_cast<uint32_t*>(page_begin);
+ for (size_t i = 0; i < num_roots; root_ptr++, i++) {
+ uint32_t word = *root_ptr;
+ if (word != 0) {
+ uint32_t lsbs = word & kMask;
+ word &= ~kMask;
+ VisitRootIfNonNull(reinterpret_cast<mirror::CompressedReference<mirror::Object>*>(&word));
+ *root_ptr = word | lsbs;
+ last_page_touched_ = true;
+ }
+ }
+ }
+
bool WasLastPageTouched() const { return last_page_touched_; }
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
@@ -2604,12 +2736,13 @@ class MarkCompact::LinearAllocPageUpdater {
ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* old_ref = root->AsMirrorPtr();
DCHECK_NE(old_ref, nullptr);
- if (collector_->live_words_bitmap_->HasAddress(old_ref)) {
+ if (MarkCompact::HasAddress(old_ref, moving_space_begin_, moving_space_end_)) {
mirror::Object* new_ref = old_ref;
if (reinterpret_cast<uint8_t*>(old_ref) >= collector_->black_allocations_begin_) {
new_ref = collector_->PostCompactBlackObjAddr(old_ref);
} else if (collector_->live_words_bitmap_->Test(old_ref)) {
- DCHECK(collector_->moving_space_bitmap_->Test(old_ref)) << old_ref;
+ DCHECK(collector_->moving_space_bitmap_->Test(old_ref))
+ << "ref:" << old_ref << " root:" << root;
new_ref = collector_->PostCompactOldObjAddr(old_ref);
}
if (old_ref != new_ref) {
@@ -2622,7 +2755,8 @@ class MarkCompact::LinearAllocPageUpdater {
void VisitObject(LinearAllocKind kind,
void* obj,
uint8_t* start_boundary,
- uint8_t* end_boundary) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint8_t* end_boundary) const ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
switch (kind) {
case LinearAllocKind::kNoGCRoots:
break;
@@ -2673,10 +2807,38 @@ class MarkCompact::LinearAllocPageUpdater {
}
MarkCompact* const collector_;
+ // Cache to speed up checking if GC-root is in moving space or not.
+ uint8_t* const moving_space_begin_;
+ uint8_t* const moving_space_end_;
// Whether the last page was touched or not.
- bool last_page_touched_;
+ bool last_page_touched_ = false;
};
+void MarkCompact::UpdateClassTableClasses(Runtime* runtime, bool immune_class_table_only) {
+ // If the process is debuggable then redefinition is allowed, which may mean
+ // pre-zygote-fork class-tables may have pointer to class in moving-space.
+ // So visit classes from class-sets that are not in linear-alloc arena-pool.
+ if (UNLIKELY(runtime->IsJavaDebuggableAtInit())) {
+ ClassLinker* linker = runtime->GetClassLinker();
+ ClassLoaderRootsUpdater updater(this);
+ GcVisitedArenaPool* pool = static_cast<GcVisitedArenaPool*>(runtime->GetLinearAllocArenaPool());
+ auto cond = [this, pool, immune_class_table_only](ClassTable::ClassSet& set) -> bool {
+ if (!set.empty()) {
+ return immune_class_table_only ?
+ immune_spaces_.ContainsObject(reinterpret_cast<mirror::Object*>(&*set.begin())) :
+ !pool->Contains(reinterpret_cast<void*>(&*set.begin()));
+ }
+ return false;
+ };
+ linker->VisitClassTables([cond, &updater](ClassTable* table)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ table->VisitClassesIfConditionMet(cond, updater);
+ });
+ ReaderMutexLock rmu(thread_running_gc_, *Locks::classlinker_classes_lock_);
+ linker->GetBootClassTable()->VisitClassesIfConditionMet(cond, updater);
+ }
+}
+
void MarkCompact::CompactionPause() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
Runtime* runtime = Runtime::Current();
@@ -2730,65 +2892,6 @@ void MarkCompact::CompactionPause() {
heap_->GetReferenceProcessor()->UpdateRoots(this);
}
{
- TimingLogger::ScopedTiming t2("(Paused)UpdateClassLoaderRoots", GetTimings());
- ReaderMutexLock rmu(thread_running_gc_, *Locks::classlinker_classes_lock_);
- {
- ClassLoaderRootsUpdater updater(this);
- runtime->GetClassLinker()->VisitClassLoaders(&updater);
- }
- }
-
- bool has_zygote_space = heap_->HasZygoteSpace();
- // TODO: Find out why it's not sufficient to visit native roots of immune
- // spaces, and why all the pre-zygote fork arenas have to be linearly updated.
- // Is it possible that some native root starts getting pointed to by some object
- // in moving space after fork? Or are we missing a write-barrier somewhere
- // when a native root is updated?
- GcVisitedArenaPool* arena_pool =
- static_cast<GcVisitedArenaPool*>(runtime->GetLinearAllocArenaPool());
- if (uffd_ == kFallbackMode || (!has_zygote_space && runtime->IsZygote())) {
- // Besides fallback-mode, visit linear-alloc space in the pause for zygote
- // processes prior to first fork (that's when zygote space gets created).
- if (kIsDebugBuild && IsValidFd(uffd_)) {
- // All arenas allocated so far are expected to be pre-zygote fork.
- arena_pool->ForEachAllocatedArena(
- [](const TrackedArena& arena)
- REQUIRES_SHARED(Locks::mutator_lock_) { CHECK(arena.IsPreZygoteForkArena()); });
- }
- LinearAllocPageUpdater updater(this);
- arena_pool->VisitRoots(updater);
- } else {
- // Clear the flag as we care about this only if arenas are freed during
- // concurrent compaction.
- arena_pool->ClearArenasFreed();
- arena_pool->ForEachAllocatedArena(
- [this](const TrackedArena& arena) REQUIRES_SHARED(Locks::mutator_lock_) {
- // The pre-zygote fork arenas are not visited concurrently in the
- // zygote children processes. The native roots of the dirty objects
- // are visited during immune space visit below.
- if (!arena.IsPreZygoteForkArena()) {
- uint8_t* last_byte = arena.GetLastUsedByte();
- CHECK(linear_alloc_arenas_.insert({&arena, last_byte}).second);
- } else {
- LinearAllocPageUpdater updater(this);
- arena.VisitRoots(updater);
- }
- });
- }
-
- SweepSystemWeaks(thread_running_gc_, runtime, /*paused*/ true);
-
- {
- TimingLogger::ScopedTiming t2("(Paused)UpdateConcurrentRoots", GetTimings());
- runtime->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
- }
- {
- // TODO: don't visit the transaction roots if it's not active.
- TimingLogger::ScopedTiming t2("(Paused)UpdateNonThreadRoots", GetTimings());
- runtime->VisitNonThreadRoots(this);
- }
-
- {
// TODO: Immune space updation has to happen either before or after
// remapping pre-compact pages to from-space. And depending on when it's
// done, we have to invoke VisitRefsForCompaction() with or without
@@ -2803,7 +2906,7 @@ void MarkCompact::CompactionPause() {
// place and that the classes/dex-caches in immune-spaces may have allocations
// (ArtMethod/ArtField arrays, dex-cache array, etc.) in the
// non-userfaultfd visited private-anonymous mappings. Visit them here.
- ImmuneSpaceUpdateObjVisitor visitor(this, /*visit_native_roots=*/false);
+ ImmuneSpaceUpdateObjVisitor visitor(this);
if (table != nullptr) {
table->ProcessCards();
table->VisitObjects(ImmuneSpaceUpdateObjVisitor::Callback, &visitor);
@@ -2819,11 +2922,98 @@ void MarkCompact::CompactionPause() {
}
}
- if (use_uffd_sigbus_) {
- // Release order wrt to mutator threads' SIGBUS handler load.
- sigbus_in_progress_count_.store(0, std::memory_order_release);
+ {
+ TimingLogger::ScopedTiming t2("(Paused)UpdateRoots", GetTimings());
+ runtime->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
+ runtime->VisitNonThreadRoots(this);
+ {
+ ClassLinker* linker = runtime->GetClassLinker();
+ ClassLoaderRootsUpdater updater(this);
+ ReaderMutexLock rmu(thread_running_gc_, *Locks::classlinker_classes_lock_);
+ linker->VisitClassLoaders(&updater);
+ linker->GetBootClassTable()->VisitRoots(updater, /*skip_classes=*/true);
+ }
+ SweepSystemWeaks(thread_running_gc_, runtime, /*paused=*/true);
+
+ bool has_zygote_space = heap_->HasZygoteSpace();
+ GcVisitedArenaPool* arena_pool =
+ static_cast<GcVisitedArenaPool*>(runtime->GetLinearAllocArenaPool());
+ // Update immune/pre-zygote class-tables in case class redefinition took
+ // place. pre-zygote class-tables that are not in immune spaces are updated
+ // below if we are in fallback-mode or if there is no zygote space. So in
+ // that case only visit class-tables that are there in immune-spaces.
+ UpdateClassTableClasses(runtime, uffd_ == kFallbackMode || !has_zygote_space);
+
+ // Acquire arena-pool's lock, which should be released after the pool is
+ // userfaultfd registered. This is to ensure that no new arenas are
+ // allocated and used in between. Since they will not be captured in
+ // linear_alloc_arenas_ below, we will miss updating their pages. The same
+ // reason also applies to new allocations within the existing arena which
+ // may change last_byte.
+ // Since we are in a STW pause, this shouldn't happen anyways, but holding
+ // the lock confirms it.
+ // TODO (b/305779657): Replace with ExclusiveTryLock() and assert that it
+ // doesn't fail once it is available for ReaderWriterMutex.
+ WriterMutexLock pool_wmu(thread_running_gc_, arena_pool->GetLock());
+
+ // TODO: Find out why it's not sufficient to visit native roots of immune
+ // spaces, and why all the pre-zygote fork arenas have to be linearly updated.
+ // Is it possible that some native root starts getting pointed to by some object
+ // in moving space after fork? Or are we missing a write-barrier somewhere
+ // when a native root is updated?
+ auto arena_visitor = [this](uint8_t* page_begin, uint8_t* first_obj, size_t page_size)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ LinearAllocPageUpdater updater(this);
+ if (first_obj != nullptr) {
+ updater.MultiObjectArena(page_begin, first_obj);
+ } else {
+ updater.SingleObjectArena(page_begin, page_size);
+ }
+ };
+ if (uffd_ == kFallbackMode || (!has_zygote_space && runtime->IsZygote())) {
+ // Besides fallback-mode, visit linear-alloc space in the pause for zygote
+ // processes prior to first fork (that's when zygote space gets created).
+ if (kIsDebugBuild && IsValidFd(uffd_)) {
+ // All arenas allocated so far are expected to be pre-zygote fork.
+ arena_pool->ForEachAllocatedArena(
+ [](const TrackedArena& arena)
+ REQUIRES_SHARED(Locks::mutator_lock_) { CHECK(arena.IsPreZygoteForkArena()); });
+ }
+ arena_pool->VisitRoots(arena_visitor);
+ } else {
+ // Inform the arena-pool that compaction is going on. So the TrackedArena
+ // objects corresponding to the arenas that are freed shouldn't be deleted
+ // immediately. We will do that in FinishPhase(). This is to avoid ABA
+ // problem.
+ arena_pool->DeferArenaFreeing();
+ arena_pool->ForEachAllocatedArena(
+ [this, arena_visitor, has_zygote_space](const TrackedArena& arena)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ // The pre-zygote fork arenas are not visited concurrently in the
+ // zygote children processes. The native roots of the dirty objects
+ // are visited during immune space visit below.
+ if (!arena.IsPreZygoteForkArena()) {
+ uint8_t* last_byte = arena.GetLastUsedByte();
+ auto ret = linear_alloc_arenas_.insert({&arena, last_byte});
+ CHECK(ret.second);
+ } else if (!arena.IsSingleObjectArena() || !has_zygote_space) {
+ // Pre-zygote class-table and intern-table don't need to be updated.
+ // TODO: Explore the possibility of using /proc/self/pagemap to
+ // fetch which pages in these arenas are private-dirty and then only
+ // visit those pages. To optimize it further, we can keep all
+ // pre-zygote arenas in a single memory range so that just one read
+ // from pagemap is sufficient.
+ arena.VisitRoots(arena_visitor);
+ }
+ });
+ }
+ if (use_uffd_sigbus_) {
+ // Release order wrt to mutator threads' SIGBUS handler load.
+ sigbus_in_progress_count_.store(0, std::memory_order_release);
+ }
+ KernelPreparation();
}
- KernelPreparation();
+
UpdateNonMovingSpace();
// fallback mode
if (uffd_ == kFallbackMode) {
@@ -2889,7 +3079,7 @@ void MarkCompact::KernelPrepareRangeForUffd(uint8_t* to_addr,
}
void MarkCompact::KernelPreparation() {
- TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ TimingLogger::ScopedTiming t("(Paused)KernelPreparation", GetTimings());
uint8_t* moving_space_begin = bump_pointer_space_->Begin();
size_t moving_space_size = bump_pointer_space_->Capacity();
int mode = kCopyMode;
@@ -3017,7 +3207,7 @@ void MarkCompact::ConcurrentCompaction(uint8_t* buf) {
break;
}
uint8_t* fault_page = AlignDown(fault_addr, kPageSize);
- if (bump_pointer_space_->HasAddress(reinterpret_cast<mirror::Object*>(fault_addr))) {
+ if (HasAddress(reinterpret_cast<mirror::Object*>(fault_addr))) {
ConcurrentlyProcessMovingPage<kMode>(fault_page, buf, nr_moving_space_used_pages);
} else if (minor_fault_initialized_) {
ConcurrentlyProcessLinearAllocPage<kMinorFaultMode>(
@@ -3072,7 +3262,7 @@ bool MarkCompact::SigbusHandler(siginfo_t* info) {
ScopedInProgressCount spc(this);
uint8_t* fault_page = AlignDown(reinterpret_cast<uint8_t*>(info->si_addr), kPageSize);
if (!spc.IsCompactionDone()) {
- if (bump_pointer_space_->HasAddress(reinterpret_cast<mirror::Object*>(fault_page))) {
+ if (HasAddress(reinterpret_cast<mirror::Object*>(fault_page))) {
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertSharedHeld(self);
size_t nr_moving_space_used_pages = moving_first_objs_count_ + black_page_count_;
@@ -3103,7 +3293,7 @@ bool MarkCompact::SigbusHandler(siginfo_t* info) {
// We may spuriously get SIGBUS fault, which was initiated before the
// compaction was finished, but ends up here. In that case, if the fault
// address is valid then consider it handled.
- return bump_pointer_space_->HasAddress(reinterpret_cast<mirror::Object*>(fault_page)) ||
+ return HasAddress(reinterpret_cast<mirror::Object*>(fault_page)) ||
linear_alloc_spaces_data_.end() !=
std::find_if(linear_alloc_spaces_data_.begin(),
linear_alloc_spaces_data_.end(),
@@ -3316,14 +3506,22 @@ void MarkCompact::ConcurrentlyProcessLinearAllocPage(uint8_t* fault_page, bool i
arena_iter = arena_iter != linear_alloc_arenas_.begin() ? std::prev(arena_iter)
: linear_alloc_arenas_.end();
}
- if (arena_iter == linear_alloc_arenas_.end() || arena_iter->second <= fault_page) {
+ // Unlike ProcessLinearAlloc(), we don't need to hold arena-pool's lock here
+ // because a thread trying to access the page and as a result causing this
+ // userfault confirms that nobody can delete the corresponding arena and
+ // release its pages.
+ // NOTE: We may have some memory range be recycled several times during a
+ // compaction cycle, thereby potentially causing userfault on the same page
+ // several times. That's not a problem as all of them (except for possibly the
+ // first one) would require us mapping a zero-page, which we do without updating
+ // the 'state_arr'.
+ if (arena_iter == linear_alloc_arenas_.end() ||
+ arena_iter->first->IsWaitingForDeletion() ||
+ arena_iter->second <= fault_page) {
// Fault page isn't in any of the arenas that existed before we started
// compaction. So map zeropage and return.
ZeropageIoctl(fault_page, /*tolerate_eexist=*/true, /*tolerate_enoent=*/false);
} else {
- // fault_page should always belong to some arena.
- DCHECK(arena_iter != linear_alloc_arenas_.end())
- << "fault_page:" << static_cast<void*>(fault_page) << "is_minor_fault:" << is_minor_fault;
// Find the linear-alloc space containing fault-page
LinearAllocSpaceData* space_data = nullptr;
for (auto& data : linear_alloc_spaces_data_) {
@@ -3348,10 +3546,15 @@ void MarkCompact::ConcurrentlyProcessLinearAllocPage(uint8_t* fault_page, bool i
if (state_arr[page_idx].compare_exchange_strong(
state, PageState::kProcessingAndMapping, std::memory_order_acquire)) {
if (kMode == kCopyMode || is_minor_fault) {
- uint8_t* first_obj = arena_iter->first->GetFirstObject(fault_page);
- DCHECK_NE(first_obj, nullptr);
LinearAllocPageUpdater updater(this);
- updater(fault_page + diff, first_obj + diff);
+ uint8_t* first_obj = arena_iter->first->GetFirstObject(fault_page);
+ // null first_obj indicates that it's a page from arena for
+ // intern-table/class-table. So first object isn't required.
+ if (first_obj != nullptr) {
+ updater.MultiObjectArena(fault_page + diff, first_obj + diff);
+ } else {
+ updater.SingleObjectArena(fault_page + diff, kPageSize);
+ }
if (kMode == kCopyMode) {
MapUpdatedLinearAllocPage(fault_page,
fault_page + diff,
@@ -3420,6 +3623,7 @@ void MarkCompact::ConcurrentlyProcessLinearAllocPage(uint8_t* fault_page, bool i
void MarkCompact::ProcessLinearAlloc() {
GcVisitedArenaPool* arena_pool =
static_cast<GcVisitedArenaPool*>(Runtime::Current()->GetLinearAllocArenaPool());
+ DCHECK_EQ(thread_running_gc_, Thread::Current());
for (auto& pair : linear_alloc_arenas_) {
const TrackedArena* arena = pair.first;
size_t arena_size;
@@ -3427,12 +3631,15 @@ void MarkCompact::ProcessLinearAlloc() {
ptrdiff_t diff;
bool others_processing;
{
- // Acquire arena-pool's lock so that the arena being worked cannot be
- // deallocated at the same time.
- std::lock_guard<std::mutex> lock(arena_pool->GetLock());
+ // Acquire arena-pool's lock (in shared-mode) so that the arena being updated
+ // does not get deleted at the same time. If this critical section is too
+ // long and impacts mutator response time, then we get rid of this lock by
+ // holding onto memory ranges of all deleted (since compaction pause)
+ // arenas until completion finishes.
+ ReaderMutexLock rmu(thread_running_gc_, arena_pool->GetLock());
// If any arenas were freed since compaction pause then skip them from
// visiting.
- if (arena_pool->AreArenasFreed() && !arena_pool->FindAllocatedArena(arena)) {
+ if (arena->IsWaitingForDeletion()) {
continue;
}
uint8_t* last_byte = pair.second;
@@ -3448,11 +3655,12 @@ void MarkCompact::ProcessLinearAlloc() {
break;
}
}
- DCHECK_NE(space_data, nullptr);
+ CHECK_NE(space_data, nullptr);
diff = space_data->shadow_.Begin() - space_data->begin_;
auto visitor = [space_data, last_byte, diff, this, &others_processing](
uint8_t* page_begin,
- uint8_t* first_obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ uint8_t* first_obj,
+ size_t page_size) REQUIRES_SHARED(Locks::mutator_lock_) {
// No need to process pages past last_byte as they already have updated
// gc-roots, if any.
if (page_begin >= last_byte) {
@@ -3471,7 +3679,14 @@ void MarkCompact::ProcessLinearAlloc() {
// reason, we used 'release' order for changing the state to 'processed'.
if (state_arr[page_idx].compare_exchange_strong(
expected_state, desired_state, std::memory_order_acquire)) {
- updater(page_begin + diff, first_obj + diff);
+ // null first_obj indicates that it's a page from arena for
+ // intern-table/class-table. So first object isn't required.
+ if (first_obj != nullptr) {
+ updater.MultiObjectArena(page_begin + diff, first_obj + diff);
+ } else {
+ DCHECK_EQ(page_size, kPageSize);
+ updater.SingleObjectArena(page_begin + diff, page_size);
+ }
expected_state = PageState::kProcessing;
if (!minor_fault_initialized_) {
MapUpdatedLinearAllocPage(
@@ -3922,6 +4137,7 @@ void MarkCompact::MarkingPhase() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
DCHECK_EQ(thread_running_gc_, Thread::Current());
WriterMutexLock mu(thread_running_gc_, *Locks::heap_bitmap_lock_);
+ MaybeClampGcStructures();
PrepareCardTableForMarking(/*clear_alloc_space_cards*/ true);
MarkZygoteLargeObjects();
MarkRoots(
@@ -3958,15 +4174,13 @@ class MarkCompact::RefFieldsVisitor {
mark_compact_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
}
- void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
- REQUIRES(Locks::heap_bitmap_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const ALWAYS_INLINE
+ REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
mark_compact_->DelayReferenceReferent(klass, ref);
}
- void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- REQUIRES(Locks::heap_bitmap_lock_)
- REQUIRES_SHARED(Locks::mutator_lock_) {
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const ALWAYS_INLINE
+ REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
@@ -4028,7 +4242,7 @@ void MarkCompact::ScanObject(mirror::Object* obj) {
RefFieldsVisitor visitor(this);
DCHECK(IsMarked(obj)) << "Scanning marked object " << obj << "\n" << heap_->DumpSpaces();
- if (kUpdateLiveWords && moving_space_bitmap_->HasAddress(obj)) {
+ if (kUpdateLiveWords && HasAddress(obj)) {
UpdateLivenessInfo(obj, obj_size);
}
obj->VisitReferences(visitor, visitor);
@@ -4078,7 +4292,7 @@ inline bool MarkCompact::MarkObjectNonNullNoPush(mirror::Object* obj,
MemberOffset offset) {
// We expect most of the referenes to be in bump-pointer space, so try that
// first to keep the cost of this function minimal.
- if (LIKELY(moving_space_bitmap_->HasAddress(obj))) {
+ if (LIKELY(HasAddress(obj))) {
return kParallel ? !moving_space_bitmap_->AtomicTestAndSet(obj)
: !moving_space_bitmap_->Set(obj);
} else if (non_moving_space_bitmap_->HasAddress(obj)) {
@@ -4135,8 +4349,10 @@ void MarkCompact::VisitRoots(mirror::Object*** roots,
size_t count,
const RootInfo& info) {
if (compacting_) {
+ uint8_t* moving_space_begin = moving_space_begin_;
+ uint8_t* moving_space_end = moving_space_end_;
for (size_t i = 0; i < count; ++i) {
- UpdateRoot(roots[i], info);
+ UpdateRoot(roots[i], moving_space_begin, moving_space_end, info);
}
} else {
for (size_t i = 0; i < count; ++i) {
@@ -4150,8 +4366,10 @@ void MarkCompact::VisitRoots(mirror::CompressedReference<mirror::Object>** roots
const RootInfo& info) {
// TODO: do we need to check if the root is null or not?
if (compacting_) {
+ uint8_t* moving_space_begin = moving_space_begin_;
+ uint8_t* moving_space_end = moving_space_end_;
for (size_t i = 0; i < count; ++i) {
- UpdateRoot(roots[i], info);
+ UpdateRoot(roots[i], moving_space_begin, moving_space_end, info);
}
} else {
for (size_t i = 0; i < count; ++i) {
@@ -4161,7 +4379,7 @@ void MarkCompact::VisitRoots(mirror::CompressedReference<mirror::Object>** roots
}
mirror::Object* MarkCompact::IsMarked(mirror::Object* obj) {
- if (moving_space_bitmap_->HasAddress(obj)) {
+ if (HasAddress(obj)) {
const bool is_black = reinterpret_cast<uint8_t*>(obj) >= black_allocations_begin_;
if (compacting_) {
if (is_black) {
@@ -4272,6 +4490,9 @@ void MarkCompact::FinishPhase() {
DCHECK_EQ(fstat(moving_to_space_fd_, &buf), 0) << "fstat failed: " << strerror(errno);
DCHECK_EQ(buf.st_blocks, 0u);
}
+ GcVisitedArenaPool* arena_pool =
+ static_cast<GcVisitedArenaPool*>(Runtime::Current()->GetLinearAllocArenaPool());
+ arena_pool->DeleteUnusedArenas();
}
} // namespace collector
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 3f16d06825..1ecb49ae52 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -76,6 +76,7 @@ class MarkCompact final : public GarbageCollector {
void RunPhases() override REQUIRES(!Locks::mutator_lock_, !lock_);
+ void ClampGrowthLimit(size_t new_capacity) REQUIRES(Locks::heap_bitmap_lock_);
// Updated before (or in) pre-compaction pause and is accessed only in the
// pause or during concurrent compaction. The flag is reset in next GC cycle's
// InitializePhase(). Therefore, it's safe to update without any memory ordering.
@@ -135,7 +136,7 @@ class MarkCompact final : public GarbageCollector {
mirror::Object* GetFromSpaceAddrFromBarrier(mirror::Object* old_ref) {
CHECK(compacting_);
- if (live_words_bitmap_->HasAddress(old_ref)) {
+ if (HasAddress(old_ref)) {
return GetFromSpaceAddr(old_ref);
}
return old_ref;
@@ -166,6 +167,13 @@ class MarkCompact final : public GarbageCollector {
kProcessedAndMapped = 6 // Processed and mapped. For SIGBUS.
};
+ // Different heap clamping states.
+ enum class ClampInfoStatus : uint8_t {
+ kClampInfoNotDone,
+ kClampInfoPending,
+ kClampInfoFinished
+ };
+
private:
using ObjReference = mirror::CompressedReference<mirror::Object>;
// Number of bits (live-words) covered by a single chunk-info (below)
@@ -191,6 +199,7 @@ class MarkCompact final : public GarbageCollector {
static constexpr uint32_t kBitmapWordsPerVectorWord =
kBitsPerVectorWord / Bitmap::kBitsPerBitmapWord;
static_assert(IsPowerOfTwo(kBitmapWordsPerVectorWord));
+ using MemRangeBitmap::SetBitmapSize;
static LiveWordsBitmap* Create(uintptr_t begin, uintptr_t end);
// Return offset (within the indexed chunk-info) of the nth live word.
@@ -231,11 +240,19 @@ class MarkCompact final : public GarbageCollector {
}
};
+ static bool HasAddress(mirror::Object* obj, uint8_t* begin, uint8_t* end) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(obj);
+ return ptr >= begin && ptr < end;
+ }
+
+ bool HasAddress(mirror::Object* obj) const {
+ return HasAddress(obj, moving_space_begin_, moving_space_end_);
+ }
// For a given object address in pre-compact space, return the corresponding
// address in the from-space, where heap pages are relocated in the compaction
// pause.
mirror::Object* GetFromSpaceAddr(mirror::Object* obj) const {
- DCHECK(live_words_bitmap_->HasAddress(obj)) << " obj=" << obj;
+ DCHECK(HasAddress(obj)) << " obj=" << obj;
return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(obj)
+ from_space_slide_diff_);
}
@@ -257,9 +274,11 @@ class MarkCompact final : public GarbageCollector {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
// Update the reference at given offset in the given object with post-compact
- // address.
- ALWAYS_INLINE void UpdateRef(mirror::Object* obj, MemberOffset offset)
- REQUIRES_SHARED(Locks::mutator_lock_);
+ // address. [begin, end) is moving-space range.
+ ALWAYS_INLINE void UpdateRef(mirror::Object* obj,
+ MemberOffset offset,
+ uint8_t* begin,
+ uint8_t* end) REQUIRES_SHARED(Locks::mutator_lock_);
// Verify that the gc-root is updated only once. Returns false if the update
// shouldn't be done.
@@ -267,16 +286,23 @@ class MarkCompact final : public GarbageCollector {
mirror::Object* old_ref,
const RootInfo& info)
REQUIRES_SHARED(Locks::mutator_lock_);
- // Update the given root with post-compact address.
+ // Update the given root with post-compact address. [begin, end) is
+ // moving-space range.
ALWAYS_INLINE void UpdateRoot(mirror::CompressedReference<mirror::Object>* root,
+ uint8_t* begin,
+ uint8_t* end,
const RootInfo& info = RootInfo(RootType::kRootUnknown))
REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void UpdateRoot(mirror::Object** root,
+ uint8_t* begin,
+ uint8_t* end,
const RootInfo& info = RootInfo(RootType::kRootUnknown))
REQUIRES_SHARED(Locks::mutator_lock_);
// Given the pre-compact address, the function returns the post-compact
- // address of the given object.
- ALWAYS_INLINE mirror::Object* PostCompactAddress(mirror::Object* old_ref) const
+ // address of the given object. [begin, end) is moving-space range.
+ ALWAYS_INLINE mirror::Object* PostCompactAddress(mirror::Object* old_ref,
+ uint8_t* begin,
+ uint8_t* end) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Compute post-compact address of an object in moving space. This function
// assumes that old_ref is in moving space.
@@ -526,6 +552,19 @@ class MarkCompact final : public GarbageCollector {
uint8_t* shadow_page,
Atomic<PageState>& state,
bool page_touched);
+ // Called for clamping of 'info_map_' and other GC data structures, which are
+ // small and/or in >4GB address space. There is no real benefit of clamping
+ // them synchronously during app forking. It clamps only if clamp_info_map_status_
+ // is set to kClampInfoPending, which is done by ClampGrowthLimit().
+ void MaybeClampGcStructures() REQUIRES(Locks::heap_bitmap_lock_);
+ // Initialize all the info-map related fields of this GC. Returns total size
+ // of all the structures in info-map.
+ size_t InitializeInfoMap(uint8_t* p, size_t moving_space_sz);
+ // Update class-table classes in compaction pause if we are running in debuggable
+ // mode. Only visit class-table in image spaces if 'immune_class_table_only'
+ // is true.
+ void UpdateClassTableClasses(Runtime* runtime, bool immune_class_table_only)
+ REQUIRES_SHARED(Locks::mutator_lock_);
// For checkpoints
Barrier gc_barrier_;
@@ -690,6 +729,11 @@ class MarkCompact final : public GarbageCollector {
size_t black_page_count_;
uint8_t* from_space_begin_;
+ // Cached values of moving-space range to optimize checking if reference
+ // belongs to moving-space or not. May get updated if and when heap is
+ // clamped.
+ uint8_t* const moving_space_begin_;
+ uint8_t* moving_space_end_;
// moving-space's end pointer at the marking pause. All allocations beyond
// this will be considered black in the current GC cycle. Aligned up to page
// size.
@@ -761,6 +805,10 @@ class MarkCompact final : public GarbageCollector {
// non-zygote processes during first GC, which sets up everyting for using
// minor-fault from next GC.
bool map_linear_alloc_shared_;
+ // Clamping statue of `info_map_`. Initialized with 'NotDone'. Once heap is
+ // clamped but info_map_ is delayed, we set it to 'Pending'. Once 'info_map_'
+ // is also clamped, then we set it to 'Finished'.
+ ClampInfoStatus clamp_info_map_status_;
class FlipCallback;
class ThreadFlipVisitor;
@@ -781,6 +829,7 @@ class MarkCompact final : public GarbageCollector {
};
std::ostream& operator<<(std::ostream& os, MarkCompact::PageState value);
+std::ostream& operator<<(std::ostream& os, MarkCompact::ClampInfoStatus value);
} // namespace collector
} // namespace gc
diff --git a/runtime/gc/collector/mark_sweep-inl.h b/runtime/gc/collector/mark_sweep-inl.h
index e4993ce718..2257b0db8f 100644
--- a/runtime/gc/collector/mark_sweep-inl.h
+++ b/runtime/gc/collector/mark_sweep-inl.h
@@ -40,7 +40,7 @@ inline void MarkSweep::ScanObjectVisit(mirror::Object* obj,
uint32_t class_flags = klass->GetClassFlags();
if ((class_flags & mirror::kClassFlagNoReferenceFields) != 0) {
++no_reference_class_count_;
- } else if (class_flags == mirror::kClassFlagNormal) {
+ } else if (class_flags == mirror::kClassFlagNormal || class_flags == mirror::kClassFlagRecord) {
++normal_count_;
} else if (class_flags == mirror::kClassFlagObjectArray) {
++object_array_count_;
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index c20e3a7347..290860136b 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -32,6 +32,8 @@ enum CollectorType {
kCollectorTypeCMS,
// Concurrent mark-compact.
kCollectorTypeCMC,
+ // The background compaction of the Concurrent mark-compact GC.
+ kCollectorTypeCMCBackground,
// Semi-space / mark-sweep hybrid, enables compaction.
kCollectorTypeSS,
// Heap trimming collector, doesn't do any actual collecting.
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 922b58870d..c5bd79d24c 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -193,8 +193,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
}
if (bytes_tl_bulk_allocated > 0) {
starting_gc_num = GetCurrentGcNum();
- size_t num_bytes_allocated_before =
- num_bytes_allocated_.fetch_add(bytes_tl_bulk_allocated, std::memory_order_relaxed);
+ size_t num_bytes_allocated_before = AddBytesAllocated(bytes_tl_bulk_allocated);
new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
// Only trace when we get an increase in the number of bytes allocated. This happens when
// obtaining a new TLAB and isn't often enough to hurt performance according to golem.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f27bddb361..2d1d393000 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -421,7 +421,10 @@ Heap::Heap(size_t initial_size,
}
LOG(INFO) << "Using " << foreground_collector_type_ << " GC.";
- if (!gUseUserfaultfd) {
+ if (gUseUserfaultfd) {
+ CHECK_EQ(foreground_collector_type_, kCollectorTypeCMC);
+ CHECK_EQ(background_collector_type_, kCollectorTypeCMCBackground);
+ } else {
// This ensures that userfaultfd syscall is done before any seccomp filter is installed.
// TODO(b/266731037): Remove this when we no longer need to collect metric on userfaultfd
// support.
@@ -1562,7 +1565,7 @@ void Heap::DoPendingCollectorTransition() {
VLOG(gc) << "Homogeneous compaction ignored due to jank perceptible process state";
}
} else if (desired_collector_type == kCollectorTypeCCBackground ||
- desired_collector_type == kCollectorTypeCMC) {
+ desired_collector_type == kCollectorTypeCMCBackground) {
if (!CareAboutPauseTimes()) {
// Invoke full compaction.
CollectGarbageInternal(collector::kGcTypeFull,
@@ -2441,9 +2444,11 @@ void Heap::PreZygoteFork() {
return;
}
Runtime* runtime = Runtime::Current();
+ // Setup linear-alloc pool for post-zygote fork allocations before freezing
+ // snapshots of intern-table and class-table.
+ runtime->SetupLinearAllocForPostZygoteFork(self);
runtime->GetInternTable()->AddNewTable();
runtime->GetClassLinker()->MoveClassTableToPreZygote();
- runtime->SetupLinearAllocForPostZygoteFork(self);
VLOG(heap) << "Starting PreZygoteFork";
// The end of the non-moving space may be protected, unprotect it so that we can copy the zygote
// there.
@@ -3749,7 +3754,10 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
grow_bytes = 0;
}
}
- CHECK_LE(target_size, std::numeric_limits<size_t>::max());
+ CHECK_LE(target_size, std::numeric_limits<size_t>::max())
+ << " bytes_allocated:" << bytes_allocated
+ << " bytes_freed:" << current_gc_iteration_.GetFreedBytes()
+ << " large_obj_bytes_freed:" << current_gc_iteration_.GetFreedLargeObjectBytes();
if (!ignore_target_footprint_) {
SetIdealFootprint(target_size);
// Store target size (computed with foreground heap growth multiplier) for updating
@@ -3813,10 +3821,18 @@ void Heap::ClampGrowthLimit() {
malloc_space->ClampGrowthLimit();
}
}
+ if (large_object_space_ != nullptr) {
+ large_object_space_->ClampGrowthLimit(capacity_);
+ }
if (collector_type_ == kCollectorTypeCC) {
DCHECK(region_space_ != nullptr);
// Twice the capacity as CC needs extra space for evacuating objects.
region_space_->ClampGrowthLimit(2 * capacity_);
+ } else if (collector_type_ == kCollectorTypeCMC) {
+ DCHECK(gUseUserfaultfd);
+ DCHECK_NE(mark_compact_, nullptr);
+ DCHECK_NE(bump_pointer_space_, nullptr);
+ mark_compact_->ClampGrowthLimit(capacity_);
}
// This space isn't added for performance reasons.
if (main_space_backup_.get() != nullptr) {
@@ -3975,7 +3991,12 @@ void Heap::RequestCollectorTransition(CollectorType desired_collector_type, uint
// doesn't change.
DCHECK_EQ(desired_collector_type_, kCollectorTypeCCBackground);
}
+ if (collector_type_ == kCollectorTypeCMC) {
+ // For CMC collector type doesn't change.
+ DCHECK_EQ(desired_collector_type_, kCollectorTypeCMCBackground);
+ }
DCHECK_NE(collector_type_, kCollectorTypeCCBackground);
+ DCHECK_NE(collector_type_, kCollectorTypeCMCBackground);
CollectorTransitionTask* added_task = nullptr;
const uint64_t target_time = NanoTime() + delta_time;
{
@@ -4515,10 +4536,9 @@ mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
}
// Try allocating a new thread local buffer, if the allocation fails the space must be
// full so return null.
- if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
+ if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size, bytes_tl_bulk_allocated)) {
return nullptr;
}
- *bytes_tl_bulk_allocated = new_tlab_size;
if (CheckPerfettoJHPEnabled()) {
VLOG(heap) << "JHP:kAllocatorTypeTLAB, New Tlab bytes allocated= " << new_tlab_size;
}
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 31a1b2b6a2..d7f6948b85 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -550,6 +550,11 @@ class Heap {
return num_bytes_allocated_.load(std::memory_order_relaxed);
}
+ // Returns bytes_allocated before adding 'bytes' to it.
+ size_t AddBytesAllocated(size_t bytes) {
+ return num_bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
+ }
+
bool GetUseGenerationalCC() const {
return use_generational_cc_;
}
@@ -1046,6 +1051,7 @@ class Heap {
collector_type == kCollectorTypeSS ||
collector_type == kCollectorTypeCMC ||
collector_type == kCollectorTypeCCBackground ||
+ collector_type == kCollectorTypeCMCBackground ||
collector_type == kCollectorTypeHomogeneousSpaceCompact;
}
bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
@@ -1229,13 +1235,13 @@ class Heap {
void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);
- // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
- // sweep GC, false for other GC types.
+ // What kind of concurrency behavior is the runtime after?
bool IsGcConcurrent() const ALWAYS_INLINE {
return collector_type_ == kCollectorTypeCC ||
collector_type_ == kCollectorTypeCMC ||
collector_type_ == kCollectorTypeCMS ||
- collector_type_ == kCollectorTypeCCBackground;
+ collector_type_ == kCollectorTypeCCBackground ||
+ collector_type_ == kCollectorTypeCMCBackground;
}
// Trim the managed and native spaces by releasing unused memory back to the OS.
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index 6bdacaf18c..53eef9c027 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -73,7 +73,8 @@ ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
}
// This must be called whenever DequeuePendingReference is called.
-void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref) {
+void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref,
+ std::memory_order order) {
Heap* heap = Runtime::Current()->GetHeap();
if (kUseBakerReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
heap->ConcurrentCopyingCollector()->IsActive()) {
@@ -84,7 +85,7 @@ void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> re
collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
uint32_t rb_state = ref->GetReadBarrierState();
if (rb_state == ReadBarrier::GrayState()) {
- ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::NonGrayState());
+ ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::NonGrayState(), order);
CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::NonGrayState());
} else {
// In ConcurrentCopying::ProcessMarkStackRef() we may leave a non-gray reference in the queue
@@ -158,7 +159,7 @@ void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
}
// Delay disabling the read barrier until here so that the ClearReferent call above in
// transaction mode will trigger the read barrier.
- DisableReadBarrierForReference(ref);
+ DisableReadBarrierForReference(ref, std::memory_order_relaxed);
}
}
@@ -186,7 +187,7 @@ FinalizerStats ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleare
}
// Delay disabling the read barrier until here so that the ClearReferent call above in
// transaction mode will trigger the read barrier.
- DisableReadBarrierForReference(ref->AsReference());
+ DisableReadBarrierForReference(ref->AsReference(), std::memory_order_relaxed);
}
return FinalizerStats(num_refs, num_enqueued);
}
@@ -216,7 +217,7 @@ uint32_t ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
visitor->MarkHeapReference(referent_addr, /*do_atomic_update=*/ true);
++num_refs;
}
- DisableReadBarrierForReference(buf[i]->AsReference());
+ DisableReadBarrierForReference(buf[i]->AsReference(), std::memory_order_release);
}
} while (!empty);
return num_refs;
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 3fda7167d4..69f04d783a 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -80,8 +80,10 @@ class ReferenceQueue {
// If applicable, disable the read barrier for the reference after its referent is handled (see
// ConcurrentCopying::ProcessMarkStackRef.) This must be called for a reference that's dequeued
- // from pending queue (DequeuePendingReference).
- void DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref)
+ // from pending queue (DequeuePendingReference). 'order' is expected to be
+ // 'release' if called outside 'weak-ref access disabled' critical section.
+ // Otherwise 'relaxed' order will suffice.
+ void DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref, std::memory_order order)
REQUIRES_SHARED(Locks::mutator_lock_);
// Enqueues finalizer references with white referents. White referents are blackened, moved to
diff --git a/runtime/gc/space/bump_pointer_space-walk-inl.h b/runtime/gc/space/bump_pointer_space-walk-inl.h
index a978f62c61..89e42bcf27 100644
--- a/runtime/gc/space/bump_pointer_space-walk-inl.h
+++ b/runtime/gc/space/bump_pointer_space-walk-inl.h
@@ -49,7 +49,7 @@ inline void BumpPointerSpace::Walk(Visitor&& visitor) {
};
{
- MutexLock mu(Thread::Current(), block_lock_);
+ MutexLock mu(Thread::Current(), lock_);
// If we have 0 blocks then we need to update the main header since we have bump pointer style
// allocation into an unbounded region (actually bounded by Capacity()).
if (block_sizes_.empty()) {
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 7753f73ca4..c63559a555 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -45,15 +45,12 @@ BumpPointerSpace* BumpPointerSpace::CreateFromMemMap(const std::string& name, Me
}
BumpPointerSpace::BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit)
- : ContinuousMemMapAllocSpace(name,
- MemMap::Invalid(),
- begin,
- begin,
- limit,
- kGcRetentionPolicyAlwaysCollect),
+ : ContinuousMemMapAllocSpace(
+ name, MemMap::Invalid(), begin, begin, limit, kGcRetentionPolicyAlwaysCollect),
growth_end_(limit),
- objects_allocated_(0), bytes_allocated_(0),
- block_lock_("Block lock"),
+ objects_allocated_(0),
+ bytes_allocated_(0),
+ lock_("Bump-pointer space lock"),
main_block_size_(0) {
// This constructor gets called only from Heap::PreZygoteFork(), which
// doesn't require a mark_bitmap.
@@ -67,8 +64,9 @@ BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap&& mem_map)
mem_map.End(),
kGcRetentionPolicyAlwaysCollect),
growth_end_(mem_map_.End()),
- objects_allocated_(0), bytes_allocated_(0),
- block_lock_("Block lock", kBumpPointerSpaceBlockLock),
+ objects_allocated_(0),
+ bytes_allocated_(0),
+ lock_("Bump-pointer space lock", kBumpPointerSpaceBlockLock),
main_block_size_(0) {
mark_bitmap_ =
accounting::ContinuousSpaceBitmap::Create("bump-pointer space live bitmap",
@@ -87,14 +85,34 @@ void BumpPointerSpace::Clear() {
SetEnd(Begin());
objects_allocated_.store(0, std::memory_order_relaxed);
bytes_allocated_.store(0, std::memory_order_relaxed);
- growth_end_ = Limit();
{
- MutexLock mu(Thread::Current(), block_lock_);
+ MutexLock mu(Thread::Current(), lock_);
+ growth_end_ = Limit();
block_sizes_.clear();
main_block_size_ = 0;
}
}
+size_t BumpPointerSpace::ClampGrowthLimit(size_t new_capacity) {
+ CHECK(gUseUserfaultfd);
+ MutexLock mu(Thread::Current(), lock_);
+ CHECK_EQ(growth_end_, Limit());
+ uint8_t* end = End();
+ CHECK_LE(end, growth_end_);
+ size_t free_capacity = growth_end_ - end;
+ size_t clamp_size = Capacity() - new_capacity;
+ if (clamp_size > free_capacity) {
+ new_capacity += clamp_size - free_capacity;
+ }
+ SetLimit(Begin() + new_capacity);
+ growth_end_ = Limit();
+ GetMemMap()->SetSize(new_capacity);
+ if (GetMarkBitmap()->HeapBegin() != 0) {
+ GetMarkBitmap()->SetHeapSize(new_capacity);
+ }
+ return new_capacity;
+}
+
void BumpPointerSpace::Dump(std::ostream& os) const {
os << GetName() << " "
<< reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
@@ -102,7 +120,7 @@ void BumpPointerSpace::Dump(std::ostream& os) const {
}
size_t BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
- MutexLock mu(Thread::Current(), block_lock_);
+ MutexLock mu(Thread::Current(), lock_);
RevokeThreadLocalBuffersLocked(thread);
return 0U;
}
@@ -121,7 +139,7 @@ size_t BumpPointerSpace::RevokeAllThreadLocalBuffers() {
void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
if (kIsDebugBuild) {
- MutexLock mu(Thread::Current(), block_lock_);
+ MutexLock mu(Thread::Current(), lock_);
DCHECK(!thread->HasTlab());
}
}
@@ -146,7 +164,6 @@ void BumpPointerSpace::UpdateMainBlock() {
// Returns the start of the storage.
uint8_t* BumpPointerSpace::AllocBlock(size_t bytes) {
- bytes = RoundUp(bytes, kAlignment);
if (block_sizes_.empty()) {
UpdateMainBlock();
}
@@ -169,7 +186,7 @@ uint64_t BumpPointerSpace::GetBytesAllocated() {
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
- MutexLock mu3(Thread::Current(), block_lock_);
+ MutexLock mu3(Thread::Current(), lock_);
// If we don't have any blocks, we don't have any thread local buffers. This check is required
// since there can exist multiple bump pointer spaces which exist at the same time.
if (!block_sizes_.empty()) {
@@ -187,7 +204,7 @@ uint64_t BumpPointerSpace::GetObjectsAllocated() {
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
- MutexLock mu3(Thread::Current(), block_lock_);
+ MutexLock mu3(Thread::Current(), lock_);
// If we don't have any blocks, we don't have any thread local buffers. This check is required
// since there can exist multiple bump pointer spaces which exist at the same time.
if (!block_sizes_.empty()) {
@@ -204,14 +221,18 @@ void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
thread->ResetTlab();
}
-bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
- MutexLock mu(Thread::Current(), block_lock_);
+bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes, size_t* bytes_tl_bulk_allocated) {
+ bytes = RoundUp(bytes, kAlignment);
+ MutexLock mu(Thread::Current(), lock_);
RevokeThreadLocalBuffersLocked(self);
uint8_t* start = AllocBlock(bytes);
if (start == nullptr) {
return false;
}
self->SetTlab(start, start + bytes, start + bytes);
+ if (bytes_tl_bulk_allocated != nullptr) {
+ *bytes_tl_bulk_allocated = bytes;
+ }
return true;
}
@@ -235,7 +256,7 @@ size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* u
return num_bytes;
}
-uint8_t* BumpPointerSpace::AlignEnd(Thread* self, size_t alignment) {
+uint8_t* BumpPointerSpace::AlignEnd(Thread* self, size_t alignment, Heap* heap) {
Locks::mutator_lock_->AssertExclusiveHeld(self);
DCHECK(IsAligned<kAlignment>(alignment));
uint8_t* end = end_.load(std::memory_order_relaxed);
@@ -243,19 +264,20 @@ uint8_t* BumpPointerSpace::AlignEnd(Thread* self, size_t alignment) {
ptrdiff_t diff = aligned_end - end;
if (diff > 0) {
end_.store(aligned_end, std::memory_order_relaxed);
+ heap->AddBytesAllocated(diff);
// If we have blocks after the main one. Then just add the diff to the last
// block.
- MutexLock mu(self, block_lock_);
+ MutexLock mu(self, lock_);
if (!block_sizes_.empty()) {
block_sizes_.back() += diff;
}
}
- return end;
+ return aligned_end;
}
std::vector<size_t>* BumpPointerSpace::GetBlockSizes(Thread* self, size_t* main_block_size) {
std::vector<size_t>* block_sizes = nullptr;
- MutexLock mu(self, block_lock_);
+ MutexLock mu(self, lock_);
if (!block_sizes_.empty()) {
block_sizes = new std::vector<size_t>(block_sizes_.begin(), block_sizes_.end());
} else {
@@ -268,7 +290,7 @@ std::vector<size_t>* BumpPointerSpace::GetBlockSizes(Thread* self, size_t* main_
void BumpPointerSpace::SetBlockSizes(Thread* self,
const size_t main_block_size,
const size_t first_valid_idx) {
- MutexLock mu(self, block_lock_);
+ MutexLock mu(self, lock_);
main_block_size_ = main_block_size;
if (!block_sizes_.empty()) {
block_sizes_.erase(block_sizes_.begin(), block_sizes_.begin() + first_valid_idx);
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index bba171109d..d5ab5069ec 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -88,6 +88,14 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
growth_end_ = Limit();
}
+ // Attempts to clamp the space limit to 'new_capacity'. If that is not
+ // possible, clamps as much as possible. Returns the new capacity. 'lock_' is
+ // used to ensure that TLAB allocations, which are the only allocations that
+ // may happen concurrently with this function, are synchronized. The other
+ // Alloc* functions are either used in single-threaded mode, or, when used in
+ // multi-threaded mode, by GCs (like SS) which don't have clamping implemented.
+ size_t ClampGrowthLimit(size_t new_capacity) REQUIRES(!lock_);
+
// Override capacity so that we only return the possibly limited capacity
size_t Capacity() const override {
return growth_end_ - begin_;
@@ -103,21 +111,21 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
}
// Reset the space to empty.
- void Clear() override REQUIRES(!block_lock_);
+ void Clear() override REQUIRES(!lock_);
void Dump(std::ostream& os) const override;
- size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!block_lock_);
+ size_t RevokeThreadLocalBuffers(Thread* thread) override REQUIRES(!lock_);
size_t RevokeAllThreadLocalBuffers() override
- REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
- void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!block_lock_);
+ REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !lock_);
+ void AssertThreadLocalBuffersAreRevoked(Thread* thread) REQUIRES(!lock_);
void AssertAllThreadLocalBuffersAreRevoked()
- REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
+ REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !lock_);
uint64_t GetBytesAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
+ REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !lock_);
uint64_t GetObjectsAllocated() override REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
+ REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !lock_);
// Return the pre-determined allocated object count. This could be beneficial
// when we know that all the TLABs are revoked.
int32_t GetAccumulatedObjectsAllocated() REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -134,8 +142,9 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
// TODO: Change this? Mainly used for compacting to a particular region of memory.
BumpPointerSpace(const std::string& name, uint8_t* begin, uint8_t* limit);
- // Allocate a new TLAB, returns false if the allocation failed.
- bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
+ // Allocates a new TLAB and updates bytes_tl_bulk_allocated with the
+ // allocation size; returns false if the allocation failed.
+ bool AllocNewTlab(Thread* self, size_t bytes, size_t* bytes_tl_bulk_allocated) REQUIRES(!lock_);
BumpPointerSpace* AsBumpPointerSpace() override {
return this;
@@ -143,9 +152,7 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
// Go through all of the blocks and visit the continuous objects.
template <typename Visitor>
- ALWAYS_INLINE void Walk(Visitor&& visitor)
- REQUIRES_SHARED(Locks::mutator_lock_)
- REQUIRES(!block_lock_);
+ ALWAYS_INLINE void Walk(Visitor&& visitor) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override;
@@ -165,27 +172,27 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
BumpPointerSpace(const std::string& name, MemMap&& mem_map);
// Allocate a raw block of bytes.
- uint8_t* AllocBlock(size_t bytes) REQUIRES(block_lock_);
- void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(block_lock_);
+ uint8_t* AllocBlock(size_t bytes) REQUIRES(lock_);
+ void RevokeThreadLocalBuffersLocked(Thread* thread) REQUIRES(lock_);
// The main block is an unbounded block where objects go when there are no other blocks. This
// enables us to maintain tightly packed objects when you are not using thread local buffers for
// allocation. The main block starts at the space Begin().
- void UpdateMainBlock() REQUIRES(block_lock_);
+ void UpdateMainBlock() REQUIRES(lock_);
uint8_t* growth_end_;
AtomicInteger objects_allocated_; // Accumulated from revoked thread local regions.
AtomicInteger bytes_allocated_; // Accumulated from revoked thread local regions.
- Mutex block_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
// The objects at the start of the space are stored in the main block.
- size_t main_block_size_ GUARDED_BY(block_lock_);
+ size_t main_block_size_ GUARDED_BY(lock_);
// List of block sizes (in bytes) after the main-block. Needed for Walk().
// If empty then the space has only one long continuous block. Each TLAB
// allocation has one entry in this deque.
// Keeping block-sizes off-heap simplifies sliding compaction algorithms.
// The compaction algorithm should ideally compact all objects into the main
// block, thereby enabling erasing corresponding entries from here.
- std::deque<size_t> block_sizes_ GUARDED_BY(block_lock_);
+ std::deque<size_t> block_sizes_ GUARDED_BY(lock_);
private:
// Return the object which comes after obj, while ensuring alignment.
@@ -194,7 +201,7 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
// Return a vector of block sizes on the space. Required by MarkCompact GC for
// walking black objects allocated after marking phase.
- std::vector<size_t>* GetBlockSizes(Thread* self, size_t* main_block_size) REQUIRES(!block_lock_);
+ std::vector<size_t>* GetBlockSizes(Thread* self, size_t* main_block_size) REQUIRES(!lock_);
// Once the MarkCompact decides the post-compact layout of the space in the
// pre-compaction pause, it calls this function to update the block sizes. It is
@@ -202,12 +209,12 @@ class BumpPointerSpace final : public ContinuousMemMapAllocSpace {
// into itself, and the index of first unconsumed block. This works as all the
// block sizes are ordered. Also updates 'end_' to reflect the change.
void SetBlockSizes(Thread* self, const size_t main_block_size, const size_t first_valid_idx)
- REQUIRES(!block_lock_, Locks::mutator_lock_);
+ REQUIRES(!lock_, Locks::mutator_lock_);
// Align end to the given alignment. This is done in MarkCompact GC when
// mutators are suspended so that upcoming TLAB allocations start with a new
- // page. Returns the pre-alignment end.
- uint8_t* AlignEnd(Thread* self, size_t alignment) REQUIRES(Locks::mutator_lock_);
+ // page. Adjusts heap's bytes_allocated accordingly. Returns the aligned end.
+ uint8_t* AlignEnd(Thread* self, size_t alignment, Heap* heap) REQUIRES(Locks::mutator_lock_);
friend class collector::MarkSweep;
friend class collector::MarkCompact;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 13966d8d97..f4606018b2 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -51,7 +51,6 @@
#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "exec_utils.h"
-#include "fmt/format.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/task_processor.h"
#include "image-inl.h"
@@ -78,8 +77,6 @@ using ::android::base::Join;
using ::android::base::StringAppendF;
using ::android::base::StringPrintf;
-using ::fmt::literals::operator""_format; // NOLINT
-
// We do not allow the boot image and extensions to take more than 1GiB. They are
// supposed to be much smaller and allocating more that this would likely fail anyway.
static constexpr size_t kMaxTotalImageReservationSize = 1 * GB;
@@ -1055,6 +1052,7 @@ class ImageSpace::Loader {
Thread* const self = Thread::Current();
static constexpr size_t kMinBlocks = 2u;
const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
+ bool failed_decompression = false;
for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
auto function = [&](Thread*) {
const uint64_t start2 = NanoTime();
@@ -1062,8 +1060,11 @@ class ImageSpace::Loader {
bool result = block.Decompress(/*out_ptr=*/map.Begin(),
/*in_ptr=*/temp_map.Begin(),
error_msg);
- if (!result && error_msg != nullptr) {
- *error_msg = "Failed to decompress image block " + *error_msg;
+ if (!result) {
+ failed_decompression = true;
+ if (error_msg != nullptr) {
+ *error_msg = "Failed to decompress image block " + *error_msg;
+ }
}
VLOG(image) << "Decompress block " << block.GetDataSize() << " -> "
<< block.GetImageSize() << " in " << PrettyDuration(NanoTime() - start2);
@@ -1085,6 +1086,10 @@ class ImageSpace::Loader {
VLOG(image) << "Decompressing image took " << PrettyDuration(time) << " ("
<< PrettySize(static_cast<uint64_t>(map.Size()) * MsToNs(1000) / (time + 1))
<< "/s)";
+ if (failed_decompression) {
+ DCHECK(error_msg == nullptr || !error_msg->empty());
+ return MemMap::Invalid();
+ }
} else {
DCHECK(!allow_direct_mapping);
// We do not allow direct mapping for boot image extensions compiled to a memfd.
@@ -3446,8 +3451,9 @@ bool ImageSpace::ValidateOatFile(const OatFile& oat_file,
if (oat_file.GetOatHeader().GetKeyValueStoreSize() != 0 &&
oat_file.GetOatHeader().IsConcurrentCopying() != gUseReadBarrier) {
*error_msg =
- "ValidateOatFile found read barrier state mismatch (oat file: {}, runtime: {})"_format(
- oat_file.GetOatHeader().IsConcurrentCopying(), gUseReadBarrier);
+ ART_FORMAT("ValidateOatFile found read barrier state mismatch (oat file: {}, runtime: {})",
+ oat_file.GetOatHeader().IsConcurrentCopying(),
+ gUseReadBarrier);
return false;
}
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index f1df45f19a..b5f26cd806 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -390,6 +390,27 @@ FreeListSpace::FreeListSpace(const std::string& name,
allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_.Begin());
}
+void FreeListSpace::ClampGrowthLimit(size_t new_capacity) {
+ MutexLock mu(Thread::Current(), lock_);
+ new_capacity = RoundUp(new_capacity, kAlignment);
+ CHECK_LE(new_capacity, Size());
+ size_t diff = Size() - new_capacity;
+ // If we don't have enough free-bytes at the end to clamp, then do the best
+ // that we can.
+ if (diff > free_end_) {
+ new_capacity = Size() - free_end_;
+ diff = free_end_;
+ }
+
+ size_t alloc_info_size = sizeof(AllocationInfo) * (new_capacity / kAlignment);
+ allocation_info_map_.SetSize(alloc_info_size);
+ mem_map_.SetSize(new_capacity);
+ // We don't need to change anything in 'free_blocks_' as the free block at
+ // the end of the space isn't in there.
+ free_end_ -= diff;
+ end_ -= diff;
+}
+
FreeListSpace::~FreeListSpace() {}
void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index d94f467f6f..7611784080 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -115,6 +115,8 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
// GetRangeAtomic returns Begin() and End() atomically, that is, it never returns Begin() and
// End() from different allocations.
virtual std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const = 0;
+ // Clamp the space size to the given capacity.
+ virtual void ClampGrowthLimit(size_t capacity) = 0;
protected:
explicit LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end,
@@ -164,6 +166,7 @@ class LargeObjectMapSpace : public LargeObjectSpace {
bool Contains(const mirror::Object* obj) const override NO_THREAD_SAFETY_ANALYSIS;
void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
+ void ClampGrowthLimit(size_t capacity ATTRIBUTE_UNUSED) override {}
protected:
struct LargeObject {
@@ -199,6 +202,7 @@ class FreeListSpace final : public LargeObjectSpace {
void Dump(std::ostream& os) const override REQUIRES(!lock_);
void ForEachMemMap(std::function<void(const MemMap&)> func) const override REQUIRES(!lock_);
std::pair<uint8_t*, uint8_t*> GetBeginEndAtomic() const override REQUIRES(!lock_);
+ void ClampGrowthLimit(size_t capacity) override REQUIRES(!lock_);
protected:
FreeListSpace(const std::string& name, MemMap&& mem_map, uint8_t* begin, uint8_t* end);
diff --git a/runtime/image.cc b/runtime/image.cc
index bb1701f8fe..37437b4b86 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -219,7 +219,14 @@ bool ImageHeader::Block::Decompress(uint8_t* out_ptr,
if (!ok) {
return false;
}
- CHECK_EQ(decompressed_size, image_size_);
+ if (decompressed_size != image_size_) {
+ if (error_msg != nullptr) {
+ // Maybe some disk / memory corruption, just bail.
+ *error_msg = (std::ostringstream() << "Decompressed size different than image size: "
+ << decompressed_size << ", and " << image_size_).str();
+ }
+ return false;
+ }
break;
}
default: {
@@ -421,7 +428,7 @@ bool ImageHeader::WriteData(const ImageFileGuard& image_file,
if (update_checksum) {
// Calculate the image checksum of the remaining data.
- image_checksum = adler32(GetImageChecksum(),
+ image_checksum = adler32(image_checksum,
reinterpret_cast<const uint8_t*>(bitmap_data),
bitmap_section.Size());
this->SetImageChecksum(image_checksum);
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index d115f51f7e..0fcada1af4 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -17,8 +17,8 @@
#ifndef ART_RUNTIME_INTERN_TABLE_H_
#define ART_RUNTIME_INTERN_TABLE_H_
-#include "base/allocator.h"
#include "base/dchecked_vector.h"
+#include "base/gc_visited_arena_pool.h"
#include "base/hash_set.h"
#include "base/mutex.h"
#include "gc/weak_root_state.h"
@@ -109,11 +109,12 @@ class InternTable {
}
};
- using UnorderedSet = HashSet<GcRoot<mirror::String>,
- GcRootEmptyFn,
- StringHash,
- StringEquals,
- TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>>;
+ using UnorderedSet =
+ HashSet<GcRoot<mirror::String>,
+ GcRootEmptyFn,
+ StringHash,
+ StringEquals,
+ GcRootArenaAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>>;
InternTable();
diff --git a/runtime/javaheapprof/javaheapsampler.cc b/runtime/javaheapprof/javaheapsampler.cc
index a73ed0b719..74671347c7 100644
--- a/runtime/javaheapprof/javaheapsampler.cc
+++ b/runtime/javaheapprof/javaheapsampler.cc
@@ -131,10 +131,6 @@ void HeapSampler::AdjustSampleOffset(size_t adjustment) {
<< " next_bytes_until_sample = " << next_bytes_until_sample;
}
-bool HeapSampler::IsEnabled() {
- return enabled_.load(std::memory_order_acquire);
-}
-
int HeapSampler::GetSamplingInterval() {
return p_sampling_interval_.load(std::memory_order_acquire);
}
diff --git a/runtime/javaheapprof/javaheapsampler.h b/runtime/javaheapprof/javaheapsampler.h
index 618893cad0..41514726cd 100644
--- a/runtime/javaheapprof/javaheapsampler.h
+++ b/runtime/javaheapprof/javaheapsampler.h
@@ -68,7 +68,7 @@ class HeapSampler {
// of new Tlab after Reset.
void AdjustSampleOffset(size_t adjustment);
// Is heap sampler enabled?
- bool IsEnabled();
+ bool IsEnabled() { return enabled_.load(std::memory_order_acquire); }
// Set the sampling interval.
void SetSamplingInterval(int sampling_interval) REQUIRES(!geo_dist_rng_lock_);
// Return the sampling interval.
@@ -80,7 +80,7 @@ class HeapSampler {
// possibly decreasing sample intervals by sample_adj_bytes.
size_t PickAndAdjustNextSample(size_t sample_adj_bytes = 0) REQUIRES(!geo_dist_rng_lock_);
- std::atomic<bool> enabled_;
+ std::atomic<bool> enabled_{false};
// Default sampling interval is 4kb.
// Writes guarded by geo_dist_rng_lock_.
std::atomic<int> p_sampling_interval_{4 * 1024};
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index b231cce0bc..3472c788ab 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -71,7 +71,7 @@ static constexpr uint32_t kJitStressDefaultOptimizeThreshold = kJitDefaultOptimi
static constexpr uint32_t kJitSlowStressDefaultOptimizeThreshold =
kJitStressDefaultOptimizeThreshold / 2;
-static constexpr uint32_t kJitDefaultWarmupThreshold = 0xffff;
+static constexpr uint32_t kJitDefaultWarmupThreshold = 0x3fff;
// Different warm-up threshold constants. These default to the equivalent warmup thresholds divided
// by 2, but can be overridden at the command-line.
static constexpr uint32_t kJitStressDefaultWarmupThreshold = kJitDefaultWarmupThreshold / 2;
diff --git a/runtime/jni/local_reference_table.cc b/runtime/jni/local_reference_table.cc
index 59c4e31ba1..15aaf5baa3 100644
--- a/runtime/jni/local_reference_table.cc
+++ b/runtime/jni/local_reference_table.cc
@@ -483,6 +483,13 @@ bool LocalReferenceTable::Remove(LRTSegmentState previous_state, IndirectRef ire
}
DCHECK_LT(entry_index, top_index);
+ // Workaround for double `DeleteLocalRef` bug. b/298297411
+ if (entry->IsFree()) {
+ // In debug build or with CheckJNI enabled, we would have detected this above.
+ LOG(ERROR) << "App error: `DeleteLocalRef()` on already deleted local ref. b/298297411";
+ return false;
+ }
+
// Prune the free entry list if a segment with holes was popped before the `Remove()` call.
uint32_t first_free_index = GetFirstFreeIndex();
if (first_free_index != kFreeListEnd && first_free_index >= top_index) {
diff --git a/runtime/linear_alloc-inl.h b/runtime/linear_alloc-inl.h
index 13dbea11d0..7c81352cd9 100644
--- a/runtime/linear_alloc-inl.h
+++ b/runtime/linear_alloc-inl.h
@@ -40,6 +40,18 @@ inline void LinearAlloc::SetFirstObject(void* begin, size_t bytes) const {
down_cast<TrackedArena*>(arena)->SetFirstObject(static_cast<uint8_t*>(begin), end);
}
+inline void LinearAlloc::ConvertToNoGcRoots(void* ptr, LinearAllocKind orig_kind) {
+ if (track_allocations_ && ptr != nullptr) {
+ TrackingHeader* header = static_cast<TrackingHeader*>(ptr);
+ header--;
+ DCHECK_EQ(header->GetKind(), orig_kind);
+ DCHECK_GT(header->GetSize(), 0u);
+ // 16-byte allocations are not supported yet.
+ DCHECK(!header->Is16Aligned());
+ header->SetKind(LinearAllocKind::kNoGCRoots);
+ }
+}
+
inline void LinearAlloc::SetupForPostZygoteFork(Thread* self) {
MutexLock mu(self, lock_);
DCHECK(track_allocations_);
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
index c40af8ad46..c81077abfc 100644
--- a/runtime/linear_alloc.h
+++ b/runtime/linear_alloc.h
@@ -56,9 +56,13 @@ class TrackingHeader final {
bool Is16Aligned() const { return size_ & kIs16Aligned; }
private:
+ void SetKind(LinearAllocKind kind) { kind_ = kind; }
+
LinearAllocKind kind_;
uint32_t size_;
+ friend class LinearAlloc; // For SetKind()
+
DISALLOW_IMPLICIT_CONSTRUCTORS(TrackingHeader);
};
@@ -93,6 +97,9 @@ class LinearAlloc {
// Force arena allocator to ask for a new arena on next allocation. This
// is to preserve private/shared clean pages across zygote fork.
void SetupForPostZygoteFork(Thread* self) REQUIRES(!lock_);
+ // Convert the given allocated object into a `no GC-root` so that compaction
+ // skips it. Currently only used during class linking for ArtMethod array.
+ void ConvertToNoGcRoots(void* ptr, LinearAllocKind orig_kind);
// Return true if the linear alloc contains an address.
bool Contains(void* ptr) const REQUIRES(!lock_);
diff --git a/runtime/metrics/statsd.cc b/runtime/metrics/statsd.cc
index 7002f22fae..2105bdb953 100644
--- a/runtime/metrics/statsd.cc
+++ b/runtime/metrics/statsd.cc
@@ -304,6 +304,9 @@ constexpr int32_t EncodeGcCollectorType(gc::CollectorType collector_type) {
return statsd::ART_DATUM_REPORTED__GC__ART_GC_COLLECTOR_TYPE_CONCURRENT_MARK_SWEEP;
case gc::CollectorType::kCollectorTypeCMC:
return statsd::ART_DATUM_REPORTED__GC__ART_GC_COLLECTOR_TYPE_CONCURRENT_MARK_COMPACT;
+ case gc::CollectorType::kCollectorTypeCMCBackground:
+ return statsd::
+ ART_DATUM_REPORTED__GC__ART_GC_COLLECTOR_TYPE_CONCURRENT_MARK_COMPACT_BACKGROUND;
case gc::CollectorType::kCollectorTypeSS:
return statsd::ART_DATUM_REPORTED__GC__ART_GC_COLLECTOR_TYPE_SEMI_SPACE;
case gc::kCollectorTypeCC:
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index 259b3dd6f5..2895009d17 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -146,8 +146,9 @@ inline uint32_t Object::GetReadBarrierStateAcquire() {
return rb_state;
}
-template<std::memory_order kMemoryOrder>
-inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state) {
+inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state,
+ uint32_t rb_state,
+ std::memory_order order) {
if (!kUseBakerReadBarrier) {
LOG(FATAL) << "Unreachable";
UNREACHABLE();
@@ -171,7 +172,7 @@ inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32
// If `kMemoryOrder` == `std::memory_order_release`, use a CAS release so that when GC updates
// all the fields of an object and then changes the object from gray to black (non-gray), the
// field updates (stores) will be visible (won't be reordered after this CAS.)
- } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, kMemoryOrder));
+ } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, order));
return true;
}
diff --git a/runtime/mirror/object-refvisitor-inl.h b/runtime/mirror/object-refvisitor-inl.h
index 4c72cd58c3..de60c8e1e7 100644
--- a/runtime/mirror/object-refvisitor-inl.h
+++ b/runtime/mirror/object-refvisitor-inl.h
@@ -26,6 +26,40 @@
namespace art {
namespace mirror {
+template <VerifyObjectFlags kVerifyFlags,
+ ReadBarrierOption kReadBarrierOption>
+static void CheckNoReferenceField(ObjPtr<mirror::Class> klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ if (!kIsDebugBuild) {
+ return;
+ }
+ CHECK(!klass->IsClassClass<kVerifyFlags>());
+ CHECK((!klass->IsObjectArrayClass<kVerifyFlags, kReadBarrierOption>()));
+ // String still has instance fields for reflection purposes but these don't exist in
+ // actual string instances.
+ if (!klass->IsStringClass<kVerifyFlags>()) {
+ size_t total_reference_instance_fields = 0;
+ ObjPtr<Class> super_class = klass;
+ do {
+ total_reference_instance_fields +=
+ super_class->NumReferenceInstanceFields<kVerifyFlags>();
+ super_class = super_class->GetSuperClass<kVerifyFlags, kReadBarrierOption>();
+ } while (super_class != nullptr);
+ // The only reference field should be the object's class.
+ CHECK_EQ(total_reference_instance_fields, 1u);
+ }
+}
+
+template <VerifyObjectFlags kVerifyFlags>
+static void CheckNormalClass(ObjPtr<mirror::Class> klass)
+ REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ DCHECK(!klass->IsVariableSize<kVerifyFlags>());
+ DCHECK(!klass->IsClassClass<kVerifyFlags>());
+ DCHECK(!klass->IsStringClass<kVerifyFlags>());
+ DCHECK(!klass->IsClassLoaderClass<kVerifyFlags>());
+ DCHECK(!klass->IsArrayClass<kVerifyFlags>());
+}
+
template <bool kVisitNativeRoots,
VerifyObjectFlags kVerifyFlags,
ReadBarrierOption kReadBarrierOption,
@@ -36,58 +70,58 @@ inline void Object::VisitReferences(const Visitor& visitor,
visitor(this, ClassOffset(), /* is_static= */ false);
ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
const uint32_t class_flags = klass->GetClassFlags<kVerifyNone>();
- if (LIKELY(class_flags == kClassFlagNormal)) {
- DCHECK((!klass->IsVariableSize<kVerifyFlags>()));
+ if (LIKELY(class_flags == kClassFlagNormal) || class_flags == kClassFlagRecord) {
+ CheckNormalClass<kVerifyFlags>(klass);
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
- DCHECK((!klass->IsClassClass<kVerifyFlags>()));
- DCHECK(!klass->IsStringClass<kVerifyFlags>());
- DCHECK(!klass->IsClassLoaderClass<kVerifyFlags>());
- DCHECK((!klass->IsArrayClass<kVerifyFlags>()));
- } else {
- if ((class_flags & kClassFlagNoReferenceFields) == 0) {
- DCHECK(!klass->IsStringClass<kVerifyFlags>());
- if (class_flags == kClassFlagClass) {
- DCHECK((klass->IsClassClass<kVerifyFlags>()));
- ObjPtr<Class> as_klass = AsClass<kVerifyNone>();
- as_klass->VisitReferences<kVisitNativeRoots, kVerifyFlags, kReadBarrierOption>(klass,
- visitor);
- } else if (class_flags == kClassFlagObjectArray) {
- DCHECK((klass->IsObjectArrayClass<kVerifyFlags>()));
- AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences(visitor);
- } else if ((class_flags & kClassFlagReference) != 0) {
- VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
- ref_visitor(klass, AsReference<kVerifyFlags, kReadBarrierOption>());
- } else if (class_flags == kClassFlagDexCache) {
- ObjPtr<mirror::DexCache> const dex_cache = AsDexCache<kVerifyFlags, kReadBarrierOption>();
- dex_cache->VisitReferences<kVisitNativeRoots,
- kVerifyFlags,
- kReadBarrierOption>(klass, visitor);
- } else {
- ObjPtr<mirror::ClassLoader> const class_loader =
- AsClassLoader<kVerifyFlags, kReadBarrierOption>();
- class_loader->VisitReferences<kVisitNativeRoots,
- kVerifyFlags,
- kReadBarrierOption>(klass, visitor);
- }
- } else if (kIsDebugBuild) {
- CHECK((!klass->IsClassClass<kVerifyFlags>()));
- CHECK((!klass->IsObjectArrayClass<kVerifyFlags>()));
- // String still has instance fields for reflection purposes but these don't exist in
- // actual string instances.
- if (!klass->IsStringClass<kVerifyFlags>()) {
- size_t total_reference_instance_fields = 0;
- ObjPtr<Class> super_class = klass;
- do {
- total_reference_instance_fields +=
- super_class->NumReferenceInstanceFields<kVerifyFlags>();
- super_class = super_class->GetSuperClass<kVerifyFlags, kReadBarrierOption>();
- } while (super_class != nullptr);
- // The only reference field should be the object's class. This field is handled at the
- // beginning of the function.
- CHECK_EQ(total_reference_instance_fields, 1u);
- }
- }
+ return;
+ }
+
+ if ((class_flags & kClassFlagNoReferenceFields) != 0) {
+ CheckNoReferenceField<kVerifyFlags, kReadBarrierOption>(klass);
+ return;
+ }
+
+ DCHECK(!klass->IsStringClass<kVerifyFlags>());
+ if (class_flags == kClassFlagClass) {
+ DCHECK(klass->IsClassClass<kVerifyFlags>());
+ ObjPtr<Class> as_klass = AsClass<kVerifyNone>();
+ as_klass->VisitReferences<kVisitNativeRoots, kVerifyFlags, kReadBarrierOption>(klass, visitor);
+ return;
+ }
+
+ if (class_flags == kClassFlagObjectArray) {
+ DCHECK((klass->IsObjectArrayClass<kVerifyFlags, kReadBarrierOption>()));
+ AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences(visitor);
+ return;
}
+
+ if ((class_flags & kClassFlagReference) != 0) {
+ VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
+ ref_visitor(klass, AsReference<kVerifyFlags, kReadBarrierOption>());
+ return;
+ }
+
+ if (class_flags == kClassFlagDexCache) {
+ DCHECK(klass->IsDexCacheClass<kVerifyFlags>());
+ ObjPtr<mirror::DexCache> const dex_cache = AsDexCache<kVerifyFlags, kReadBarrierOption>();
+ dex_cache->VisitReferences<kVisitNativeRoots,
+ kVerifyFlags,
+ kReadBarrierOption>(klass, visitor);
+ return;
+ }
+
+ if (class_flags == kClassFlagClassLoader) {
+ DCHECK(klass->IsClassLoaderClass<kVerifyFlags>());
+ ObjPtr<mirror::ClassLoader> const class_loader =
+ AsClassLoader<kVerifyFlags, kReadBarrierOption>();
+ class_loader->VisitReferences<kVisitNativeRoots,
+ kVerifyFlags,
+ kReadBarrierOption>(klass, visitor);
+ return;
+ }
+
+ LOG(FATAL) << "Unexpected class flags: " << std::hex << class_flags
+ << " for " << klass->PrettyClass();
}
// Could be called with from-space address of the object as we access klass and
@@ -106,85 +140,64 @@ inline size_t Object::VisitRefsForCompaction(const Visitor& visitor,
ObjPtr<Class> klass = GetClass<kVerifyFlags, kReadBarrierOption>();
DCHECK(klass != nullptr) << "obj=" << this;
const uint32_t class_flags = klass->GetClassFlags<kVerifyNone>();
- if (LIKELY(class_flags == kClassFlagNormal)) {
- DCHECK((!klass->IsVariableSize<kVerifyFlags>()));
+ if (LIKELY(class_flags == kClassFlagNormal) || class_flags == kClassFlagRecord) {
+ CheckNormalClass<kVerifyFlags>(klass);
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
- DCHECK((!klass->IsClassClass<kVerifyFlags>()));
- DCHECK(!klass->IsStringClass<kVerifyFlags>());
- DCHECK(!klass->IsClassLoaderClass<kVerifyFlags>());
- DCHECK((!klass->IsArrayClass<kVerifyFlags>()));
- } else {
- if ((class_flags & kClassFlagNoReferenceFields) == 0) {
- DCHECK(!klass->IsStringClass<kVerifyFlags>());
- if (class_flags == kClassFlagClass) {
- DCHECK((klass->IsClassClass<kVerifyFlags>()));
- ObjPtr<Class> as_klass = ObjPtr<Class>::DownCast(this);
- as_klass->VisitReferences<kVisitNativeRoots, kVerifyFlags, kReadBarrierOption>(klass,
- visitor);
- size = kFetchObjSize ? as_klass->SizeOf<kSizeOfFlags>() : 0;
- } else if (class_flags == kClassFlagObjectArray) {
- DCHECK((klass->IsObjectArrayClass<kVerifyFlags, kReadBarrierOption>()));
- ObjPtr<ObjectArray<Object>> obj_arr = ObjPtr<ObjectArray<Object>>::DownCast(this);
- obj_arr->VisitReferences(visitor, begin, end);
- size = kFetchObjSize ?
- obj_arr->SizeOf<kSizeOfFlags, kReadBarrierOption, /*kIsObjArray*/ true>() :
- 0;
- } else if ((class_flags & kClassFlagReference) != 0) {
- VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
- // Visit referent also as this is about updating the reference only.
- // There is no reference processing happening here.
- visitor(this, mirror::Reference::ReferentOffset(), /* is_static= */ false);
- size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
- } else if (class_flags == kClassFlagDexCache) {
- ObjPtr<DexCache> const dex_cache = ObjPtr<DexCache>::DownCast(this);
- dex_cache->VisitReferences<kVisitNativeRoots,
- kVerifyFlags,
- kReadBarrierOption>(klass, visitor);
- size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
- } else {
- ObjPtr<ClassLoader> const class_loader = ObjPtr<ClassLoader>::DownCast(this);
- class_loader->VisitReferences<kVisitNativeRoots,
- kVerifyFlags,
- kReadBarrierOption>(klass, visitor);
- size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
- }
+ } else if ((class_flags & kClassFlagNoReferenceFields) != 0) {
+ CheckNoReferenceField<kVerifyFlags, kReadBarrierOption>(klass);
+ if ((class_flags & kClassFlagString) != 0) {
+ size = kFetchObjSize ? static_cast<String*>(this)->SizeOf<kSizeOfFlags>() : 0;
+ } else if (klass->IsArrayClass<kVerifyFlags>()) {
+ // TODO: We can optimize this by implementing a SizeOf() version which takes
+ // component-size-shift as an argument, thereby avoiding multiple loads of
+ // component_type.
+ size = kFetchObjSize
+ ? static_cast<Array*>(this)->SizeOf<kSizeOfFlags, kReadBarrierOption>()
+ : 0;
} else {
- DCHECK((!klass->IsClassClass<kVerifyFlags>()));
- DCHECK((!klass->IsObjectArrayClass<kVerifyFlags, kReadBarrierOption>()));
- if ((class_flags & kClassFlagString) != 0) {
- size = kFetchObjSize ? static_cast<String*>(this)->SizeOf<kSizeOfFlags>() : 0;
- } else if (klass->IsArrayClass<kVerifyFlags>()) {
- // TODO: We can optimize this by implementing a SizeOf() version which takes
- // component-size-shift as an argument, thereby avoiding multiple loads of
- // component_type.
- size = kFetchObjSize
- ? static_cast<Array*>(this)->SizeOf<kSizeOfFlags, kReadBarrierOption>()
- : 0;
- } else {
- DCHECK_EQ(class_flags, kClassFlagNoReferenceFields)
- << "class_flags: " << std::hex << class_flags;
- // Only possibility left is of a normal klass instance with no references.
- size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
- }
-
- if (kIsDebugBuild) {
- // String still has instance fields for reflection purposes but these don't exist in
- // actual string instances.
- if (!klass->IsStringClass<kVerifyFlags>()) {
- size_t total_reference_instance_fields = 0;
- ObjPtr<Class> super_class = klass;
- do {
- total_reference_instance_fields +=
- super_class->NumReferenceInstanceFields<kVerifyFlags>();
- super_class = super_class->GetSuperClass<kVerifyFlags, kReadBarrierOption>();
- } while (super_class != nullptr);
- // The only reference field should be the object's class. This field is handled at the
- // beginning of the function.
- CHECK_EQ(total_reference_instance_fields, 1u);
- }
- }
+ DCHECK_EQ(class_flags, kClassFlagNoReferenceFields)
+ << "class_flags: " << std::hex << class_flags;
+ // Only possibility left is of a normal klass instance with no references.
+ size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
}
+ } else if (class_flags == kClassFlagClass) {
+ DCHECK(klass->IsClassClass<kVerifyFlags>());
+ ObjPtr<Class> as_klass = ObjPtr<Class>::DownCast(this);
+ as_klass->VisitReferences<kVisitNativeRoots, kVerifyFlags, kReadBarrierOption>(klass,
+ visitor);
+ size = kFetchObjSize ? as_klass->SizeOf<kSizeOfFlags>() : 0;
+ } else if (class_flags == kClassFlagObjectArray) {
+ DCHECK((klass->IsObjectArrayClass<kVerifyFlags, kReadBarrierOption>()));
+ ObjPtr<ObjectArray<Object>> obj_arr = ObjPtr<ObjectArray<Object>>::DownCast(this);
+ obj_arr->VisitReferences(visitor, begin, end);
+ size = kFetchObjSize ?
+ obj_arr->SizeOf<kSizeOfFlags, kReadBarrierOption, /*kIsObjArray*/ true>() :
+ 0;
+ } else if ((class_flags & kClassFlagReference) != 0) {
+ VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
+ // Visit referent also as this is about updating the reference only.
+ // There is no reference processing happening here.
+ visitor(this, mirror::Reference::ReferentOffset(), /* is_static= */ false);
+ size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
+ } else if (class_flags == kClassFlagDexCache) {
+ DCHECK(klass->IsDexCacheClass<kVerifyFlags>());
+ ObjPtr<DexCache> const dex_cache = ObjPtr<DexCache>::DownCast(this);
+ dex_cache->VisitReferences<kVisitNativeRoots,
+ kVerifyFlags,
+ kReadBarrierOption>(klass, visitor);
+ size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
+ } else if (class_flags == kClassFlagClassLoader) {
+ DCHECK(klass->IsClassLoaderClass<kVerifyFlags>());
+ ObjPtr<ClassLoader> const class_loader = ObjPtr<ClassLoader>::DownCast(this);
+ class_loader->VisitReferences<kVisitNativeRoots,
+ kVerifyFlags,
+ kReadBarrierOption>(klass, visitor);
+ size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
+ } else {
+ LOG(FATAL) << "Unexpected class flags: " << std::hex << class_flags
+ << " for " << klass->PrettyClass();
+ size = -1;
}
visitor(this, ClassOffset(), /* is_static= */ false);
return size;
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 95b9f86a4b..54a17b1d4e 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -111,8 +111,9 @@ class MANAGED LOCKABLE Object {
ALWAYS_INLINE void SetReadBarrierState(uint32_t rb_state) REQUIRES_SHARED(Locks::mutator_lock_);
- template<std::memory_order kMemoryOrder = std::memory_order_relaxed>
- ALWAYS_INLINE bool AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state)
+ ALWAYS_INLINE bool AtomicSetReadBarrierState(uint32_t expected_rb_state,
+ uint32_t rb_state,
+ std::memory_order order = std::memory_order_relaxed)
REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE uint32_t GetMarkBit() REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index 3c73cc569e..af0ee53854 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -347,10 +347,12 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
runtime->GetHeap()->PostForkChildAction(thread);
- // Setup an app startup complete task in case the app doesn't notify it
- // through VMRuntime::notifyStartupCompleted.
- static constexpr uint64_t kMaxAppStartupTimeNs = MsToNs(5000); // 5 seconds
- runtime->GetHeap()->AddHeapTask(new StartupCompletedTask(NanoTime() + kMaxAppStartupTimeNs));
+ if (!is_zygote) {
+ // Setup an app startup complete task in case the app doesn't notify it
+ // through VMRuntime::notifyStartupCompleted.
+ static constexpr uint64_t kMaxAppStartupTimeNs = MsToNs(5000); // 5 seconds
+ runtime->GetHeap()->AddHeapTask(new StartupCompletedTask(NanoTime() + kMaxAppStartupTimeNs));
+ }
if (runtime->GetJit() != nullptr) {
if (!is_system_server) {
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 7191ec23e2..ce2b5609fc 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -45,7 +45,6 @@
#include "dex/art_dex_file_loader.h"
#include "dex/dex_file_loader.h"
#include "exec_utils.h"
-#include "fmt/format.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "image.h"
@@ -61,8 +60,6 @@ namespace art {
using ::android::base::ConsumePrefix;
using ::android::base::StringPrintf;
-using ::fmt::literals::operator""_format; // NOLINT
-
static constexpr const char* kAnonymousDexPrefix = "Anonymous-DexFile@";
static constexpr const char* kVdexExtension = ".vdex";
static constexpr const char* kDmExtension = ".dm";
@@ -888,15 +885,15 @@ OatFileAssistant::OatFileInfo& OatFileAssistant::GetBestInfo() {
// If the odex is not useable, and we have a useable vdex, return the vdex
// instead.
- VLOG(oat) << "GetBestInfo checking odex next to the dex file ({})"_format(
- odex_.DisplayFilename());
+ VLOG(oat) << ART_FORMAT("GetBestInfo checking odex next to the dex file ({})",
+ odex_.DisplayFilename());
if (!odex_.IsUseable()) {
- VLOG(oat) << "GetBestInfo checking vdex next to the dex file ({})"_format(
- vdex_for_odex_.DisplayFilename());
+ VLOG(oat) << ART_FORMAT("GetBestInfo checking vdex next to the dex file ({})",
+ vdex_for_odex_.DisplayFilename());
if (vdex_for_odex_.IsUseable()) {
return vdex_for_odex_;
}
- VLOG(oat) << "GetBestInfo checking dm ({})"_format(dm_for_odex_.DisplayFilename());
+ VLOG(oat) << ART_FORMAT("GetBestInfo checking dm ({})", dm_for_odex_.DisplayFilename());
if (dm_for_odex_.IsUseable()) {
return dm_for_odex_;
}
@@ -907,7 +904,7 @@ OatFileAssistant::OatFileInfo& OatFileAssistant::GetBestInfo() {
// We cannot write to the odex location. This must be a system app.
// If the oat location is useable take it.
- VLOG(oat) << "GetBestInfo checking odex in dalvik-cache ({})"_format(oat_.DisplayFilename());
+ VLOG(oat) << ART_FORMAT("GetBestInfo checking odex in dalvik-cache ({})", oat_.DisplayFilename());
if (oat_.IsUseable()) {
return oat_;
}
@@ -915,29 +912,29 @@ OatFileAssistant::OatFileInfo& OatFileAssistant::GetBestInfo() {
// The oat file is not useable but the odex file might be up to date.
// This is an indication that we are dealing with an up to date prebuilt
// (that doesn't need relocation).
- VLOG(oat) << "GetBestInfo checking odex next to the dex file ({})"_format(
- odex_.DisplayFilename());
+ VLOG(oat) << ART_FORMAT("GetBestInfo checking odex next to the dex file ({})",
+ odex_.DisplayFilename());
if (odex_.IsUseable()) {
return odex_;
}
// Look for a useable vdex file.
- VLOG(oat) << "GetBestInfo checking vdex in dalvik-cache ({})"_format(
- vdex_for_oat_.DisplayFilename());
+ VLOG(oat) << ART_FORMAT("GetBestInfo checking vdex in dalvik-cache ({})",
+ vdex_for_oat_.DisplayFilename());
if (vdex_for_oat_.IsUseable()) {
return vdex_for_oat_;
}
- VLOG(oat) << "GetBestInfo checking vdex next to the dex file ({})"_format(
- vdex_for_odex_.DisplayFilename());
+ VLOG(oat) << ART_FORMAT("GetBestInfo checking vdex next to the dex file ({})",
+ vdex_for_odex_.DisplayFilename());
if (vdex_for_odex_.IsUseable()) {
return vdex_for_odex_;
}
- VLOG(oat) << "GetBestInfo checking dm ({})"_format(dm_for_oat_.DisplayFilename());
+ VLOG(oat) << ART_FORMAT("GetBestInfo checking dm ({})", dm_for_oat_.DisplayFilename());
if (dm_for_oat_.IsUseable()) {
return dm_for_oat_;
}
// TODO(jiakaiz): Is this the same as above?
- VLOG(oat) << "GetBestInfo checking dm ({})"_format(dm_for_odex_.DisplayFilename());
+ VLOG(oat) << ART_FORMAT("GetBestInfo checking dm ({})", dm_for_odex_.DisplayFilename());
if (dm_for_odex_.IsUseable()) {
return dm_for_odex_;
}
@@ -1158,18 +1155,21 @@ bool OatFileAssistant::OatFileInfo::ShouldRecompileForFilter(CompilerFilter::Fil
CompilerFilter::Filter current = file->GetCompilerFilter();
if (dexopt_trigger.targetFilterIsBetter && CompilerFilter::IsBetter(target, current)) {
- VLOG(oat) << "Should recompile: targetFilterIsBetter (current: {}, target: {})"_format(
- CompilerFilter::NameOfFilter(current), CompilerFilter::NameOfFilter(target));
+ VLOG(oat) << ART_FORMAT("Should recompile: targetFilterIsBetter (current: {}, target: {})",
+ CompilerFilter::NameOfFilter(current),
+ CompilerFilter::NameOfFilter(target));
return true;
}
if (dexopt_trigger.targetFilterIsSame && current == target) {
- VLOG(oat) << "Should recompile: targetFilterIsSame (current: {}, target: {})"_format(
- CompilerFilter::NameOfFilter(current), CompilerFilter::NameOfFilter(target));
+ VLOG(oat) << ART_FORMAT("Should recompile: targetFilterIsSame (current: {}, target: {})",
+ CompilerFilter::NameOfFilter(current),
+ CompilerFilter::NameOfFilter(target));
return true;
}
if (dexopt_trigger.targetFilterIsWorse && CompilerFilter::IsBetter(current, target)) {
- VLOG(oat) << "Should recompile: targetFilterIsWorse (current: {}, target: {})"_format(
- CompilerFilter::NameOfFilter(current), CompilerFilter::NameOfFilter(target));
+ VLOG(oat) << ART_FORMAT("Should recompile: targetFilterIsWorse (current: {}, target: {})",
+ CompilerFilter::NameOfFilter(current),
+ CompilerFilter::NameOfFilter(target));
return true;
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 36d7d7ed7a..7eff246d12 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -744,10 +744,77 @@ class UpdateMethodsPreFirstForkVisitor : public ClassVisitor {
DISALLOW_COPY_AND_ASSIGN(UpdateMethodsPreFirstForkVisitor);
};
+// Wait until the kernel thinks we are single-threaded again.
+static void WaitUntilSingleThreaded() {
+#if defined(__linux__)
+ // Read num_threads field from /proc/self/stat, avoiding higher-level IO libraries that may
+ // break atomicity of the read.
+ static constexpr size_t kNumTries = 1000;
+ static constexpr size_t kNumThreadsIndex = 20;
+ for (size_t tries = 0; tries < kNumTries; ++tries) {
+ static constexpr int BUF_SIZE = 500;
+ char buf[BUF_SIZE];
+ int stat_fd = open("/proc/self/stat", O_RDONLY | O_CLOEXEC);
+ CHECK(stat_fd >= 0) << strerror(errno);
+ ssize_t bytes_read = TEMP_FAILURE_RETRY(read(stat_fd, buf, BUF_SIZE));
+ CHECK(bytes_read >= 0) << strerror(errno);
+ int ret = close(stat_fd);
+ DCHECK(ret == 0) << strerror(errno);
+ ssize_t pos = 0;
+ while (pos < bytes_read && buf[pos++] != ')') {}
+ ++pos;
+ // We're now positioned at the beginning of the third field. Don't count blanks embedded in
+ // second (command) field.
+ size_t blanks_seen = 2;
+ while (pos < bytes_read && blanks_seen < kNumThreadsIndex - 1) {
+ if (buf[pos++] == ' ') {
+ ++blanks_seen;
+ }
+ }
+ CHECK(pos < bytes_read - 2);
+ // pos is first character of num_threads field.
+ CHECK_EQ(buf[pos + 1], ' '); // We never have more than single-digit threads here.
+ if (buf[pos] == '1') {
+ return; // num_threads == 1; success.
+ }
+ usleep(1000);
+ }
+ LOG(FATAL) << "Failed to reach single-threaded state";
+#else // Not Linux; shouldn't matter, but this has a high probability of working slowly.
+ usleep(20'000);
+#endif
+}
+
void Runtime::PreZygoteFork() {
if (GetJit() != nullptr) {
GetJit()->PreZygoteFork();
}
+ // All other threads have already been joined, but they may not have finished
+ // removing themselves from the thread list. Wait until the other threads have completely
+ // finished, and are no longer in the thread list.
+ // TODO: Since the threads Unregister() themselves before exiting, the first wait should be
+ // unnecessary. But since we're reading from a /proc entry that's concurrently changing, for
+ // now we play this as safe as possible.
+ ThreadList* tl = GetThreadList();
+ {
+ Thread* self = Thread::Current();
+ MutexLock mu(self, *Locks::thread_list_lock_);
+ tl->WaitForUnregisterToComplete(self);
+ if (kIsDebugBuild) {
+ auto list = tl->GetList();
+ if (list.size() != 1) {
+ for (Thread* t : list) {
+ std::string name;
+ t->GetThreadName(name);
+ LOG(ERROR) << "Remaining pre-fork thread: " << name;
+ }
+ }
+ }
+ CHECK_EQ(tl->Size(), 1u);
+ // And then wait until the kernel thinks the threads are gone.
+ WaitUntilSingleThreaded();
+ }
+
if (!heap_->HasZygoteSpace()) {
Thread* self = Thread::Current();
// This is the first fork. Update ArtMethods in the boot classpath now to
@@ -790,7 +857,11 @@ void Runtime::CallExitHook(jint status) {
}
void Runtime::SweepSystemWeaks(IsMarkedVisitor* visitor) {
- GetInternTable()->SweepInternTableWeaks(visitor);
+ // Userfaultfd compaction updates weak intern-table page-by-page via
+ // LinearAlloc.
+ if (!GetHeap()->IsPerformingUffdCompaction()) {
+ GetInternTable()->SweepInternTableWeaks(visitor);
+ }
GetMonitorList()->SweepMonitorList(visitor);
GetJavaVM()->SweepJniWeakGlobals(visitor);
GetHeap()->SweepAllocationRecords(visitor);
@@ -885,7 +956,8 @@ static jobject CreateSystemClassLoader(Runtime* runtime) {
CHECK(getSystemClassLoader->IsStatic());
ObjPtr<mirror::Object> system_class_loader = getSystemClassLoader->InvokeStatic<'L'>(soa.Self());
- CHECK(system_class_loader != nullptr);
+ CHECK(system_class_loader != nullptr)
+ << (soa.Self()->IsExceptionPending() ? soa.Self()->GetException()->Dump() : "<null>");
ScopedAssertNoThreadSuspension sants(__FUNCTION__);
jobject g_system_class_loader =
@@ -1620,9 +1692,9 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
InitializeApexVersions();
BackgroundGcOption background_gc =
- gUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground)
- : (gUseUserfaultfd ? BackgroundGcOption(xgc_option.collector_type_)
- : runtime_options.GetOrDefault(Opt::BackgroundGc));
+ gUseReadBarrier ? BackgroundGcOption(gc::kCollectorTypeCCBackground) :
+ (gUseUserfaultfd ? BackgroundGcOption(gc::kCollectorTypeCMCBackground) :
+ runtime_options.GetOrDefault(Opt::BackgroundGc));
heap_ = new gc::Heap(runtime_options.GetOrDefault(Opt::MemoryInitialSize),
runtime_options.GetOrDefault(Opt::HeapGrowthLimit),
@@ -2507,8 +2579,14 @@ void Runtime::VisitConstantRoots(RootVisitor* visitor) {
}
void Runtime::VisitConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags) {
- intern_table_->VisitRoots(visitor, flags);
- class_linker_->VisitRoots(visitor, flags);
+ // Userfaultfd compaction updates intern-tables and class-tables page-by-page
+ // via LinearAlloc. So don't visit them here.
+ if (GetHeap()->IsPerformingUffdCompaction()) {
+ class_linker_->VisitRoots(visitor, flags, /*visit_class_roots=*/false);
+ } else {
+ intern_table_->VisitRoots(visitor, flags);
+ class_linker_->VisitRoots(visitor, flags, /*visit_class_roots=*/true);
+ }
jni_id_manager_->VisitRoots(visitor);
heap_->VisitAllocationRecords(visitor);
if (jit_ != nullptr) {
diff --git a/runtime/runtime_image.cc b/runtime/runtime_image.cc
index fa475f552b..b586c8bb4a 100644
--- a/runtime/runtime_image.cc
+++ b/runtime/runtime_image.cc
@@ -431,7 +431,7 @@ class RuntimeImageHelper {
// generated in the image and put in the class table, boot classpath
// classes will be put in the class table.
ObjPtr<mirror::ClassLoader> class_loader = klass->GetClassLoader();
- if (class_loader == loader_.Get() || class_loader == nullptr) {
+ if (klass->IsResolved() && (class_loader == loader_.Get() || class_loader == nullptr)) {
handles_.NewHandle(klass);
}
return true;
@@ -477,6 +477,7 @@ class RuntimeImageHelper {
for (size_t i = 0, num_interfaces = cls->NumDirectInterfaces(); i < num_interfaces; ++i) {
other_class.Assign(cls->GetDirectInterface(i));
+ DCHECK(other_class != nullptr);
if (!CanEmit(other_class)) {
return false;
}
@@ -488,8 +489,9 @@ class RuntimeImageHelper {
if (cls == nullptr) {
return true;
}
+ DCHECK(cls->IsResolved());
// Only emit classes that are resolved and not erroneous.
- if (!cls->IsResolved() || cls->IsErroneous()) {
+ if (cls->IsErroneous()) {
return false;
}
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 9fa55f991d..84def824e2 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -1412,6 +1412,14 @@ void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
}
}
+void ThreadList::WaitForUnregisterToComplete(Thread* self) {
+ // We hold thread_list_lock_ .
+ while (unregistering_count_ != 0) {
+ LOG(WARNING) << "Waiting for a thread to finish unregistering";
+ Locks::thread_exit_cond_->Wait(self);
+ }
+}
+
void ThreadList::VisitRootsForSuspendedThreads(RootVisitor* visitor) {
Thread* const self = Thread::Current();
std::vector<Thread*> threads_to_visit;
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 51fac4a6ed..db06611d59 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -158,6 +158,10 @@ class ThreadList {
!Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
+ // Wait until there are no Unregister() requests in flight. Only makes sense when we know that
+ // no new calls can be made. e.g. because we're the last thread.
+ void WaitForUnregisterToComplete(Thread* self) REQUIRES(Locks::thread_list_lock_);
+
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags) const
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -175,6 +179,8 @@ class ThreadList {
return list_;
}
+ size_t Size() REQUIRES(Locks::thread_list_lock_) { return list_.size(); }
+
void DumpNativeStacks(std::ostream& os)
REQUIRES(!Locks::thread_list_lock_);
diff --git a/test/2247-checker-write-barrier-elimination/Android.bp b/test/2247-checker-write-barrier-elimination/Android.bp
index c9744e9b00..5848cb496e 100644
--- a/test/2247-checker-write-barrier-elimination/Android.bp
+++ b/test/2247-checker-write-barrier-elimination/Android.bp
@@ -15,7 +15,7 @@ package {
java_test {
name: "art-run-test-2247-checker-write-barrier-elimination",
defaults: ["art-run-test-defaults"],
- test_config_template: ":art-run-test-target-template",
+ test_config_template: ":art-run-test-target-no-test-suite-tag-template",
srcs: ["src/**/*.java"],
data: [
":art-run-test-2247-checker-write-barrier-elimination-expected-stdout",
diff --git a/test/849-records/build.py b/test/849-records/build.py
new file mode 100644
index 0000000000..3f9392c8da
--- /dev/null
+++ b/test/849-records/build.py
@@ -0,0 +1,22 @@
+#
+# Copyright (C) 2023 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def build(ctx):
+ # Use 17 and record annotations to compile records and generate annotations
+ # that let the runtime know what is a record class.
+ ctx.default_build(javac_source_arg="17",
+ javac_target_arg="17",
+ d8_flags=["-JDcom.android.tools.r8.emitRecordAnnotationsInDex"])
diff --git a/test/849-records/expected-stderr.txt b/test/849-records/expected-stderr.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/849-records/expected-stderr.txt
diff --git a/test/849-records/expected-stdout.txt b/test/849-records/expected-stdout.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/849-records/expected-stdout.txt
diff --git a/test/849-records/info.txt b/test/849-records/info.txt
new file mode 100644
index 0000000000..08cb76b5bd
--- /dev/null
+++ b/test/849-records/info.txt
@@ -0,0 +1 @@
+Regression test for b/297966050.
diff --git a/test/849-records/src/Main.java b/test/849-records/src/Main.java
new file mode 100644
index 0000000000..3512e0b906
--- /dev/null
+++ b/test/849-records/src/Main.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Main {
+ public static void main(String[] args) {
+ Foo f = new Foo(args);
+ if (!f.getClass().isRecord()) {
+ throw new Error("Expected " + f.getClass() + " to be a record");
+ }
+ // Trigger a GC, which used to crash when visiting an instance of a record class.
+ Runtime.getRuntime().gc();
+ }
+
+ record Foo(Object o) {}
+}
diff --git a/test/knownfailures.json b/test/knownfailures.json
index f6c0e25ebe..91cf968413 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -1,5 +1,10 @@
[
{
+ "tests": "2247-checker-write-barrier-elimination",
+ "description": ["Disable 2247- until we fix the WBE issue."],
+ "bug": "http://b/310755375"
+ },
+ {
"tests": "153-reference-stress",
"description": ["Disable 153-reference-stress temporarily until a fix",
"arrives."],
diff --git a/test/odsign/test-src/com/android/tests/odsign/OdrefreshFactoryHostTestBase.java b/test/odsign/test-src/com/android/tests/odsign/OdrefreshFactoryHostTestBase.java
index eaf2ec212c..1043ea4a5a 100644
--- a/test/odsign/test-src/com/android/tests/odsign/OdrefreshFactoryHostTestBase.java
+++ b/test/odsign/test-src/com/android/tests/odsign/OdrefreshFactoryHostTestBase.java
@@ -184,14 +184,14 @@ abstract public class OdrefreshFactoryHostTestBase extends BaseHostJUnit4Test {
}
@Test
- public void verifyEnableUffdGcChangeTriggersCompilation() throws Exception {
+ public void verifyPhenotypeFlagChangeTriggersCompilation() throws Exception {
// Simulate that the flag value is initially empty.
- mDeviceState.setPhenotypeFlag("enable_uffd_gc_2", null);
+ mDeviceState.setPhenotypeFlag("odrefresh_test_toggle", null);
long timeMs = mTestUtils.getCurrentTimeMs();
mTestUtils.runOdrefresh();
- mDeviceState.setPhenotypeFlag("enable_uffd_gc_2", "true");
+ mDeviceState.setPhenotypeFlag("odrefresh_test_toggle", "true");
timeMs = mTestUtils.getCurrentTimeMs();
mTestUtils.runOdrefresh();
@@ -213,7 +213,7 @@ abstract public class OdrefreshFactoryHostTestBase extends BaseHostJUnit4Test {
mTestUtils.getExpectedBootImageMainlineExtension(), timeMs);
mTestUtils.assertNotModifiedAfter(mTestUtils.getSystemServerExpectedArtifacts(), timeMs);
- mDeviceState.setPhenotypeFlag("enable_uffd_gc_2", null);
+ mDeviceState.setPhenotypeFlag("odrefresh_test_toggle", null);
mTestUtils.runOdrefresh();
diff --git a/test/odsign/test-src/com/android/tests/odsign/OdrefreshHostTest.java b/test/odsign/test-src/com/android/tests/odsign/OdrefreshHostTest.java
index ae275d3b32..fe727ca203 100644
--- a/test/odsign/test-src/com/android/tests/odsign/OdrefreshHostTest.java
+++ b/test/odsign/test-src/com/android/tests/odsign/OdrefreshHostTest.java
@@ -34,7 +34,7 @@ import java.util.HashSet;
import java.util.Set;
/**
- * Test to check end-to-end odrefresh invocations, but without odsign amd fs-verity involved.
+ * Test to check end-to-end odrefresh invocations, but without odsign and fs-verity involved.
*/
@RunWith(DeviceJUnit4ClassRunner.class)
public class OdrefreshHostTest extends BaseHostJUnit4Test {
@@ -130,14 +130,14 @@ public class OdrefreshHostTest extends BaseHostJUnit4Test {
}
@Test
- public void verifyEnableUffdGcChangeTriggersCompilation() throws Exception {
+ public void verifyPhenotypeFlagChangeTriggersCompilation() throws Exception {
// Simulate that the flag value is initially empty.
- mDeviceState.setPhenotypeFlag("enable_uffd_gc_2", null);
+ mDeviceState.setPhenotypeFlag("odrefresh_test_toggle", null);
long timeMs = mTestUtils.getCurrentTimeMs();
mTestUtils.runOdrefresh();
- mDeviceState.setPhenotypeFlag("enable_uffd_gc_2", "false");
+ mDeviceState.setPhenotypeFlag("odrefresh_test_toggle", "false");
timeMs = mTestUtils.getCurrentTimeMs();
mTestUtils.runOdrefresh();
@@ -148,7 +148,7 @@ public class OdrefreshHostTest extends BaseHostJUnit4Test {
mTestUtils.getExpectedBootImageMainlineExtension(), timeMs);
mTestUtils.assertNotModifiedAfter(mTestUtils.getSystemServerExpectedArtifacts(), timeMs);
- mDeviceState.setPhenotypeFlag("enable_uffd_gc_2", "true");
+ mDeviceState.setPhenotypeFlag("odrefresh_test_toggle", "true");
timeMs = mTestUtils.getCurrentTimeMs();
mTestUtils.runOdrefresh();
@@ -168,7 +168,7 @@ public class OdrefreshHostTest extends BaseHostJUnit4Test {
mTestUtils.getExpectedBootImageMainlineExtension(), timeMs);
mTestUtils.assertNotModifiedAfter(mTestUtils.getSystemServerExpectedArtifacts(), timeMs);
- mDeviceState.setPhenotypeFlag("enable_uffd_gc_2", null);
+ mDeviceState.setPhenotypeFlag("odrefresh_test_toggle", null);
timeMs = mTestUtils.getCurrentTimeMs();
mTestUtils.runOdrefresh();
diff --git a/test/run_test_build.py b/test/run_test_build.py
index 75cd64f061..f7984f657f 100755
--- a/test/run_test_build.py
+++ b/test/run_test_build.py
@@ -68,7 +68,7 @@ class BuildTestContext:
self.java_home = Path(os.environ.get("JAVA_HOME")).absolute()
self.java_path = self.java_home / "bin/java"
self.javac_path = self.java_home / "bin/javac"
- self.javac_args = "-g -Xlint:-options -source 1.8 -target 1.8"
+ self.javac_args = "-g -Xlint:-options"
# Helper functions to execute tools.
self.d8 = functools.partial(self.run, args.d8.absolute())
@@ -201,6 +201,8 @@ class BuildTestContext:
smali_args=[],
use_smali=True,
use_jasmin=True,
+ javac_source_arg="1.8",
+ javac_target_arg="1.8"
):
javac_classpath = javac_classpath.copy() # Do not modify default value.
@@ -266,7 +268,8 @@ class BuildTestContext:
dst_dir.mkdir(exist_ok=True)
args = self.javac_args.split(" ") + javac_args
args += ["-implicit:none", "-encoding", "utf8", "-d", dst_dir]
- if not self.jvm:
+ args += ["-source", javac_source_arg, "-target", javac_target_arg]
+ if not self.jvm and float(javac_target_arg) < 17.0:
args += ["-bootclasspath", self.bootclasspath]
if javac_classpath:
args += ["-classpath", javac_classpath]
diff --git a/test/utils/regen-test-files b/test/utils/regen-test-files
index d047c2354a..64846a7ef9 100755
--- a/test/utils/regen-test-files
+++ b/test/utils/regen-test-files
@@ -215,7 +215,9 @@ known_failing_tests = frozenset([
"993-breakpoints-non-debuggable",
"2243-single-step-default",
"2262-miranda-methods",
- "2262-default-conflict-methods"
+ "2262-default-conflict-methods",
+ # 2247-checker-write-barrier-elimination: Disabled while we investigate failures
+ "2247-checker-write-barrier-elimination"
])
known_failing_on_hwasan_tests = frozenset([
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index cac98cb131..41a06cd427 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -411,6 +411,6 @@ EOF
msginfo "Generating linkerconfig" "in $linkerconfig_out"
rm -rf $linkerconfig_out
mkdir -p $linkerconfig_out
- $ANDROID_HOST_OUT/bin/linkerconfig --target $linkerconfig_out --root $linkerconfig_root --vndk $platform_version
+ $ANDROID_HOST_OUT/bin/linkerconfig --target $linkerconfig_out --root $linkerconfig_root --vndk $platform_version --product_vndk $platform_version
msgnote "Don't be scared by \"Unable to access VNDK APEX\" message, it's not fatal"
fi