summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAndroid Build Coastguard Worker <android-build-coastguard-worker@google.com>2024-02-28 20:58:56 +0000
committerAndroid Build Coastguard Worker <android-build-coastguard-worker@google.com>2024-02-28 20:58:56 +0000
commit30412219cd7262d2238886f4c2cd7ce0a5674f5e (patch)
tree341f1d4cbffb898c790060f91eed4e181680a5bd
parent3c576baf0a6caf71b359079cbd27c56d4036cb24 (diff)
parentb73e8ce9721adac2d07a9e0d844ab767b34eeb0e (diff)
downloadart-30412219cd7262d2238886f4c2cd7ce0a5674f5e.tar.gz
Snap for 11510257 from b73e8ce9721adac2d07a9e0d844ab767b34eeb0e to simpleperf-release
Change-Id: Ie227a5926aaaffaa1455765c3d62afad69f329b5
-rw-r--r--TEST_MAPPING57
-rw-r--r--artd/artd_test.cc2
-rw-r--r--build/README.md4
-rw-r--r--build/boot/Android.bp1
-rw-r--r--build/makevars.go2
-rw-r--r--compiler/compiler_reflection_test.cc1
-rw-r--r--compiler/driver/compiler_options.h2
-rw-r--r--compiler/jni/quick/calling_convention.h10
-rw-r--r--compiler/jni/quick/jni_compiler.cc90
-rw-r--r--compiler/optimizing/code_generator_arm64.cc24
-rw-r--r--compiler/optimizing/code_generator_arm_vixl.cc5
-rw-r--r--compiler/optimizing/code_generator_riscv64.cc14
-rw-r--r--compiler/optimizing/code_generator_x86.cc6
-rw-r--r--compiler/optimizing/code_generator_x86_64.cc4
-rw-r--r--compiler/optimizing/constant_folding.cc9
-rw-r--r--compiler/optimizing/constructor_fence_redundancy_elimination.cc4
-rw-r--r--compiler/optimizing/dead_code_elimination.cc2
-rw-r--r--compiler/optimizing/graph_checker.cc83
-rw-r--r--compiler/optimizing/graph_checker.h2
-rw-r--r--compiler/optimizing/graph_visualizer.cc2
-rw-r--r--compiler/optimizing/gvn.cc2
-rw-r--r--compiler/optimizing/inliner.cc14
-rw-r--r--compiler/optimizing/instruction_builder.cc1
-rw-r--r--compiler/optimizing/instruction_simplifier.cc43
-rw-r--r--compiler/optimizing/intrinsic_objects.cc2
-rw-r--r--compiler/optimizing/intrinsics_arm64.cc2
-rw-r--r--compiler/optimizing/linearize_test.cc7
-rw-r--r--compiler/optimizing/live_ranges_test.cc6
-rw-r--r--compiler/optimizing/liveness_test.cc12
-rw-r--r--compiler/optimizing/load_store_elimination.cc6
-rw-r--r--compiler/optimizing/loop_optimization_test.cc10
-rw-r--r--compiler/optimizing/nodes.cc12
-rw-r--r--compiler/optimizing/nodes.h27
-rw-r--r--compiler/optimizing/nodes_shared.h4
-rw-r--r--compiler/optimizing/optimizing_compiler.cc7
-rw-r--r--compiler/optimizing/prepare_for_register_allocation.h2
-rw-r--r--compiler/optimizing/profiling_info_builder.cc12
-rw-r--r--compiler/optimizing/profiling_info_builder.h2
-rw-r--r--compiler/optimizing/reference_type_propagation.cc9
-rw-r--r--compiler/optimizing/scheduler.cc60
-rw-r--r--compiler/optimizing/scheduler.h41
-rw-r--r--compiler/optimizing/scheduler_arm.cc134
-rw-r--r--compiler/optimizing/scheduler_arm.h140
-rw-r--r--compiler/optimizing/scheduler_arm64.cc134
-rw-r--r--compiler/optimizing/scheduler_arm64.h135
-rw-r--r--compiler/optimizing/scheduler_test.cc6
-rw-r--r--compiler/optimizing/ssa_liveness_analysis_test.cc9
-rw-r--r--compiler/optimizing/stack_map_stream.h2
-rw-r--r--compiler/optimizing/write_barrier_elimination.cc8
-rw-r--r--compiler/trampolines/trampoline_compiler.cc8
-rw-r--r--compiler/utils/arm/jni_macro_assembler_arm_vixl.cc34
-rw-r--r--compiler/utils/arm/jni_macro_assembler_arm_vixl.h8
-rw-r--r--compiler/utils/arm64/jni_macro_assembler_arm64.cc37
-rw-r--r--compiler/utils/arm64/jni_macro_assembler_arm64.h8
-rw-r--r--compiler/utils/assembler_test_base.h4
-rw-r--r--compiler/utils/jni_macro_assembler.cc50
-rw-r--r--compiler/utils/jni_macro_assembler.h13
-rw-r--r--compiler/utils/riscv64/assembler_riscv64.cc296
-rw-r--r--compiler/utils/riscv64/assembler_riscv64.h414
-rw-r--r--compiler/utils/riscv64/assembler_riscv64_test.cc774
-rw-r--r--compiler/utils/riscv64/jni_macro_assembler_riscv64.cc2
-rw-r--r--compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc15
-rw-r--r--compiler/utils/x86/jni_macro_assembler_x86.cc2
-rw-r--r--compiler/utils/x86_64/jni_macro_assembler_x86_64.cc2
-rw-r--r--dex2oat/dex2oat_image_test.cc1
-rw-r--r--dex2oat/dex2oat_test.cc1
-rw-r--r--dex2oat/driver/compiler_driver_test.cc1
-rw-r--r--dex2oat/verifier_deps_test.cc1
-rw-r--r--disassembler/disassembler_riscv64.cc402
-rw-r--r--imgdiag/imgdiag_test.cc1
-rw-r--r--libartbase/base/common_art_test.h20
-rw-r--r--libartbase/base/safe_map.h2
-rw-r--r--libartbase/base/sdk_version.h1
-rw-r--r--libartpalette/apex/palette_test.cc13
-rw-r--r--libartservice/service/api/system-server-current.txt1
-rw-r--r--libartservice/service/java/com/android/server/art/ArtManagerLocal.java40
-rw-r--r--libartservice/service/java/com/android/server/art/BackgroundDexoptJob.java11
-rw-r--r--libartservice/service/java/com/android/server/art/BackgroundDexoptJobService.java14
-rw-r--r--libartservice/service/java/com/android/server/art/PreRebootDexoptJob.java87
-rw-r--r--libartservice/service/java/com/android/server/art/model/ArtServiceJobInterface.java30
-rw-r--r--libartservice/service/javatests/com/android/server/art/BackgroundDexoptJobTest.java3
-rw-r--r--libdexfile/dex/code_item_accessors-inl.h14
-rw-r--r--libdexfile/dex/code_item_accessors.h6
-rw-r--r--libdexfile/dex/dex_file-inl.h6
-rw-r--r--libdexfile/dex/dex_file.h18
-rw-r--r--libdexfile/dex/dex_file_loader.h11
-rw-r--r--libdexfile/dex/dex_file_verifier.cc86
-rw-r--r--libelffile/elf/elf_debug_reader.h11
-rw-r--r--libnativeloader/Android.bp1
-rw-r--r--libnativeloader/library_namespaces.cpp216
-rw-r--r--libnativeloader/library_namespaces.h50
-rw-r--r--libnativeloader/library_namespaces_test.cpp89
-rw-r--r--libnativeloader/native_loader.cpp248
-rw-r--r--libnativeloader/native_loader_lazy.cpp2
-rw-r--r--libnativeloader/native_loader_namespace.cpp8
-rw-r--r--libnativeloader/native_loader_test.cpp14
-rw-r--r--libnativeloader/public_libraries.cpp43
-rw-r--r--libnativeloader/test/Android.bp32
-rw-r--r--libnativeloader/test/src/android/test/app/DataAppTest.java73
-rw-r--r--libnativeloader/test/src/android/test/app/ProductAppTest.java101
-rw-r--r--libnativeloader/test/src/android/test/app/SystemAppTest.java59
-rw-r--r--libnativeloader/test/src/android/test/app/VendorAppTest.java83
-rw-r--r--libnativeloader/test/src/android/test/hostside/LibnativeloaderTest.java5
-rw-r--r--libnativeloader/test/src/android/test/lib/AppTestCommon.java192
-rw-r--r--libnativeloader/test/src/android/test/lib/TestUtils.java37
-rw-r--r--libnativeloader/test/src/android/test/productsharedlib/ProductSharedLib.java8
-rw-r--r--libnativeloader/test/src/android/test/systemextsharedlib/SystemExtSharedLib.java8
-rw-r--r--libnativeloader/test/src/android/test/systemsharedlib/SystemSharedLib.java8
-rw-r--r--libnativeloader/test/src/android/test/vendorsharedlib/VendorSharedLib.java8
-rw-r--r--oatdump/oatdump.cc14
-rw-r--r--oatdump/oatdump_app_test.cc15
-rw-r--r--oatdump/oatdump_test.cc4
-rw-r--r--odrefresh/Android.bp2
-rw-r--r--odrefresh/odrefresh.cc34
-rw-r--r--openjdkjvmti/ti_method.cc10
-rw-r--r--runtime/Android.bp1
-rw-r--r--runtime/art_method-inl.h5
-rw-r--r--runtime/common_runtime_test.h5
-rw-r--r--runtime/fault_handler.cc16
-rw-r--r--runtime/gc/space/image_space_test.cc2
-rw-r--r--runtime/gc/verification.cc2
-rw-r--r--runtime/jit/jit.cc94
-rw-r--r--runtime/jit/jit.h131
-rw-r--r--runtime/jit/jit_code_cache.cc156
-rw-r--r--runtime/jit/jit_options.cc117
-rw-r--r--runtime/jit/jit_options.h160
-rw-r--r--runtime/jit/profiling_info_test.cc2
-rw-r--r--runtime/jni/jni_env_ext.cc14
-rw-r--r--runtime/jni/jni_env_ext.h8
-rw-r--r--runtime/jni/jni_internal_test.cc9
-rw-r--r--runtime/metrics/statsd.cc34
-rw-r--r--runtime/metrics/statsd.h4
-rw-r--r--runtime/mirror/class.h5
-rw-r--r--runtime/mirror/dex_cache.h4
-rw-r--r--runtime/mirror/object-refvisitor-inl.h2
-rw-r--r--runtime/monitor.cc15
-rw-r--r--runtime/monitor.h10
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc13
-rw-r--r--runtime/native/dalvik_system_VMRuntime.h12
-rw-r--r--runtime/native/jdk_internal_misc_Unsafe.cc5
-rw-r--r--runtime/native/sun_misc_Unsafe.cc5
-rw-r--r--runtime/nterp_helpers.cc240
-rw-r--r--runtime/oat/elf_file.cc2
-rw-r--r--runtime/reflection.cc80
-rw-r--r--runtime/runtime.cc4
-rw-r--r--runtime/runtime_image.cc8
-rw-r--r--runtime/runtime_options.h2
-rw-r--r--runtime/thread-inl.h12
-rw-r--r--runtime/thread.cc45
-rw-r--r--runtime/thread.h20
-rw-r--r--runtime/thread_list.cc386
-rw-r--r--runtime/thread_list.h32
-rw-r--r--runtime/trace.cc441
-rw-r--r--runtime/trace.h44
-rw-r--r--runtime/well_known_classes.h3
-rw-r--r--test/115-native-bridge/nativebridge.cc5
-rw-r--r--test/2246-trace-v2/Android.bp40
-rw-r--r--test/2246-trace-v2/dump_trace.cc233
-rw-r--r--test/2246-trace-v2/expected-stderr.txt0
-rw-r--r--test/2246-trace-v2/expected-stdout.txt773
-rw-r--r--test/2246-trace-v2/info.txt2
-rw-r--r--test/2246-trace-v2/run.py22
-rw-r--r--test/2246-trace-v2/src/Main.java178
-rw-r--r--test/2247-checker-write-barrier-elimination/Android.bp2
-rw-r--r--test/2272-checker-codegen-honor-write-barrier-kind/Android.bp43
-rw-r--r--test/2272-checker-codegen-honor-write-barrier-kind/src/Main.java11
-rw-r--r--test/2273-checker-unreachable-intrinsics/Android.bp43
-rw-r--r--test/2274-checker-bitwise-gvn/expected-stderr.txt0
-rw-r--r--test/2274-checker-bitwise-gvn/expected-stdout.txt0
-rw-r--r--test/2274-checker-bitwise-gvn/info.txt2
-rw-r--r--test/2274-checker-bitwise-gvn/src/Main.java54
-rw-r--r--test/458-checker-instruct-simplification/src/Main.java221
-rw-r--r--test/557-checker-instruct-simplifier-ror/src/Main.java77
-rw-r--r--test/638-checker-inline-cache-intrinsic/src/Main.java4
-rw-r--r--test/928-jni-table/jni_table.cc1
-rw-r--r--test/941-recursive-obsolete-jit/src/Main.java6
-rw-r--r--test/943-private-recursive-jit/src/Main.java6
-rw-r--r--test/Android.bp1
-rw-r--r--test/common/stack_inspect.cc2
-rw-r--r--test/knownfailures.json3
-rwxr-xr-xtest/run-test16
-rw-r--r--test/testrunner/env.py15
-rwxr-xr-xtest/utils/regen-test-files125
-rwxr-xr-xtools/buildbot-build.sh2
-rwxr-xr-xtools/buildbot-utils.sh2
-rwxr-xr-xtools/buildbot-vm.sh18
-rw-r--r--tools/create_minidebuginfo/create_minidebuginfo.cc8
-rw-r--r--tools/fuzzer/corpus/recursive_encoded_array.dexbin0 -> 108883 bytes
-rw-r--r--tools/libcore_failures.txt3
-rw-r--r--tools/libcore_gcstress_debug_failures.txt13
-rw-r--r--tools/luci/config/generated/cr-buildbucket.cfg58
-rw-r--r--tools/luci/config/generated/luci-logdog.cfg2
-rw-r--r--tools/luci/config/generated/luci-milo.cfg2
-rw-r--r--tools/luci/config/generated/luci-notify.cfg15
-rw-r--r--tools/luci/config/generated/luci-scheduler.cfg16
-rw-r--r--tools/luci/config/generated/project.cfg4
-rw-r--r--tools/luci/config/generated/realms.cfg2
-rwxr-xr-xtools/luci/config/main.star39
198 files changed, 7017 insertions, 2187 deletions
diff --git a/TEST_MAPPING b/TEST_MAPPING
index d7bde8792d..038aaf9ef3 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -494,6 +494,15 @@
"name": "art-run-test-2265-checker-select-binary-unary[com.google.android.art.apex]"
},
{
+ "name": "art-run-test-2266-checker-remove-empty-ifs[com.google.android.art.apex]"
+ },
+ {
+ "name": "art-run-test-2268-checker-remove-dead-phis[com.google.android.art.apex]"
+ },
+ {
+ "name": "art-run-test-2269-checker-constant-folding-instrinsics[com.google.android.art.apex]"
+ },
+ {
"name": "art-run-test-300-package-override[com.google.android.art.apex]"
},
{
@@ -1328,6 +1337,12 @@
"name": "art-run-test-843-default-interface[com.google.android.art.apex]"
},
{
+ "name": "art-run-test-851-null-instanceof[com.google.android.art.apex]"
+ },
+ {
+ "name": "art-run-test-853-checker-inlining[com.google.android.art.apex]"
+ },
+ {
"name": "art-run-test-963-default-range-smali[com.google.android.art.apex]"
},
{
@@ -1922,6 +1937,15 @@
"name": "art-run-test-2265-checker-select-binary-unary"
},
{
+ "name": "art-run-test-2266-checker-remove-empty-ifs"
+ },
+ {
+ "name": "art-run-test-2268-checker-remove-dead-phis"
+ },
+ {
+ "name": "art-run-test-2269-checker-constant-folding-instrinsics"
+ },
+ {
"name": "art-run-test-300-package-override"
},
{
@@ -2756,6 +2780,12 @@
"name": "art-run-test-843-default-interface"
},
{
+ "name": "art-run-test-851-null-instanceof"
+ },
+ {
+ "name": "art-run-test-853-checker-inlining"
+ },
+ {
"name": "art-run-test-963-default-range-smali"
},
{
@@ -3337,6 +3367,15 @@
"name": "art-run-test-2265-checker-select-binary-unary"
},
{
+ "name": "art-run-test-2266-checker-remove-empty-ifs"
+ },
+ {
+ "name": "art-run-test-2268-checker-remove-dead-phis"
+ },
+ {
+ "name": "art-run-test-2269-checker-constant-folding-instrinsics"
+ },
+ {
"name": "art-run-test-300-package-override"
},
{
@@ -4171,6 +4210,12 @@
"name": "art-run-test-843-default-interface"
},
{
+ "name": "art-run-test-851-null-instanceof"
+ },
+ {
+ "name": "art-run-test-853-checker-inlining"
+ },
+ {
"name": "art-run-test-963-default-range-smali"
},
{
@@ -4253,19 +4298,13 @@
],
"postsubmit": [
{
- "name": "art-run-test-2266-checker-remove-empty-ifs"
- },
- {
- "name": "art-run-test-2268-checker-remove-dead-phis"
- },
- {
- "name": "art-run-test-2269-checker-constant-folding-instrinsics"
+ "name": "art-run-test-2247-checker-write-barrier-elimination"
},
{
- "name": "art-run-test-851-null-instanceof"
+ "name": "art-run-test-2273-checker-unreachable-intrinsics"
},
{
- "name": "art-run-test-853-checker-inlining"
+ "name": "art_standalone_dexopt_chroot_setup_tests"
}
]
}
diff --git a/artd/artd_test.cc b/artd/artd_test.cc
index 79fa7cd4e1..8a704464ea 100644
--- a/artd/artd_test.cc
+++ b/artd/artd_test.cc
@@ -2200,7 +2200,7 @@ TEST_F(ArtdTest, cleanup) {
},
{
RuntimeArtifactsPath{
- .packageName = "com.android.foo", .isa = "arm64", .dexPath = "/a/b/base.apk"},
+ .packageName = "com.android.foo", .dexPath = "/a/b/base.apk", .isa = "arm64"},
},
&aidl_return)
.isOk());
diff --git a/build/README.md b/build/README.md
index 2c8413075d..7b4e402376 100644
--- a/build/README.md
+++ b/build/README.md
@@ -8,10 +8,6 @@ directory) of platform releases, to ensure it is always available.
The recommended way to build the ART Module is to use the `master-art` manifest,
which only has the sources and dependencies required for the module.
-Currently it is also possible to build ART directly from sources in a platform
-build, i.e. as has been the traditional way up until Android S. However that
-method is being phased out.
-
The ART Module is available as a debug variant, `com.android.art.debug.apex`,
which has extra internal consistency checks enabled, and some debug tools. A
device cannot have both the non-debug and debug variants installed at once - it
diff --git a/build/boot/Android.bp b/build/boot/Android.bp
index e0bf395872..a2fa55ca42 100644
--- a/build/boot/Android.bp
+++ b/build/boot/Android.bp
@@ -142,6 +142,7 @@ bootclasspath_fragment {
// result in a build failure due to inconsistent flags.
package_prefixes: [
"android.compat",
+ "android.crypto.hpke",
"com.android.okhttp",
"com.android.org.bouncycastle",
"com.android.org.kxml2",
diff --git a/build/makevars.go b/build/makevars.go
index b35ee8561d..00124ec621 100644
--- a/build/makevars.go
+++ b/build/makevars.go
@@ -56,7 +56,7 @@ func makeVarsProvider(ctx android.MakeVarsContext) {
sort.Strings(testNames)
for _, name := range testNames {
- ctx.Strict("ART_TEST_LIST_"+name, strings.Join(testMap[name], " "))
+ ctx.Strict("ART_TEST_LIST_"+name, strings.Join(android.FirstUniqueStrings(testMap[name]), " "))
}
// Create list of copy commands to install the content of the testcases directory.
diff --git a/compiler/compiler_reflection_test.cc b/compiler/compiler_reflection_test.cc
index d8e2b9e5b9..f3c07db136 100644
--- a/compiler/compiler_reflection_test.cc
+++ b/compiler/compiler_reflection_test.cc
@@ -29,7 +29,6 @@ namespace art HIDDEN {
class CompilerReflectionTest : public CommonCompilerTest {};
TEST_F(CompilerReflectionTest, StaticMainMethod) {
- TEST_DISABLED_FOR_RISCV64();
ScopedObjectAccess soa(Thread::Current());
jobject jclass_loader = LoadDex("Main");
StackHandleScope<1> hs(soa.Self());
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index eaf9cc2fdb..6b3e26fc70 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -69,7 +69,7 @@ class CompilerOptions final {
static constexpr size_t kUnsetInlineMaxCodeUnits = -1;
// We set a lower inlining threshold for baseline to reduce code size and compilation time. This
// cannot be changed via flags.
- static constexpr size_t kBaselineInlineMaxCodeUnits = 8;
+ static constexpr size_t kBaselineInlineMaxCodeUnits = 14;
enum class CompilerType : uint8_t {
kAotCompiler, // AOT compiler.
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index 2657e943e6..b8b4cc14b1 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -324,9 +324,11 @@ class JniCallingConvention : public CallingConvention {
virtual ArrayRef<const ManagedRegister> CalleeSaveRegisters() const = 0;
// Subset of core callee save registers that can be used for arbitrary purposes after
- // constructing the JNI transition frame. These should be managed callee-saves as well.
+ // constructing the JNI transition frame. These should be both managed and native callee-saves.
// These should not include special purpose registers such as thread register.
- // JNI compiler currently requires at least 3 callee save scratch registers.
+ // JNI compiler currently requires at least 4 callee save scratch registers, except for x86
+ // where we have only 3 such registers but all args are passed on stack, so the method register
+ // is never clobbered by argument moves and does not need to be preserved elsewhere.
virtual ArrayRef<const ManagedRegister> CalleeSaveScratchRegisters() const = 0;
// Subset of core argument registers that can be used for arbitrary purposes after
@@ -356,10 +358,6 @@ class JniCallingConvention : public CallingConvention {
virtual ~JniCallingConvention() {}
- static constexpr size_t SavedLocalReferenceCookieSize() {
- return 4u;
- }
-
bool IsFastNative() const {
return is_fast_native_;
}
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 3357a5f8d7..c721825683 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -38,6 +38,7 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "instrumentation.h"
#include "jni/jni_env_ext.h"
+#include "jni/local_reference_table.h"
#include "runtime.h"
#include "thread.h"
#include "utils/arm/managed_register_arm.h"
@@ -51,19 +52,6 @@
namespace art HIDDEN {
-constexpr size_t kIRTCookieSize = JniCallingConvention::SavedLocalReferenceCookieSize();
-
-template <PointerSize kPointerSize>
-static void PushLocalReferenceFrame(JNIMacroAssembler<kPointerSize>* jni_asm,
- ManagedRegister jni_env_reg,
- ManagedRegister saved_cookie_reg,
- ManagedRegister temp_reg);
-template <PointerSize kPointerSize>
-static void PopLocalReferenceFrame(JNIMacroAssembler<kPointerSize>* jni_asm,
- ManagedRegister jni_env_reg,
- ManagedRegister saved_cookie_reg,
- ManagedRegister temp_reg);
-
template <PointerSize kPointerSize>
static void SetNativeParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
JniCallingConvention* jni_conv,
@@ -306,22 +294,30 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
// 3. Push local reference frame.
// Skip this for @CriticalNative methods, they cannot use any references.
ManagedRegister jni_env_reg = ManagedRegister::NoRegister();
- ManagedRegister saved_cookie_reg = ManagedRegister::NoRegister();
+ ManagedRegister previous_state_reg = ManagedRegister::NoRegister();
+ ManagedRegister current_state_reg = ManagedRegister::NoRegister();
ManagedRegister callee_save_temp = ManagedRegister::NoRegister();
if (LIKELY(!is_critical_native)) {
// To pop the local reference frame later, we shall need the JNI environment pointer
// as well as the cookie, so we preserve them across calls in callee-save registers.
CHECK_GE(callee_save_scratch_regs.size(), 3u); // At least 3 for each supported architecture.
jni_env_reg = callee_save_scratch_regs[0];
- saved_cookie_reg = __ CoreRegisterWithSize(callee_save_scratch_regs[1], kIRTCookieSize);
- callee_save_temp = __ CoreRegisterWithSize(callee_save_scratch_regs[2], kIRTCookieSize);
+ constexpr size_t kLRTSegmentStateSize = sizeof(jni::LRTSegmentState);
+ previous_state_reg = __ CoreRegisterWithSize(callee_save_scratch_regs[1], kLRTSegmentStateSize);
+ current_state_reg = __ CoreRegisterWithSize(callee_save_scratch_regs[2], kLRTSegmentStateSize);
+ if (callee_save_scratch_regs.size() >= 4) {
+ callee_save_temp = callee_save_scratch_regs[3];
+ }
+ const MemberOffset previous_state_offset = JNIEnvExt::LrtPreviousStateOffset(kPointerSize);
// Load the JNI environment pointer.
__ LoadRawPtrFromThread(jni_env_reg, Thread::JniEnvOffset<kPointerSize>());
- // Push the local reference frame.
- PushLocalReferenceFrame<kPointerSize>(
- jni_asm.get(), jni_env_reg, saved_cookie_reg, callee_save_temp);
+ // Load the local reference frame states.
+ __ LoadLocalReferenceTableStates(jni_env_reg, previous_state_reg, current_state_reg);
+
+ // Store the current state as the previous state (push the LRT frame).
+ __ Store(jni_env_reg, previous_state_offset, current_state_reg, kLRTSegmentStateSize);
}
// 4. Make the main native call.
@@ -344,6 +340,7 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
refs.clear();
mr_conv->ResetIterator(FrameOffset(current_frame_size));
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+ bool check_method_not_clobbered = false;
if (UNLIKELY(is_critical_native)) {
// Move the method pointer to the hidden argument register.
// TODO: Pass this as the last argument, not first. Change ARM assembler
@@ -354,10 +351,16 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
} else {
main_jni_conv->Next(); // Skip JNIEnv*.
FrameOffset method_offset(current_out_arg_size + mr_conv->MethodStackOffset().SizeValue());
- if (!is_static || main_jni_conv->IsCurrentParamOnStack()) {
+ if (main_jni_conv->IsCurrentParamOnStack()) {
+ // This is for x86 only. The method shall not be clobbered by argument moves
+ // because all arguments are passed on the stack to the native method.
+ check_method_not_clobbered = true;
+ DCHECK(callee_save_temp.IsNoRegister());
+ } else if (!is_static) {
// The method shall not be available in the `jclass` argument register.
// Make sure it is available in `callee_save_temp` for the call below.
// (The old method register can be clobbered by argument moves.)
+ DCHECK(!callee_save_temp.IsNoRegister());
ManagedRegister new_method_reg = __ CoreRegisterWithSize(callee_save_temp, kRawPointerSize);
DCHECK(!method_register.IsNoRegister());
__ Move(new_method_reg, method_register, kRawPointerSize);
@@ -399,6 +402,10 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
refs.push_back(is_reference ? mr_conv->CurrentParamStackOffset() : kInvalidReferenceOffset);
}
DCHECK(!main_jni_conv->HasNext());
+ DCHECK_IMPLIES(check_method_not_clobbered,
+ std::all_of(dest_args.begin(),
+ dest_args.end(),
+ [](const ArgumentLocation& loc) { return !loc.IsRegister(); }));
__ MoveArguments(ArrayRef<ArgumentLocation>(dest_args),
ArrayRef<ArgumentLocation>(src_args),
ArrayRef<FrameOffset>(refs));
@@ -525,8 +532,10 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
// 6. Pop local reference frame.
if (LIKELY(!is_critical_native)) {
- PopLocalReferenceFrame<kPointerSize>(
- jni_asm.get(), jni_env_reg, saved_cookie_reg, callee_save_temp);
+ __ StoreLocalReferenceTableStates(jni_env_reg, previous_state_reg, current_state_reg);
+ // For x86, the `callee_save_temp` is not valid, so let's simply change it to one
+ // of the callee save registers that we don't need anymore for all architectures.
+ callee_save_temp = current_state_reg;
}
// 7. Return from the JNI stub.
@@ -658,8 +667,7 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
jni_asm->cfi().AdjustCFAOffset(main_out_arg_size);
__ DecreaseFrameSize(main_out_arg_size);
}
- PopLocalReferenceFrame<kPointerSize>(
- jni_asm.get(), jni_env_reg, saved_cookie_reg, callee_save_temp);
+ __ StoreLocalReferenceTableStates(jni_env_reg, previous_state_reg, current_state_reg);
}
DCHECK_EQ(jni_asm->cfi().GetCurrentCFAOffset(), static_cast<int>(current_frame_size));
__ DeliverPendingException();
@@ -736,40 +744,6 @@ static JniCompiledMethod ArtJniCompileMethodInternal(const CompilerOptions& comp
}
template <PointerSize kPointerSize>
-static void PushLocalReferenceFrame(JNIMacroAssembler<kPointerSize>* jni_asm,
- ManagedRegister jni_env_reg,
- ManagedRegister saved_cookie_reg,
- ManagedRegister temp_reg) {
- const size_t kRawPointerSize = static_cast<size_t>(kPointerSize);
- const MemberOffset jni_env_cookie_offset = JNIEnvExt::LocalRefCookieOffset(kRawPointerSize);
- const MemberOffset jni_env_segment_state_offset = JNIEnvExt::SegmentStateOffset(kRawPointerSize);
-
- // Load the old cookie that we shall need to restore.
- __ Load(saved_cookie_reg, jni_env_reg, jni_env_cookie_offset, kIRTCookieSize);
-
- // Set the cookie in JNI environment to the current segment state.
- __ Load(temp_reg, jni_env_reg, jni_env_segment_state_offset, kIRTCookieSize);
- __ Store(jni_env_reg, jni_env_cookie_offset, temp_reg, kIRTCookieSize);
-}
-
-template <PointerSize kPointerSize>
-static void PopLocalReferenceFrame(JNIMacroAssembler<kPointerSize>* jni_asm,
- ManagedRegister jni_env_reg,
- ManagedRegister saved_cookie_reg,
- ManagedRegister temp_reg) {
- const size_t kRawPointerSize = static_cast<size_t>(kPointerSize);
- const MemberOffset jni_env_cookie_offset = JNIEnvExt::LocalRefCookieOffset(kRawPointerSize);
- const MemberOffset jni_env_segment_state_offset = JNIEnvExt::SegmentStateOffset(kRawPointerSize);
-
- // Set the current segment state to the current cookie in JNI environment.
- __ Load(temp_reg, jni_env_reg, jni_env_cookie_offset, kIRTCookieSize);
- __ Store(jni_env_reg, jni_env_segment_state_offset, temp_reg, kIRTCookieSize);
-
- // Restore the cookie in JNI environment to the saved value.
- __ Store(jni_env_reg, jni_env_cookie_offset, saved_cookie_reg, kIRTCookieSize);
-}
-
-template <PointerSize kPointerSize>
static void SetNativeParameter(JNIMacroAssembler<kPointerSize>* jni_asm,
JniCallingConvention* jni_conv,
ManagedRegister in_reg) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 5ba26b4754..50b86489ee 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -37,6 +37,7 @@
#include "intrinsics_arm64.h"
#include "intrinsics_list.h"
#include "intrinsics_utils.h"
+#include "jit/profiling_info.h"
#include "linker/linker_patch.h"
#include "lock_word.h"
#include "mirror/array-inl.h"
@@ -1306,7 +1307,9 @@ void CodeGeneratorARM64::MaybeIncrementHotness(HSuspendCheck* suspend_check, boo
__ Bind(&done);
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() &&
+ GetGraph()->IsUsefulOptimizing() &&
+ !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
@@ -2353,14 +2356,12 @@ void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
codegen_->StoreNeedsWriteBarrier(field_type, instruction->InputAt(1), write_barrier_kind);
if (needs_write_barrier) {
- // TODO(solanes): If we do a `HuntForOriginalReference` call to the value in WBE, we will be
- // able to DCHECK that the write_barrier_kind is kBeingReliedOn when Register(value).IsZero(),
- // and we could remove the `!Register(value).IsZero()` from below.
- codegen_->MaybeMarkGCCard(obj,
- Register(value),
- value_can_be_null &&
- write_barrier_kind == WriteBarrierKind::kEmitNotBeingReliedOn &&
- !Register(value).IsZero());
+ DCHECK_IMPLIES(Register(value).IsZero(),
+ write_barrier_kind == WriteBarrierKind::kEmitBeingReliedOn);
+ codegen_->MaybeMarkGCCard(
+ obj,
+ Register(value),
+ value_can_be_null && write_barrier_kind == WriteBarrierKind::kEmitNotBeingReliedOn);
} else if (codegen_->ShouldCheckGCCard(field_type, instruction->InputAt(1), write_barrier_kind)) {
codegen_->CheckGCCardIsValid(obj);
}
@@ -3049,9 +3050,8 @@ void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
// write barrier when its value is null (without an extra cbz since we already checked if the
// value is null for the type check). This will be done as a follow-up since it is a runtime
// optimization that needs extra care.
- // TODO(solanes): We can also skip it for known zero values which are not relied on i.e. when
- // we have the Zero register as the value. If we do `HuntForOriginalReference` on the value
- // we'll resolve this.
+ DCHECK_IMPLIES(Register(value).IsZero(),
+ write_barrier_kind == WriteBarrierKind::kEmitBeingReliedOn);
codegen_->MarkGCCard(array);
UseScratchRegisterScope temps(masm);
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 85f61f5303..0ed3f8b2c9 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -36,6 +36,7 @@
#include "intrinsics_arm_vixl.h"
#include "intrinsics_list.h"
#include "intrinsics_utils.h"
+#include "jit/profiling_info.h"
#include "linker/linker_patch.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
@@ -2302,7 +2303,9 @@ void CodeGeneratorARMVIXL::MaybeIncrementHotness(HSuspendCheck* suspend_check,
}
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() &&
+ GetGraph()->IsUsefulOptimizing() &&
+ !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index ed57683e0a..abbd74ac65 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -2518,8 +2518,7 @@ void InstructionCodeGeneratorRISCV64::HandleFieldSet(HInstruction* instruction,
codegen_->StoreNeedsWriteBarrier(type, instruction->InputAt(1), write_barrier_kind);
if (needs_write_barrier) {
if (value.IsConstant()) {
- // TODO(solanes): If we do a `HuntForOriginalReference` call to the value in WBE, we will be
- // able to DCHECK that the write_barrier_kind is kBeingReliedOn.
+ DCHECK_EQ(write_barrier_kind, WriteBarrierKind::kEmitBeingReliedOn);
codegen_->MarkGCCard(obj);
} else {
codegen_->MaybeMarkGCCard(
@@ -2947,9 +2946,8 @@ void InstructionCodeGeneratorRISCV64::VisitArraySet(HArraySet* instruction) {
if (needs_write_barrier) {
DCHECK_EQ(value_type, DataType::Type::kReference);
+ DCHECK_IMPLIES(value.IsConstant(), value.GetConstant()->IsArithmeticZero());
const bool storing_constant_zero = value.IsConstant();
- // TODO(solanes): If we do a `HuntForOriginalReference` call to the value in WBE, we will be
- // able to DCHECK that the write_barrier_kind is kBeingReliedOn when we have a constant.
if (!storing_constant_zero) {
Riscv64Label do_store;
@@ -3010,11 +3008,13 @@ void InstructionCodeGeneratorRISCV64::VisitArraySet(HArraySet* instruction) {
}
}
- DCHECK_NE(instruction->GetWriteBarrierKind(), WriteBarrierKind::kDontEmit);
+ DCHECK_NE(write_barrier_kind, WriteBarrierKind::kDontEmit);
// TODO(solanes): The WriteBarrierKind::kEmitNotBeingReliedOn case should be able to skip
// this write barrier when its value is null (without an extra Beqz since we already checked
// if the value is null for the type check). This will be done as a follow-up since it is a
// runtime optimization that needs extra care.
+ DCHECK_IMPLIES(storing_constant_zero,
+ write_barrier_kind == WriteBarrierKind::kEmitBeingReliedOn);
codegen_->MarkGCCard(array);
} else if (codegen_->ShouldCheckGCCard(value_type, instruction->GetValue(), write_barrier_kind)) {
codegen_->CheckGCCardIsValid(array);
@@ -5763,7 +5763,9 @@ void CodeGeneratorRISCV64::MaybeIncrementHotness(HSuspendCheck* suspend_check,
__ Bind(&done);
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() &&
+ GetGraph()->IsUsefulOptimizing() &&
+ !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
DCHECK(!HasEmptyFrame());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a61dca3022..21d3492e8a 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1357,9 +1357,9 @@ void CodeGeneratorX86::MaybeIncrementHotness(HSuspendCheck* suspend_check, bool
}
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
- // Note the slow path doesn't save SIMD registers, so if we were to
- // call it on loop back edge, we would need to fix this.
+ if (GetGraph()->IsCompilingBaseline() &&
+ GetGraph()->IsUsefulOptimizing() &&
+ !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
uint32_t address = reinterpret_cast32<uint32_t>(info) +
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index db4062b00d..af6c6255e5 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1788,7 +1788,9 @@ void CodeGeneratorX86_64::MaybeIncrementHotness(HSuspendCheck* suspend_check, bo
__ Bind(&overflow);
}
- if (GetGraph()->IsCompilingBaseline() && !Runtime::Current()->IsAotCompiler()) {
+ if (GetGraph()->IsCompilingBaseline() &&
+ GetGraph()->IsUsefulOptimizing() &&
+ !Runtime::Current()->IsAotCompiler()) {
ProfilingInfo* info = GetGraph()->GetProfilingInfo();
DCHECK(info != nullptr);
CHECK(!HasEmptyFrame());
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index b0a65ca64d..f57d8ade16 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -117,12 +117,9 @@ bool HConstantFolding::Run() {
void HConstantFoldingVisitor::VisitBasicBlock(HBasicBlock* block) {
- // Traverse this block's instructions (phis don't need to be
- // processed) in (forward) order and replace the ones that can be
- // statically evaluated by a compile-time counterpart.
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- it.Current()->Accept(this);
- }
+ // Traverse this block's instructions (phis don't need to be processed) in (forward) order
+ // and replace the ones that can be statically evaluated by a compile-time counterpart.
+ VisitNonPhiInstructions(block);
}
void HConstantFoldingVisitor::VisitUnaryOperation(HUnaryOperation* inst) {
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.cc b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
index 9b3bb91d21..30c33dd5c5 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.cc
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.cc
@@ -37,8 +37,8 @@ class CFREVisitor final : public HGraphVisitor {
stats_(stats) {}
void VisitBasicBlock(HBasicBlock* block) override {
- // Visit all instructions in block.
- HGraphVisitor::VisitBasicBlock(block);
+ // Visit all non-Phi instructions in the block.
+ VisitNonPhiInstructions(block);
// If there were any unmerged fences left, merge them together,
// the objects are considered 'published' at the end of the block.
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 6746771fa4..fe1361c935 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -937,7 +937,7 @@ void HDeadCodeElimination::RemoveDeadInstructions() {
for (HBackwardInstructionIterator phi_it(block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
DCHECK(phi_it.Current()->IsPhi());
HPhi* phi = phi_it.Current()->AsPhi();
- if (phi->IsDeadAndRemovable()) {
+ if (phi->IsPhiDeadAndRemovable()) {
block->RemovePhi(phi);
MaybeRecordStat(stats_, MethodCompilationStat::kRemovedDeadPhi);
}
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index d60ec06097..b061b401ff 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -304,10 +304,11 @@ void GraphChecker::VisitBasicBlock(HBasicBlock* block) {
current_block_->GetBlockId()));
}
if (current->GetNext() == nullptr && current != block->GetLastInstruction()) {
- AddError(StringPrintf("The recorded last instruction of block %d does not match "
- "the actual last instruction %d.",
- current_block_->GetBlockId(),
- current->GetId()));
+ AddError(
+ StringPrintf("The recorded last instruction of block %d does not match "
+ "the actual last instruction %d.",
+ current_block_->GetBlockId(),
+ current->GetId()));
}
current->Accept(this);
}
@@ -657,15 +658,18 @@ void GraphChecker::VisitInstruction(HInstruction* instruction) {
// Ensure an instruction dominates all its uses.
for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
HInstruction* user = use.GetUser();
- if (!user->IsPhi() && !instruction->StrictlyDominates(user)) {
- AddError(StringPrintf("Instruction %s:%d in block %d does not dominate "
- "use %s:%d in block %d.",
- instruction->DebugName(),
- instruction->GetId(),
- current_block_->GetBlockId(),
- user->DebugName(),
- user->GetId(),
- user->GetBlock()->GetBlockId()));
+ if (!user->IsPhi() && (instruction->GetBlock() == user->GetBlock()
+ ? seen_ids_.IsBitSet(user->GetId())
+ : !instruction->GetBlock()->Dominates(user->GetBlock()))) {
+ AddError(
+ StringPrintf("Instruction %s:%d in block %d does not dominate "
+ "use %s:%d in block %d.",
+ instruction->DebugName(),
+ instruction->GetId(),
+ current_block_->GetBlockId(),
+ user->DebugName(),
+ user->GetId(),
+ user->GetBlock()->GetBlockId()));
}
}
@@ -677,22 +681,24 @@ void GraphChecker::VisitInstruction(HInstruction* instruction) {
current_block_->GetBlockId()));
}
- // Ensure an instruction having an environment is dominated by the
- // instructions contained in the environment.
- for (HEnvironment* environment = instruction->GetEnvironment();
- environment != nullptr;
- environment = environment->GetParent()) {
- for (size_t i = 0, e = environment->Size(); i < e; ++i) {
- HInstruction* env_instruction = environment->GetInstructionAt(i);
- if (env_instruction != nullptr
- && !env_instruction->StrictlyDominates(instruction)) {
- AddError(StringPrintf("Instruction %d in environment of instruction %d "
- "from block %d does not dominate instruction %d.",
- env_instruction->GetId(),
- instruction->GetId(),
- current_block_->GetBlockId(),
- instruction->GetId()));
- }
+ // Ensure an instruction dominates all its environment uses.
+ for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
+ HInstruction* user = use.GetUser()->GetHolder();
+ if (user->IsPhi()) {
+ AddError(StringPrintf("Phi %d shouldn't have an environment", instruction->GetId()));
+ }
+ if (instruction->GetBlock() == user->GetBlock()
+ ? seen_ids_.IsBitSet(user->GetId())
+ : !instruction->GetBlock()->Dominates(user->GetBlock())) {
+ AddError(
+ StringPrintf("Instruction %s:%d in block %d does not dominate "
+ "environment use %s:%d in block %d.",
+ instruction->DebugName(),
+ instruction->GetId(),
+ current_block_->GetBlockId(),
+ user->DebugName(),
+ user->GetId(),
+ user->GetBlock()->GetBlockId()));
}
}
@@ -709,14 +715,15 @@ void GraphChecker::VisitInstruction(HInstruction* instruction) {
for (HInstructionIterator phi_it(catch_block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
HPhi* catch_phi = phi_it.Current()->AsPhi();
if (environment->GetInstructionAt(catch_phi->GetRegNumber()) == nullptr) {
- AddError(StringPrintf("Instruction %s:%d throws into catch block %d "
- "with catch phi %d for vreg %d but its "
- "corresponding environment slot is empty.",
- instruction->DebugName(),
- instruction->GetId(),
- catch_block->GetBlockId(),
- catch_phi->GetId(),
- catch_phi->GetRegNumber()));
+ AddError(
+ StringPrintf("Instruction %s:%d throws into catch block %d "
+ "with catch phi %d for vreg %d but its "
+ "corresponding environment slot is empty.",
+ instruction->DebugName(),
+ instruction->GetId(),
+ catch_block->GetBlockId(),
+ catch_phi->GetId(),
+ catch_phi->GetRegNumber()));
}
}
}
@@ -1291,7 +1298,7 @@ bool IsRemovedWriteBarrier(DataType::Type type,
HInstruction* value) {
return write_barrier_kind == WriteBarrierKind::kDontEmit &&
type == DataType::Type::kReference &&
- !value->IsNullConstant();
+ !HuntForOriginalReference(value)->IsNullConstant();
}
void GraphChecker::VisitArraySet(HArraySet* instruction) {
diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h
index 5704bcec1a..541a9cc3d2 100644
--- a/compiler/optimizing/graph_checker.h
+++ b/compiler/optimizing/graph_checker.h
@@ -30,7 +30,7 @@ namespace art HIDDEN {
class CodeGenerator;
// A control-flow graph visitor performing various checks.
-class GraphChecker : public HGraphDelegateVisitor {
+class GraphChecker final : public HGraphDelegateVisitor {
public:
explicit GraphChecker(HGraph* graph,
CodeGenerator* codegen = nullptr,
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index afbf941355..6696c1ef17 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -195,7 +195,7 @@ class HGraphVisualizerDisassembler {
/**
* HGraph visitor to generate a file suitable for the c1visualizer tool and IRHydra.
*/
-class HGraphVisualizerPrinter : public HGraphDelegateVisitor {
+class HGraphVisualizerPrinter final : public HGraphDelegateVisitor {
public:
HGraphVisualizerPrinter(HGraph* graph,
std::ostream& output,
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index a6ca057cfc..9113860387 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -248,7 +248,7 @@ class ValueSet : public ArenaObject<kArenaAllocGvn> {
// Iterates over buckets with impure instructions (even indices) and deletes
// the ones on which 'cond' returns true.
template<typename Functor>
- void DeleteAllImpureWhich(Functor cond) {
+ void DeleteAllImpureWhich(Functor&& cond) {
for (size_t i = 0; i < num_buckets_; i += 2) {
Node* node = buckets_[i];
Node* previous = nullptr;
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index fd3e787fc8..d7ca17b646 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -541,6 +541,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
<< " statically resolve the target";
// For baseline compilation, we will collect inline caches, so we should not
// try to inline using them.
+ outermost_graph_->SetUsefulOptimizing();
return false;
}
@@ -1552,9 +1553,7 @@ bool HInliner::IsInliningEncouraged(const HInvoke* invoke_instruction,
return false;
}
- size_t inline_max_code_units = graph_->IsCompilingBaseline()
- ? CompilerOptions::kBaselineInlineMaxCodeUnits
- : codegen_->GetCompilerOptions().GetInlineMaxCodeUnits();
+ size_t inline_max_code_units = codegen_->GetCompilerOptions().GetInlineMaxCodeUnits();
if (accessor.InsnsSizeInCodeUnits() > inline_max_code_units) {
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCodeItem)
<< "Method " << method->PrettyMethod()
@@ -1565,6 +1564,14 @@ bool HInliner::IsInliningEncouraged(const HInvoke* invoke_instruction,
return false;
}
+ if (graph_->IsCompilingBaseline() &&
+ accessor.InsnsSizeInCodeUnits() > CompilerOptions::kBaselineInlineMaxCodeUnits) {
+ LOG_FAIL_NO_STAT() << "Reached baseline maximum code unit for inlining "
+ << method->PrettyMethod();
+ outermost_graph_->SetUsefulOptimizing();
+ return false;
+ }
+
if (invoke_instruction->GetBlock()->GetLastInstruction()->IsThrow()) {
LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedEndsWithThrow)
<< "Method " << method->PrettyMethod()
@@ -2129,6 +2136,7 @@ bool HInliner::CanInlineBody(const HGraph* callee_graph,
if (depth_ + 1 > maximum_inlining_depth_for_baseline) {
LOG_FAIL_NO_STAT() << "Reached maximum depth for inlining in baseline compilation: "
<< depth_ << " for " << callee_graph->GetArtMethod()->PrettyMethod();
+ outermost_graph_->SetUsefulOptimizing();
return false;
}
}
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index ca0a8e81af..410d6fd0d0 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -33,6 +33,7 @@
#include "intrinsics.h"
#include "intrinsics_utils.h"
#include "jit/jit.h"
+#include "jit/profiling_info.h"
#include "mirror/dex_cache.h"
#include "oat/oat_file.h"
#include "optimizing_compiler_stats.h"
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 5db8235e29..ae778b421a 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -170,7 +170,7 @@ bool InstructionSimplifierVisitor::Run() {
// post order visit, we sometimes need to revisit an instruction index.
do {
simplification_occurred_ = false;
- VisitBasicBlock(block);
+ VisitNonPhiInstructions(block);
if (simplification_occurred_) {
didSimplify = true;
}
@@ -450,15 +450,22 @@ static bool TryReplaceShiftsByConstantWithTypeConversion(HBinaryOperation *instr
bool is_signed = instruction->IsShr();
DataType::Type conv_type =
is_signed ? source_integral_type : DataType::ToUnsigned(source_integral_type);
+
+ DCHECK(DataType::IsTypeConversionImplicit(conv_type, instruction->GetResultType()));
+
HInstruction* shl_value = shl->GetLeft();
HBasicBlock *block = instruction->GetBlock();
- HTypeConversion* new_conversion =
- new (block->GetGraph()->GetAllocator()) HTypeConversion(conv_type, shl_value);
-
- DCHECK(DataType::IsTypeConversionImplicit(conv_type, instruction->GetResultType()));
+ // We shouldn't introduce new implicit type conversions during simplification.
+ if (DataType::IsTypeConversionImplicit(shl_value->GetType(), conv_type)) {
+ instruction->ReplaceWith(shl_value);
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ } else {
+ HTypeConversion* new_conversion =
+ new (block->GetGraph()->GetAllocator()) HTypeConversion(conv_type, shl_value);
+ block->ReplaceAndRemoveInstructionWith(instruction, new_conversion);
+ }
- block->ReplaceAndRemoveInstructionWith(instruction, new_conversion);
shl->GetBlock()->RemoveInstruction(shl);
return true;
@@ -614,8 +621,7 @@ bool InstructionSimplifierVisitor::TryReplaceWithRotateConstantPattern(HBinaryOp
size_t rdist = Int64FromConstant(ushr->GetRight()->AsConstant());
size_t ldist = Int64FromConstant(shl->GetRight()->AsConstant());
if (((ldist + rdist) & (reg_bits - 1)) == 0) {
- ReplaceRotateWithRor(op, ushr, shl);
- return true;
+ return ReplaceRotateWithRor(op, ushr, shl);
}
return false;
}
@@ -636,6 +642,10 @@ bool InstructionSimplifierVisitor::TryReplaceWithRotateConstantPattern(HBinaryOp
// OP dst, dst, tmp
// with
// Ror dst, x, d
+//
+// Requires `d` to be non-zero for the HAdd and HXor case. If `d` is 0 the shifts and rotate are
+// no-ops and the `OP` is never executed. This is fine for HOr since the result is the same, but the
+// result is different for HAdd and HXor.
bool InstructionSimplifierVisitor::TryReplaceWithRotateRegisterNegPattern(HBinaryOperation* op,
HUShr* ushr,
HShl* shl) {
@@ -643,11 +653,20 @@ bool InstructionSimplifierVisitor::TryReplaceWithRotateRegisterNegPattern(HBinar
DCHECK(ushr->GetRight()->IsNeg() || shl->GetRight()->IsNeg());
bool neg_is_left = shl->GetRight()->IsNeg();
HNeg* neg = neg_is_left ? shl->GetRight()->AsNeg() : ushr->GetRight()->AsNeg();
- // And the shift distance being negated is the distance being shifted the other way.
- if (neg->InputAt(0) == (neg_is_left ? ushr->GetRight() : shl->GetRight())) {
- ReplaceRotateWithRor(op, ushr, shl);
+ HInstruction* value = neg->InputAt(0);
+
+ // The shift distance being negated is the distance being shifted the other way.
+ if (value != (neg_is_left ? ushr->GetRight() : shl->GetRight())) {
+ return false;
}
- return false;
+
+ const bool needs_non_zero_value = !op->IsOr();
+ if (needs_non_zero_value) {
+ if (!value->IsConstant() || value->AsConstant()->IsArithmeticZero()) {
+ return false;
+ }
+ }
+ return ReplaceRotateWithRor(op, ushr, shl);
}
// Try replacing code looking like (x >>> d OP x << (#bits - d)):
diff --git a/compiler/optimizing/intrinsic_objects.cc b/compiler/optimizing/intrinsic_objects.cc
index 6c799d4132..c625d435ae 100644
--- a/compiler/optimizing/intrinsic_objects.cc
+++ b/compiler/optimizing/intrinsic_objects.cc
@@ -35,7 +35,7 @@ static int32_t FillIntrinsicsObjects(
ObjPtr<mirror::ObjectArray<mirror::Object>> live_objects,
int32_t expected_low,
int32_t expected_high,
- T type_check,
+ T&& type_check,
int32_t index)
REQUIRES_SHARED(Locks::mutator_lock_) {
ObjPtr<mirror::ObjectArray<mirror::Object>> cache =
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 2ae44cd4b0..56a5186d36 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -3969,7 +3969,7 @@ template<typename OP>
void GenerateFP16Round(HInvoke* invoke,
CodeGeneratorARM64* const codegen_,
MacroAssembler* masm,
- const OP roundOp) {
+ OP&& roundOp) {
DCHECK(codegen_->GetInstructionSetFeatures().HasFP16());
LocationSummary* locations = invoke->GetLocations();
UseScratchRegisterScope scratch_scope(masm);
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 6f4f2b6cf6..01daa23511 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -55,7 +55,6 @@ void LinearizeTest::TestCode(const std::vector<uint16_t>& data,
}
TEST_F(LinearizeTest, CFG1) {
- TEST_DISABLED_FOR_RISCV64();
// Structure of this graph (+ are back edges)
// Block0
// |
@@ -81,7 +80,6 @@ TEST_F(LinearizeTest, CFG1) {
}
TEST_F(LinearizeTest, CFG2) {
- TEST_DISABLED_FOR_RISCV64();
// Structure of this graph (+ are back edges)
// Block0
// |
@@ -107,7 +105,6 @@ TEST_F(LinearizeTest, CFG2) {
}
TEST_F(LinearizeTest, CFG3) {
- TEST_DISABLED_FOR_RISCV64();
// Structure of this graph (+ are back edges)
// Block0
// |
@@ -135,7 +132,6 @@ TEST_F(LinearizeTest, CFG3) {
}
TEST_F(LinearizeTest, CFG4) {
- TEST_DISABLED_FOR_RISCV64();
/* Structure of this graph (+ are back edges)
// Block0
// |
@@ -166,7 +162,6 @@ TEST_F(LinearizeTest, CFG4) {
}
TEST_F(LinearizeTest, CFG5) {
- TEST_DISABLED_FOR_RISCV64();
/* Structure of this graph (+ are back edges)
// Block0
// |
@@ -197,7 +192,6 @@ TEST_F(LinearizeTest, CFG5) {
}
TEST_F(LinearizeTest, CFG6) {
- TEST_DISABLED_FOR_RISCV64();
// Block0
// |
// Block1
@@ -224,7 +218,6 @@ TEST_F(LinearizeTest, CFG6) {
}
TEST_F(LinearizeTest, CFG7) {
- TEST_DISABLED_FOR_RISCV64();
// Structure of this graph (+ are back edges)
// Block0
// |
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 7e488ba41d..fb1a23eef4 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -47,7 +47,6 @@ HGraph* LiveRangesTest::BuildGraph(const std::vector<uint16_t>& data) {
}
TEST_F(LiveRangesTest, CFG1) {
- TEST_DISABLED_FOR_RISCV64();
/*
* Test the following snippet:
* return 0;
@@ -82,7 +81,6 @@ TEST_F(LiveRangesTest, CFG1) {
}
TEST_F(LiveRangesTest, CFG2) {
- TEST_DISABLED_FOR_RISCV64();
/*
* Test the following snippet:
* var a = 0;
@@ -127,7 +125,6 @@ TEST_F(LiveRangesTest, CFG2) {
}
TEST_F(LiveRangesTest, CFG3) {
- TEST_DISABLED_FOR_RISCV64();
/*
* Test the following snippet:
* var a = 0;
@@ -197,7 +194,6 @@ TEST_F(LiveRangesTest, CFG3) {
}
TEST_F(LiveRangesTest, Loop1) {
- TEST_DISABLED_FOR_RISCV64();
/*
* Test the following snippet:
* var a = 0;
@@ -274,7 +270,6 @@ TEST_F(LiveRangesTest, Loop1) {
}
TEST_F(LiveRangesTest, Loop2) {
- TEST_DISABLED_FOR_RISCV64();
/*
* Test the following snippet:
* var a = 0;
@@ -346,7 +341,6 @@ TEST_F(LiveRangesTest, Loop2) {
}
TEST_F(LiveRangesTest, CFG4) {
- TEST_DISABLED_FOR_RISCV64();
/*
* Test the following snippet:
* var a = 0;
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index 6af07aea4e..0b421cf9e6 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -70,7 +70,6 @@ void LivenessTest::TestCode(const std::vector<uint16_t>& data, const char* expec
}
TEST_F(LivenessTest, CFG1) {
- TEST_DISABLED_FOR_RISCV64();
const char* expected =
"Block 0\n"
" live in: (0)\n"
@@ -94,7 +93,6 @@ TEST_F(LivenessTest, CFG1) {
}
TEST_F(LivenessTest, CFG2) {
- TEST_DISABLED_FOR_RISCV64();
const char* expected =
"Block 0\n"
" live in: (0)\n"
@@ -117,7 +115,6 @@ TEST_F(LivenessTest, CFG2) {
}
TEST_F(LivenessTest, CFG3) {
- TEST_DISABLED_FOR_RISCV64();
const char* expected =
"Block 0\n" // entry block
" live in: (000)\n"
@@ -147,7 +144,6 @@ TEST_F(LivenessTest, CFG3) {
}
TEST_F(LivenessTest, CFG4) {
- TEST_DISABLED_FOR_RISCV64();
// var a;
// if (0 == 0) {
// a = 5;
@@ -196,7 +192,6 @@ TEST_F(LivenessTest, CFG4) {
}
TEST_F(LivenessTest, CFG5) {
- TEST_DISABLED_FOR_RISCV64();
// var a = 0;
// if (0 == 0) {
// } else {
@@ -242,7 +237,6 @@ TEST_F(LivenessTest, CFG5) {
}
TEST_F(LivenessTest, Loop1) {
- TEST_DISABLED_FOR_RISCV64();
// Simple loop with one preheader and one back edge.
// var a = 0;
// while (a == a) {
@@ -289,7 +283,6 @@ TEST_F(LivenessTest, Loop1) {
}
TEST_F(LivenessTest, Loop3) {
- TEST_DISABLED_FOR_RISCV64();
// Test that the returned value stays live in a preceding loop.
// var a = 0;
// while (a == a) {
@@ -337,7 +330,6 @@ TEST_F(LivenessTest, Loop3) {
TEST_F(LivenessTest, Loop4) {
- TEST_DISABLED_FOR_RISCV64();
// Make sure we support a preheader of a loop not being the first predecessor
// in the predecessor list of the header.
// var a = 0;
@@ -390,7 +382,6 @@ TEST_F(LivenessTest, Loop4) {
}
TEST_F(LivenessTest, Loop5) {
- TEST_DISABLED_FOR_RISCV64();
// Make sure we create a preheader of a loop when a header originally has two
// incoming blocks and one back edge.
// Bitsets are made of:
@@ -447,7 +438,6 @@ TEST_F(LivenessTest, Loop5) {
}
TEST_F(LivenessTest, Loop6) {
- TEST_DISABLED_FOR_RISCV64();
// Bitsets are made of:
// (constant0, constant4, constant5, phi in block 2)
const char* expected =
@@ -499,7 +489,6 @@ TEST_F(LivenessTest, Loop6) {
TEST_F(LivenessTest, Loop7) {
- TEST_DISABLED_FOR_RISCV64();
// Bitsets are made of:
// (constant0, constant4, constant5, phi in block 2, phi in block 6)
const char* expected =
@@ -554,7 +543,6 @@ TEST_F(LivenessTest, Loop7) {
}
TEST_F(LivenessTest, Loop8) {
- TEST_DISABLED_FOR_RISCV64();
// var a = 0;
// while (a == a) {
// a = a + a;
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 38c1cfc0d2..80cf9e669b 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -1834,8 +1834,8 @@ void LSEVisitor::VisitBasicBlock(HBasicBlock* block) {
} else {
MergePredecessorRecords(block);
}
- // Visit instructions.
- HGraphVisitor::VisitBasicBlock(block);
+ // Visit non-Phi instructions.
+ VisitNonPhiInstructions(block);
}
bool LSEVisitor::MayAliasOnBackEdge(HBasicBlock* loop_header, size_t idx1, size_t idx2) const {
@@ -2727,7 +2727,7 @@ struct ScopedRestoreHeapValues {
}
template<typename Func>
- void ForEachRecord(Func func) {
+ void ForEachRecord(Func&& func) {
for (size_t blk_id : Range(to_restore_.size())) {
for (size_t heap_loc : Range(to_restore_[blk_id].size())) {
LSEVisitor::ValueRecord* vr = &to_restore_[blk_id][heap_loc];
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index 49e3c0418f..7f694fb655 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -30,7 +30,6 @@ namespace art HIDDEN {
class LoopOptimizationTest : public OptimizingUnitTest {
protected:
void SetUp() override {
- TEST_SETUP_DISABLED_FOR_RISCV64();
OptimizingUnitTest::SetUp();
graph_ = CreateGraph();
@@ -45,7 +44,6 @@ class LoopOptimizationTest : public OptimizingUnitTest {
}
void TearDown() override {
- TEST_TEARDOWN_DISABLED_FOR_RISCV64();
codegen_.reset();
compiler_options_.reset();
graph_ = nullptr;
@@ -136,20 +134,17 @@ class LoopOptimizationTest : public OptimizingUnitTest {
//
TEST_F(LoopOptimizationTest, NoLoops) {
- TEST_DISABLED_FOR_RISCV64();
PerformAnalysis();
EXPECT_EQ("", LoopStructure());
}
TEST_F(LoopOptimizationTest, SingleLoop) {
- TEST_DISABLED_FOR_RISCV64();
AddLoop(entry_block_, return_block_);
PerformAnalysis();
EXPECT_EQ("[]", LoopStructure());
}
TEST_F(LoopOptimizationTest, LoopNest10) {
- TEST_DISABLED_FOR_RISCV64();
HBasicBlock* b = entry_block_;
HBasicBlock* s = return_block_;
for (int i = 0; i < 10; i++) {
@@ -161,7 +156,6 @@ TEST_F(LoopOptimizationTest, LoopNest10) {
}
TEST_F(LoopOptimizationTest, LoopSequence10) {
- TEST_DISABLED_FOR_RISCV64();
HBasicBlock* b = entry_block_;
HBasicBlock* s = return_block_;
for (int i = 0; i < 10; i++) {
@@ -173,7 +167,6 @@ TEST_F(LoopOptimizationTest, LoopSequence10) {
}
TEST_F(LoopOptimizationTest, LoopSequenceOfNests) {
- TEST_DISABLED_FOR_RISCV64();
HBasicBlock* b = entry_block_;
HBasicBlock* s = return_block_;
for (int i = 0; i < 10; i++) {
@@ -201,7 +194,6 @@ TEST_F(LoopOptimizationTest, LoopSequenceOfNests) {
}
TEST_F(LoopOptimizationTest, LoopNestWithSequence) {
- TEST_DISABLED_FOR_RISCV64();
HBasicBlock* b = entry_block_;
HBasicBlock* s = return_block_;
for (int i = 0; i < 10; i++) {
@@ -223,7 +215,6 @@ TEST_F(LoopOptimizationTest, LoopNestWithSequence) {
//
// This is a test for nodes.cc functionality - HGraph::SimplifyLoop.
TEST_F(LoopOptimizationTest, SimplifyLoopReoderPredecessors) {
- TEST_DISABLED_FOR_RISCV64();
// Can't use AddLoop as we want special order for blocks predecessors.
HBasicBlock* header = new (GetAllocator()) HBasicBlock(graph_);
HBasicBlock* body = new (GetAllocator()) HBasicBlock(graph_);
@@ -269,7 +260,6 @@ TEST_F(LoopOptimizationTest, SimplifyLoopReoderPredecessors) {
//
// This is a test for nodes.cc functionality - HGraph::SimplifyLoop.
TEST_F(LoopOptimizationTest, SimplifyLoopSinglePreheader) {
- TEST_DISABLED_FOR_RISCV64();
HBasicBlock* header = AddLoop(entry_block_, return_block_);
header->InsertInstructionBefore(
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 94f197d8f1..c87c78815b 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1693,10 +1693,20 @@ void HGraphVisitor::VisitReversePostOrder() {
}
void HGraphVisitor::VisitBasicBlock(HBasicBlock* block) {
+ VisitPhis(block);
+ VisitNonPhiInstructions(block);
+}
+
+void HGraphVisitor::VisitPhis(HBasicBlock* block) {
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- it.Current()->Accept(this);
+ DCHECK(it.Current()->IsPhi());
+ VisitPhi(it.Current()->AsPhi());
}
+}
+
+void HGraphVisitor::VisitNonPhiInstructions(HBasicBlock* block) {
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ DCHECK(!it.Current()->IsPhi());
it.Current()->Accept(this);
}
}
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index c862e31de7..4d6b909629 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -425,6 +425,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
cached_current_method_(nullptr),
art_method_(nullptr),
compilation_kind_(compilation_kind),
+ useful_optimizing_(false),
cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
blocks_.reserve(kDefaultNumberOfBlocks);
}
@@ -742,6 +743,9 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
void SetNumberOfCHAGuards(uint32_t num) { number_of_cha_guards_ = num; }
void IncrementNumberOfCHAGuards() { number_of_cha_guards_++; }
+ void SetUsefulOptimizing() { useful_optimizing_ = true; }
+ bool IsUsefulOptimizing() const { return useful_optimizing_; }
+
private:
void RemoveDeadBlocksInstructionsAsUsersAndDisconnect(const ArenaBitVector& visited) const;
void RemoveDeadBlocks(const ArenaBitVector& visited);
@@ -897,6 +901,10 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
// directly jump to.
const CompilationKind compilation_kind_;
+ // Whether after compiling baseline it is still useful re-optimizing this
+ // method.
+ bool useful_optimizing_;
+
// List of methods that are assumed to have single implementation.
ArenaSet<ArtMethod*> cha_single_implementation_list_;
@@ -2440,18 +2448,26 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
bool IsRemovable() const {
return
!DoesAnyWrite() &&
- !CanThrow() &&
+ // TODO(solanes): Merge calls from IsSuspendCheck to IsControlFlow into one that doesn't
+ // do virtual dispatching.
!IsSuspendCheck() &&
- !IsControlFlow() &&
!IsNop() &&
!IsParameterValue() &&
// If we added an explicit barrier then we should keep it.
!IsMemoryBarrier() &&
- !IsConstructorFence();
+ !IsConstructorFence() &&
+ !IsControlFlow() &&
+ !CanThrow();
}
bool IsDeadAndRemovable() const {
- return IsRemovable() && !HasUses();
+ return !HasUses() && IsRemovable();
+ }
+
+ bool IsPhiDeadAndRemovable() const {
+ DCHECK(IsPhi());
+ DCHECK(IsRemovable()) << " phis are always removable";
+ return !HasUses();
}
// Does this instruction dominate `other_instruction`?
@@ -8556,6 +8572,9 @@ class HGraphVisitor : public ValueObject {
#undef DECLARE_VISIT_INSTRUCTION
protected:
+ void VisitPhis(HBasicBlock* block);
+ void VisitNonPhiInstructions(HBasicBlock* block);
+
OptimizingCompilerStats* stats_;
private:
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
index 4b0187d536..d627c6daee 100644
--- a/compiler/optimizing/nodes_shared.h
+++ b/compiler/optimizing/nodes_shared.h
@@ -97,6 +97,10 @@ class HBitwiseNegatedRight final : public HBinaryOperation {
}
}
+ bool InstructionDataEquals(const HInstruction* other) const override {
+ return op_kind_ == other->AsBitwiseNegatedRight()->op_kind_;
+ }
+
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const override {
return GetBlock()->GetGraph()->GetIntConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index a1c4130bc1..65e8e51712 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -905,6 +905,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
}
if (compilation_kind == CompilationKind::kBaseline && compiler_options.ProfileBranches()) {
+ graph->SetUsefulOptimizing();
// Branch profiling currently doesn't support running optimizations.
RunRequiredPasses(graph, codegen.get(), dex_compilation_unit, &pass_observer);
} else {
@@ -917,6 +918,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
// this method already, do it now.
if (jit != nullptr &&
compilation_kind == CompilationKind::kBaseline &&
+ graph->IsUsefulOptimizing() &&
graph->GetProfilingInfo() == nullptr) {
ProfilingInfoBuilder(
graph, codegen->GetCompilerOptions(), codegen.get(), compilation_stats_.get()).Run();
@@ -1448,6 +1450,11 @@ bool OptimizingCompiler::JitCompile(Thread* self,
debug_info = GenerateJitDebugInfo(info);
}
+ if (compilation_kind == CompilationKind::kBaseline &&
+ !codegen->GetGraph()->IsUsefulOptimizing()) {
+ compilation_kind = CompilationKind::kOptimized;
+ }
+
if (!code_cache->Commit(self,
region,
method,
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index 0426f8470b..f53c8a1285 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -30,7 +30,7 @@ class OptimizingCompilerStats;
* For example it changes uses of null checks and bounds checks to the original
* objects, to avoid creating a live range for these checks.
*/
-class PrepareForRegisterAllocation : public HGraphDelegateVisitor {
+class PrepareForRegisterAllocation final : public HGraphDelegateVisitor {
public:
PrepareForRegisterAllocation(HGraph* graph,
const CompilerOptions& compiler_options,
diff --git a/compiler/optimizing/profiling_info_builder.cc b/compiler/optimizing/profiling_info_builder.cc
index 19795f5466..7faf2bf5be 100644
--- a/compiler/optimizing/profiling_info_builder.cc
+++ b/compiler/optimizing/profiling_info_builder.cc
@@ -28,14 +28,12 @@
namespace art HIDDEN {
void ProfilingInfoBuilder::Run() {
+ DCHECK(GetGraph()->IsUsefulOptimizing());
DCHECK_EQ(GetGraph()->GetProfilingInfo(), nullptr);
// Order does not matter.
for (HBasicBlock* block : GetGraph()->GetReversePostOrder()) {
// No need to visit the phis.
- for (HInstructionIteratorHandleChanges inst_it(block->GetInstructions()); !inst_it.Done();
- inst_it.Advance()) {
- inst_it.Current()->Accept(this);
- }
+ VisitNonPhiInstructions(block);
}
ScopedObjectAccess soa(Thread::Current());
@@ -122,6 +120,12 @@ bool ProfilingInfoBuilder::IsInlineCacheUseful(HInvoke* invoke, CodeGenerator* c
return false;
}
}
+
+ if (!codegen->GetGraph()->IsUsefulOptimizing()) {
+ // Earlier pass knew what the calling target was. No need for an inline
+ // cache.
+ return false;
+ }
return true;
}
diff --git a/compiler/optimizing/profiling_info_builder.h b/compiler/optimizing/profiling_info_builder.h
index c8dc59a03c..ca1c8dd431 100644
--- a/compiler/optimizing/profiling_info_builder.h
+++ b/compiler/optimizing/profiling_info_builder.h
@@ -28,7 +28,7 @@ class HInliner;
class InlineCache;
class ProfilingInfo;
-class ProfilingInfoBuilder : public HGraphDelegateVisitor {
+class ProfilingInfoBuilder final : public HGraphDelegateVisitor {
public:
ProfilingInfoBuilder(HGraph* graph,
const CompilerOptions& compiler_options,
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index 6f44d45ed4..91a6fd5d3a 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -316,16 +316,11 @@ bool ReferenceTypePropagation::Run() {
void ReferenceTypePropagation::RTPVisitor::VisitBasicBlock(HBasicBlock* block) {
// Handle Phis first as there might be instructions in the same block who depend on them.
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- VisitPhi(it.Current()->AsPhi());
- }
+ VisitPhis(block);
// Handle instructions. Since RTP may add HBoundType instructions just after the
// last visited instruction, use `HInstructionIteratorHandleChanges` iterator.
- for (HInstructionIteratorHandleChanges it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instr = it.Current();
- instr->Accept(this);
- }
+ VisitNonPhiInstructions(block);
// Add extra nodes to bound types.
BoundTypeForIfNotNull(block);
diff --git a/compiler/optimizing/scheduler.cc b/compiler/optimizing/scheduler.cc
index 9b5091a81a..f4cf7b0a49 100644
--- a/compiler/optimizing/scheduler.cc
+++ b/compiler/optimizing/scheduler.cc
@@ -106,35 +106,15 @@ static bool IsArrayAccess(const HInstruction* instruction) {
}
static bool IsInstanceFieldAccess(const HInstruction* instruction) {
- return instruction->IsInstanceFieldGet() ||
- instruction->IsInstanceFieldSet() ||
- instruction->IsUnresolvedInstanceFieldGet() ||
- instruction->IsUnresolvedInstanceFieldSet();
+ return instruction->IsInstanceFieldGet() || instruction->IsInstanceFieldSet();
}
static bool IsStaticFieldAccess(const HInstruction* instruction) {
- return instruction->IsStaticFieldGet() ||
- instruction->IsStaticFieldSet() ||
- instruction->IsUnresolvedStaticFieldGet() ||
- instruction->IsUnresolvedStaticFieldSet();
-}
-
-static bool IsResolvedFieldAccess(const HInstruction* instruction) {
- return instruction->IsInstanceFieldGet() ||
- instruction->IsInstanceFieldSet() ||
- instruction->IsStaticFieldGet() ||
- instruction->IsStaticFieldSet();
-}
-
-static bool IsUnresolvedFieldAccess(const HInstruction* instruction) {
- return instruction->IsUnresolvedInstanceFieldGet() ||
- instruction->IsUnresolvedInstanceFieldSet() ||
- instruction->IsUnresolvedStaticFieldGet() ||
- instruction->IsUnresolvedStaticFieldSet();
+ return instruction->IsStaticFieldGet() || instruction->IsStaticFieldSet();
}
static bool IsFieldAccess(const HInstruction* instruction) {
- return IsResolvedFieldAccess(instruction) || IsUnresolvedFieldAccess(instruction);
+ return IsInstanceFieldAccess(instruction) || IsStaticFieldAccess(instruction);
}
static const FieldInfo* GetFieldInfo(const HInstruction* instruction) {
@@ -165,12 +145,6 @@ bool SideEffectDependencyAnalysis::MemoryDependencyAnalysis::FieldAccessMayAlias
return false;
}
- // If either of the field accesses is unresolved.
- if (IsUnresolvedFieldAccess(instr1) || IsUnresolvedFieldAccess(instr2)) {
- // Conservatively treat these two accesses may alias.
- return true;
- }
-
// If both fields accesses are resolved.
size_t instr1_field_access_heap_loc = FieldAccessHeapLocation(instr1);
size_t instr2_field_access_heap_loc = FieldAccessHeapLocation(instr2);
@@ -201,6 +175,14 @@ bool SideEffectDependencyAnalysis::MemoryDependencyAnalysis::HasMemoryDependency
return true;
}
+ // Note: Unresolved field access instructions are currently marked as not schedulable.
+ // If we change that, we should still keep in mind that these instructions can throw and
+ // read or write volatile fields and, if static, cause class initialization and write to
+ // arbitrary heap locations, and therefore cannot be reordered with any other field or
+ // array access to preserve the observable behavior. The only exception is access to
+  // singleton members that could actually be reordered across these instructions but we
+ // currently do not analyze singletons here anyway.
+
if (IsArrayAccess(instr1) && IsArrayAccess(instr2)) {
return ArrayAccessMayAlias(instr1, instr2);
}
@@ -566,20 +548,10 @@ void HScheduler::Schedule(HGraph* graph) {
void HScheduler::Schedule(HBasicBlock* block,
const HeapLocationCollector* heap_location_collector) {
ScopedArenaAllocator allocator(block->GetGraph()->GetArenaStack());
- ScopedArenaVector<SchedulingNode*> scheduling_nodes(allocator.Adapter(kArenaAllocScheduler));
// Build the scheduling graph.
- SchedulingGraph scheduling_graph(&allocator, heap_location_collector);
- for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
- CHECK_EQ(instruction->GetBlock(), block)
- << instruction->DebugName()
- << " is in block " << instruction->GetBlock()->GetBlockId()
- << ", and expected in block " << block->GetBlockId();
- SchedulingNode* node = scheduling_graph.AddNode(instruction, IsSchedulingBarrier(instruction));
- CalculateLatency(node);
- scheduling_nodes.push_back(node);
- }
+ auto [scheduling_graph, scheduling_nodes] =
+ BuildSchedulingGraph(block, &allocator, heap_location_collector);
if (scheduling_graph.Size() <= 1) {
return;
@@ -717,7 +689,8 @@ bool HScheduler::IsSchedulable(const HInstruction* instruction) const {
// HNop
// HThrow
// HTryBoundary
- // All volatile field access e.g. HInstanceFieldGet
+ // All unresolved field access instructions
+ // All volatile field access instructions, e.g. HInstanceFieldGet
// TODO: Some of the instructions above may be safe to schedule (maybe as
// scheduling barriers).
return instruction->IsArrayGet() ||
@@ -820,8 +793,7 @@ bool HInstructionScheduling::Run(bool only_optimize_loop_blocks,
#if defined(ART_ENABLE_CODEGEN_arm)
case InstructionSet::kThumb2:
case InstructionSet::kArm: {
- arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
- arm::HSchedulerARM scheduler(selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(selector, codegen_);
scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
scheduler.Schedule(graph_);
break;
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index 299fbc93f3..a9672ea732 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -497,9 +497,8 @@ class CriticalPathSchedulingNodeSelector : public SchedulingNodeSelector {
class HScheduler {
public:
- HScheduler(SchedulingLatencyVisitor* latency_visitor, SchedulingNodeSelector* selector)
- : latency_visitor_(latency_visitor),
- selector_(selector),
+ explicit HScheduler(SchedulingNodeSelector* selector)
+ : selector_(selector),
only_optimize_loop_blocks_(true),
cursor_(nullptr) {}
virtual ~HScheduler() {}
@@ -512,6 +511,35 @@ class HScheduler {
virtual bool IsSchedulingBarrier(const HInstruction* instruction) const;
protected:
+ virtual std::pair<SchedulingGraph, ScopedArenaVector<SchedulingNode*>> BuildSchedulingGraph(
+ HBasicBlock* block,
+ ScopedArenaAllocator* allocator,
+ const HeapLocationCollector* heap_location_collector) = 0;
+
+ template <typename LatencyVisitor>
+ std::pair<SchedulingGraph, ScopedArenaVector<SchedulingNode*>> BuildSchedulingGraph(
+ HBasicBlock* block,
+ ScopedArenaAllocator* allocator,
+ const HeapLocationCollector* heap_location_collector,
+ LatencyVisitor* latency_visitor) ALWAYS_INLINE {
+ SchedulingGraph scheduling_graph(allocator, heap_location_collector);
+ ScopedArenaVector<SchedulingNode*> scheduling_nodes(allocator->Adapter(kArenaAllocScheduler));
+ for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ CHECK_EQ(instruction->GetBlock(), block)
+ << instruction->DebugName()
+ << " is in block " << instruction->GetBlock()->GetBlockId()
+ << ", and expected in block " << block->GetBlockId();
+ SchedulingNode* node =
+ scheduling_graph.AddNode(instruction, IsSchedulingBarrier(instruction));
+ latency_visitor->CalculateLatency(node);
+ node->SetLatency(latency_visitor->GetLastVisitedLatency());
+ node->SetInternalLatency(latency_visitor->GetLastVisitedInternalLatency());
+ scheduling_nodes.push_back(node);
+ }
+ return {std::move(scheduling_graph), std::move(scheduling_nodes)};
+ }
+
void Schedule(HBasicBlock* block, const HeapLocationCollector* heap_location_collector);
void Schedule(SchedulingNode* scheduling_node,
/*inout*/ ScopedArenaVector<SchedulingNode*>* candidates);
@@ -529,13 +557,6 @@ class HScheduler {
virtual bool IsSchedulable(const HInstruction* instruction) const;
bool IsSchedulable(const HBasicBlock* block) const;
- void CalculateLatency(SchedulingNode* node) {
- latency_visitor_->CalculateLatency(node);
- node->SetLatency(latency_visitor_->GetLastVisitedLatency());
- node->SetInternalLatency(latency_visitor_->GetLastVisitedInternalLatency());
- }
-
- SchedulingLatencyVisitor* const latency_visitor_;
SchedulingNodeSelector* const selector_;
bool only_optimize_loop_blocks_;
diff --git a/compiler/optimizing/scheduler_arm.cc b/compiler/optimizing/scheduler_arm.cc
index 510a0f5496..3ee6f06b46 100644
--- a/compiler/optimizing/scheduler_arm.cc
+++ b/compiler/optimizing/scheduler_arm.cc
@@ -17,6 +17,7 @@
#include "scheduler_arm.h"
#include "arch/arm/instruction_set_features_arm.h"
+#include "code_generator_arm_vixl.h"
#include "code_generator_utils.h"
#include "common_arm.h"
#include "heap_poisoning.h"
@@ -29,6 +30,116 @@ namespace arm {
using helpers::Int32ConstantFrom;
using helpers::Uint64ConstantFrom;
+// AArch32 instruction latencies.
+// We currently assume that all ARM CPUs share the same instruction latency list.
+// The following latencies were tuned based on performance experiments and
+// automatic tuning using differential evolution approach on various benchmarks.
+static constexpr uint32_t kArmIntegerOpLatency = 2;
+static constexpr uint32_t kArmFloatingPointOpLatency = 11;
+static constexpr uint32_t kArmDataProcWithShifterOpLatency = 4;
+static constexpr uint32_t kArmMulIntegerLatency = 6;
+static constexpr uint32_t kArmMulFloatingPointLatency = 11;
+static constexpr uint32_t kArmDivIntegerLatency = 10;
+static constexpr uint32_t kArmDivFloatLatency = 20;
+static constexpr uint32_t kArmDivDoubleLatency = 25;
+static constexpr uint32_t kArmTypeConversionFloatingPointIntegerLatency = 11;
+static constexpr uint32_t kArmMemoryLoadLatency = 9;
+static constexpr uint32_t kArmMemoryStoreLatency = 9;
+static constexpr uint32_t kArmMemoryBarrierLatency = 6;
+static constexpr uint32_t kArmBranchLatency = 4;
+static constexpr uint32_t kArmCallLatency = 5;
+static constexpr uint32_t kArmCallInternalLatency = 29;
+static constexpr uint32_t kArmLoadStringInternalLatency = 10;
+static constexpr uint32_t kArmNopLatency = 2;
+static constexpr uint32_t kArmLoadWithBakerReadBarrierLatency = 18;
+static constexpr uint32_t kArmRuntimeTypeCheckLatency = 46;
+
+class SchedulingLatencyVisitorARM final : public SchedulingLatencyVisitor {
+ public:
+ explicit SchedulingLatencyVisitorARM(CodeGenerator* codegen)
+ : codegen_(down_cast<CodeGeneratorARMVIXL*>(codegen)) {}
+
+ // Default visitor for instructions not handled specifically below.
+ void VisitInstruction([[maybe_unused]] HInstruction*) override {
+ last_visited_latency_ = kArmIntegerOpLatency;
+ }
+
+// We add a second unused parameter to be able to use this macro like the others
+// defined in `nodes.h`.
+#define FOR_EACH_SCHEDULED_ARM_INSTRUCTION(M) \
+ M(ArrayGet, unused) \
+ M(ArrayLength, unused) \
+ M(ArraySet, unused) \
+ M(Add, unused) \
+ M(Sub, unused) \
+ M(And, unused) \
+ M(Or, unused) \
+ M(Ror, unused) \
+ M(Xor, unused) \
+ M(Shl, unused) \
+ M(Shr, unused) \
+ M(UShr, unused) \
+ M(Mul, unused) \
+ M(Div, unused) \
+ M(Condition, unused) \
+ M(Compare, unused) \
+ M(BoundsCheck, unused) \
+ M(InstanceFieldGet, unused) \
+ M(InstanceFieldSet, unused) \
+ M(InstanceOf, unused) \
+ M(Invoke, unused) \
+ M(LoadString, unused) \
+ M(NewArray, unused) \
+ M(NewInstance, unused) \
+ M(Rem, unused) \
+ M(StaticFieldGet, unused) \
+ M(StaticFieldSet, unused) \
+ M(SuspendCheck, unused) \
+ M(TypeConversion, unused)
+
+#define FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(M) \
+ M(BitwiseNegatedRight, unused) \
+ M(MultiplyAccumulate, unused) \
+ M(IntermediateAddress, unused) \
+ M(IntermediateAddressIndex, unused) \
+ M(DataProcWithShifterOp, unused)
+
+#define DECLARE_VISIT_INSTRUCTION(type, unused) \
+ void Visit##type(H##type* instruction) override;
+
+ FOR_EACH_SCHEDULED_ARM_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ private:
+ bool CanGenerateTest(HCondition* cond);
+ void HandleGenerateConditionWithZero(IfCondition cond);
+ void HandleGenerateLongTestConstant(HCondition* cond);
+ void HandleGenerateLongTest(HCondition* cond);
+ void HandleGenerateLongComparesAndJumps();
+ void HandleGenerateTest(HCondition* cond);
+ void HandleGenerateConditionGeneric(HCondition* cond);
+ void HandleGenerateEqualLong(HCondition* cond);
+ void HandleGenerateConditionLong(HCondition* cond);
+ void HandleGenerateConditionIntegralOrNonPrimitive(HCondition* cond);
+ void HandleCondition(HCondition* instr);
+ void HandleBinaryOperationLantencies(HBinaryOperation* instr);
+ void HandleBitwiseOperationLantencies(HBinaryOperation* instr);
+ void HandleShiftLatencies(HBinaryOperation* instr);
+ void HandleDivRemConstantIntegralLatencies(int32_t imm);
+ void HandleFieldSetLatencies(HInstruction* instruction, const FieldInfo& field_info);
+ void HandleFieldGetLatencies(HInstruction* instruction, const FieldInfo& field_info);
+ void HandleGenerateDataProcInstruction(bool internal_latency = false);
+ void HandleGenerateDataProc(HDataProcWithShifterOp* instruction);
+ void HandleGenerateLongDataProc(HDataProcWithShifterOp* instruction);
+
+ // The latency setting for each HInstruction depends on how CodeGenerator may generate code,
+ // latency visitors may query CodeGenerator for such information for accurate latency settings.
+ CodeGeneratorARMVIXL* codegen_;
+};
+
void SchedulingLatencyVisitorARM::HandleBinaryOperationLantencies(HBinaryOperation* instr) {
switch (instr->GetResultType()) {
case DataType::Type::kInt64:
@@ -1153,5 +1264,28 @@ void SchedulingLatencyVisitorARM::VisitTypeConversion(HTypeConversion* instr) {
}
}
+bool HSchedulerARM::IsSchedulable(const HInstruction* instruction) const {
+ switch (instruction->GetKind()) {
+#define SCHEDULABLE_CASE(type, unused) \
+ case HInstruction::InstructionKind::k##type: \
+ return true;
+ FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(SCHEDULABLE_CASE)
+ FOR_EACH_CONCRETE_INSTRUCTION_ARM(SCHEDULABLE_CASE)
+#undef SCHEDULABLE_CASE
+
+ default:
+ return HScheduler::IsSchedulable(instruction);
+ }
+}
+
+std::pair<SchedulingGraph, ScopedArenaVector<SchedulingNode*>> HSchedulerARM::BuildSchedulingGraph(
+ HBasicBlock* block,
+ ScopedArenaAllocator* allocator,
+ const HeapLocationCollector* heap_location_collector) {
+ SchedulingLatencyVisitorARM latency_visitor(codegen_);
+ return HScheduler::BuildSchedulingGraph(
+ block, allocator, heap_location_collector, &latency_visitor);
+}
+
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/scheduler_arm.h b/compiler/optimizing/scheduler_arm.h
index cf00fa12a3..25eac1b2c4 100644
--- a/compiler/optimizing/scheduler_arm.h
+++ b/compiler/optimizing/scheduler_arm.h
@@ -18,144 +18,32 @@
#define ART_COMPILER_OPTIMIZING_SCHEDULER_ARM_H_
#include "base/macros.h"
-#include "code_generator_arm_vixl.h"
#include "scheduler.h"
namespace art HIDDEN {
-namespace arm {
-// AArch32 instruction latencies.
-// We currently assume that all ARM CPUs share the same instruction latency list.
-// The following latencies were tuned based on performance experiments and
-// automatic tuning using differential evolution approach on various benchmarks.
-static constexpr uint32_t kArmIntegerOpLatency = 2;
-static constexpr uint32_t kArmFloatingPointOpLatency = 11;
-static constexpr uint32_t kArmDataProcWithShifterOpLatency = 4;
-static constexpr uint32_t kArmMulIntegerLatency = 6;
-static constexpr uint32_t kArmMulFloatingPointLatency = 11;
-static constexpr uint32_t kArmDivIntegerLatency = 10;
-static constexpr uint32_t kArmDivFloatLatency = 20;
-static constexpr uint32_t kArmDivDoubleLatency = 25;
-static constexpr uint32_t kArmTypeConversionFloatingPointIntegerLatency = 11;
-static constexpr uint32_t kArmMemoryLoadLatency = 9;
-static constexpr uint32_t kArmMemoryStoreLatency = 9;
-static constexpr uint32_t kArmMemoryBarrierLatency = 6;
-static constexpr uint32_t kArmBranchLatency = 4;
-static constexpr uint32_t kArmCallLatency = 5;
-static constexpr uint32_t kArmCallInternalLatency = 29;
-static constexpr uint32_t kArmLoadStringInternalLatency = 10;
-static constexpr uint32_t kArmNopLatency = 2;
-static constexpr uint32_t kArmLoadWithBakerReadBarrierLatency = 18;
-static constexpr uint32_t kArmRuntimeTypeCheckLatency = 46;
-
-class SchedulingLatencyVisitorARM final : public SchedulingLatencyVisitor {
- public:
- explicit SchedulingLatencyVisitorARM(CodeGenerator* codegen)
- : codegen_(down_cast<CodeGeneratorARMVIXL*>(codegen)) {}
-
- // Default visitor for instructions not handled specifically below.
- void VisitInstruction([[maybe_unused]] HInstruction*) override {
- last_visited_latency_ = kArmIntegerOpLatency;
- }
-
-// We add a second unused parameter to be able to use this macro like the others
-// defined in `nodes.h`.
-#define FOR_EACH_SCHEDULED_ARM_INSTRUCTION(M) \
- M(ArrayGet, unused) \
- M(ArrayLength, unused) \
- M(ArraySet, unused) \
- M(Add, unused) \
- M(Sub, unused) \
- M(And, unused) \
- M(Or, unused) \
- M(Ror, unused) \
- M(Xor, unused) \
- M(Shl, unused) \
- M(Shr, unused) \
- M(UShr, unused) \
- M(Mul, unused) \
- M(Div, unused) \
- M(Condition, unused) \
- M(Compare, unused) \
- M(BoundsCheck, unused) \
- M(InstanceFieldGet, unused) \
- M(InstanceFieldSet, unused) \
- M(InstanceOf, unused) \
- M(Invoke, unused) \
- M(LoadString, unused) \
- M(NewArray, unused) \
- M(NewInstance, unused) \
- M(Rem, unused) \
- M(StaticFieldGet, unused) \
- M(StaticFieldSet, unused) \
- M(SuspendCheck, unused) \
- M(TypeConversion, unused)
-
-#define FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(M) \
- M(BitwiseNegatedRight, unused) \
- M(MultiplyAccumulate, unused) \
- M(IntermediateAddress, unused) \
- M(IntermediateAddressIndex, unused) \
- M(DataProcWithShifterOp, unused)
-
-#define DECLARE_VISIT_INSTRUCTION(type, unused) \
- void Visit##type(H##type* instruction) override;
- FOR_EACH_SCHEDULED_ARM_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
- FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
- FOR_EACH_CONCRETE_INSTRUCTION_ARM(DECLARE_VISIT_INSTRUCTION)
+class CodeGenerator;
-#undef DECLARE_VISIT_INSTRUCTION
-
- private:
- bool CanGenerateTest(HCondition* cond);
- void HandleGenerateConditionWithZero(IfCondition cond);
- void HandleGenerateLongTestConstant(HCondition* cond);
- void HandleGenerateLongTest(HCondition* cond);
- void HandleGenerateLongComparesAndJumps();
- void HandleGenerateTest(HCondition* cond);
- void HandleGenerateConditionGeneric(HCondition* cond);
- void HandleGenerateEqualLong(HCondition* cond);
- void HandleGenerateConditionLong(HCondition* cond);
- void HandleGenerateConditionIntegralOrNonPrimitive(HCondition* cond);
- void HandleCondition(HCondition* instr);
- void HandleBinaryOperationLantencies(HBinaryOperation* instr);
- void HandleBitwiseOperationLantencies(HBinaryOperation* instr);
- void HandleShiftLatencies(HBinaryOperation* instr);
- void HandleDivRemConstantIntegralLatencies(int32_t imm);
- void HandleFieldSetLatencies(HInstruction* instruction, const FieldInfo& field_info);
- void HandleFieldGetLatencies(HInstruction* instruction, const FieldInfo& field_info);
- void HandleGenerateDataProcInstruction(bool internal_latency = false);
- void HandleGenerateDataProc(HDataProcWithShifterOp* instruction);
- void HandleGenerateLongDataProc(HDataProcWithShifterOp* instruction);
-
- // The latency setting for each HInstruction depends on how CodeGenerator may generate code,
- // latency visitors may query CodeGenerator for such information for accurate latency settings.
- CodeGeneratorARMVIXL* codegen_;
-};
+namespace arm {
-class HSchedulerARM : public HScheduler {
+class HSchedulerARM final : public HScheduler {
public:
- HSchedulerARM(SchedulingNodeSelector* selector,
- SchedulingLatencyVisitorARM* arm_latency_visitor)
- : HScheduler(arm_latency_visitor, selector) {}
+ HSchedulerARM(SchedulingNodeSelector* selector, CodeGenerator* codegen)
+ : HScheduler(selector), codegen_(codegen) {}
~HSchedulerARM() override {}
- bool IsSchedulable(const HInstruction* instruction) const override {
-#define CASE_INSTRUCTION_KIND(type, unused) case \
- HInstruction::InstructionKind::k##type:
- switch (instruction->GetKind()) {
- FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(CASE_INSTRUCTION_KIND)
- return true;
- FOR_EACH_CONCRETE_INSTRUCTION_ARM(CASE_INSTRUCTION_KIND)
- return true;
- default:
- return HScheduler::IsSchedulable(instruction);
- }
-#undef CASE_INSTRUCTION_KIND
- }
+ bool IsSchedulable(const HInstruction* instruction) const override;
+
+ protected:
+ std::pair<SchedulingGraph, ScopedArenaVector<SchedulingNode*>> BuildSchedulingGraph(
+ HBasicBlock* block,
+ ScopedArenaAllocator* allocator,
+ const HeapLocationCollector* heap_location_collector) override;
private:
DISALLOW_COPY_AND_ASSIGN(HSchedulerARM);
+
+ CodeGenerator* const codegen_;
};
} // namespace arm
diff --git a/compiler/optimizing/scheduler_arm64.cc b/compiler/optimizing/scheduler_arm64.cc
index 5113cf446d..08b8a3fb78 100644
--- a/compiler/optimizing/scheduler_arm64.cc
+++ b/compiler/optimizing/scheduler_arm64.cc
@@ -23,6 +23,115 @@
namespace art HIDDEN {
namespace arm64 {
+static constexpr uint32_t kArm64MemoryLoadLatency = 5;
+static constexpr uint32_t kArm64MemoryStoreLatency = 3;
+
+static constexpr uint32_t kArm64CallInternalLatency = 10;
+static constexpr uint32_t kArm64CallLatency = 5;
+
+// AArch64 instruction latency.
+// We currently assume that all arm64 CPUs share the same instruction latency list.
+static constexpr uint32_t kArm64IntegerOpLatency = 2;
+static constexpr uint32_t kArm64FloatingPointOpLatency = 5;
+
+static constexpr uint32_t kArm64DataProcWithShifterOpLatency = 3;
+static constexpr uint32_t kArm64DivDoubleLatency = 30;
+static constexpr uint32_t kArm64DivFloatLatency = 15;
+static constexpr uint32_t kArm64DivIntegerLatency = 5;
+static constexpr uint32_t kArm64LoadStringInternalLatency = 7;
+static constexpr uint32_t kArm64MulFloatingPointLatency = 6;
+static constexpr uint32_t kArm64MulIntegerLatency = 6;
+static constexpr uint32_t kArm64TypeConversionFloatingPointIntegerLatency = 5;
+static constexpr uint32_t kArm64BranchLatency = kArm64IntegerOpLatency;
+
+static constexpr uint32_t kArm64SIMDFloatingPointOpLatency = 10;
+static constexpr uint32_t kArm64SIMDIntegerOpLatency = 6;
+static constexpr uint32_t kArm64SIMDMemoryLoadLatency = 10;
+static constexpr uint32_t kArm64SIMDMemoryStoreLatency = 6;
+static constexpr uint32_t kArm64SIMDMulFloatingPointLatency = 12;
+static constexpr uint32_t kArm64SIMDMulIntegerLatency = 12;
+static constexpr uint32_t kArm64SIMDReplicateOpLatency = 16;
+static constexpr uint32_t kArm64SIMDDivDoubleLatency = 60;
+static constexpr uint32_t kArm64SIMDDivFloatLatency = 30;
+static constexpr uint32_t kArm64SIMDTypeConversionInt2FPLatency = 10;
+
+class SchedulingLatencyVisitorARM64 final : public SchedulingLatencyVisitor {
+ public:
+ // Default visitor for instructions not handled specifically below.
+ void VisitInstruction([[maybe_unused]] HInstruction*) override {
+ last_visited_latency_ = kArm64IntegerOpLatency;
+ }
+
+// We add a second unused parameter to be able to use this macro like the others
+// defined in `nodes.h`.
+#define FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(M) \
+ M(ArrayGet , unused) \
+ M(ArrayLength , unused) \
+ M(ArraySet , unused) \
+ M(BoundsCheck , unused) \
+ M(Div , unused) \
+ M(InstanceFieldGet , unused) \
+ M(InstanceOf , unused) \
+ M(LoadString , unused) \
+ M(Mul , unused) \
+ M(NewArray , unused) \
+ M(NewInstance , unused) \
+ M(Rem , unused) \
+ M(StaticFieldGet , unused) \
+ M(SuspendCheck , unused) \
+ M(TypeConversion , unused) \
+ M(VecReplicateScalar , unused) \
+ M(VecExtractScalar , unused) \
+ M(VecReduce , unused) \
+ M(VecCnv , unused) \
+ M(VecNeg , unused) \
+ M(VecAbs , unused) \
+ M(VecNot , unused) \
+ M(VecAdd , unused) \
+ M(VecHalvingAdd , unused) \
+ M(VecSub , unused) \
+ M(VecMul , unused) \
+ M(VecDiv , unused) \
+ M(VecMin , unused) \
+ M(VecMax , unused) \
+ M(VecAnd , unused) \
+ M(VecAndNot , unused) \
+ M(VecOr , unused) \
+ M(VecXor , unused) \
+ M(VecShl , unused) \
+ M(VecShr , unused) \
+ M(VecUShr , unused) \
+ M(VecSetScalars , unused) \
+ M(VecMultiplyAccumulate, unused) \
+ M(VecLoad , unused) \
+ M(VecStore , unused)
+
+#define FOR_EACH_SCHEDULED_ABSTRACT_INSTRUCTION(M) \
+ M(BinaryOperation , unused) \
+ M(Invoke , unused)
+
+#define FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(M) \
+ M(BitwiseNegatedRight, unused) \
+ M(MultiplyAccumulate, unused) \
+ M(IntermediateAddress, unused) \
+ M(IntermediateAddressIndex, unused) \
+ M(DataProcWithShifterOp, unused)
+
+#define DECLARE_VISIT_INSTRUCTION(type, unused) \
+ void Visit##type(H##type* instruction) override;
+
+ FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_SCHEDULED_ABSTRACT_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+ FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ private:
+ void HandleSimpleArithmeticSIMD(HVecOperation *instr);
+ void HandleVecAddress(HVecMemoryOperation* instruction, size_t size);
+};
+
void SchedulingLatencyVisitorARM64::VisitBinaryOperation(HBinaryOperation* instr) {
last_visited_latency_ = DataType::IsFloatingPointType(instr->GetResultType())
? kArm64FloatingPointOpLatency
@@ -348,5 +457,30 @@ void SchedulingLatencyVisitorARM64::VisitVecStore(HVecStore* instr) {
last_visited_latency_ = kArm64SIMDMemoryStoreLatency;
}
+bool HSchedulerARM64::IsSchedulable(const HInstruction* instruction) const {
+ switch (instruction->GetKind()) {
+#define SCHEDULABLE_CASE(type, unused) \
+ case HInstruction::InstructionKind::k##type: \
+ return true;
+ FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(SCHEDULABLE_CASE)
+ FOR_EACH_CONCRETE_INSTRUCTION_ARM64(SCHEDULABLE_CASE)
+ FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(SCHEDULABLE_CASE)
+#undef SCHEDULABLE_CASE
+
+ default:
+ return HScheduler::IsSchedulable(instruction);
+ }
+}
+
+std::pair<SchedulingGraph, ScopedArenaVector<SchedulingNode*>>
+HSchedulerARM64::BuildSchedulingGraph(
+ HBasicBlock* block,
+ ScopedArenaAllocator* allocator,
+ const HeapLocationCollector* heap_location_collector) {
+ SchedulingLatencyVisitorARM64 latency_visitor;
+ return HScheduler::BuildSchedulingGraph(
+ block, allocator, heap_location_collector, &latency_visitor);
+}
+
} // namespace arm64
} // namespace art
diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h
index 7ce00e00ab..044aa48a5a 100644
--- a/compiler/optimizing/scheduler_arm64.h
+++ b/compiler/optimizing/scheduler_arm64.h
@@ -23,137 +23,13 @@
namespace art HIDDEN {
namespace arm64 {
-static constexpr uint32_t kArm64MemoryLoadLatency = 5;
-static constexpr uint32_t kArm64MemoryStoreLatency = 3;
-
-static constexpr uint32_t kArm64CallInternalLatency = 10;
-static constexpr uint32_t kArm64CallLatency = 5;
-
-// AArch64 instruction latency.
-// We currently assume that all arm64 CPUs share the same instruction latency list.
-static constexpr uint32_t kArm64IntegerOpLatency = 2;
-static constexpr uint32_t kArm64FloatingPointOpLatency = 5;
-
-
-static constexpr uint32_t kArm64DataProcWithShifterOpLatency = 3;
-static constexpr uint32_t kArm64DivDoubleLatency = 30;
-static constexpr uint32_t kArm64DivFloatLatency = 15;
-static constexpr uint32_t kArm64DivIntegerLatency = 5;
-static constexpr uint32_t kArm64LoadStringInternalLatency = 7;
-static constexpr uint32_t kArm64MulFloatingPointLatency = 6;
-static constexpr uint32_t kArm64MulIntegerLatency = 6;
-static constexpr uint32_t kArm64TypeConversionFloatingPointIntegerLatency = 5;
-static constexpr uint32_t kArm64BranchLatency = kArm64IntegerOpLatency;
-
-static constexpr uint32_t kArm64SIMDFloatingPointOpLatency = 10;
-static constexpr uint32_t kArm64SIMDIntegerOpLatency = 6;
-static constexpr uint32_t kArm64SIMDMemoryLoadLatency = 10;
-static constexpr uint32_t kArm64SIMDMemoryStoreLatency = 6;
-static constexpr uint32_t kArm64SIMDMulFloatingPointLatency = 12;
-static constexpr uint32_t kArm64SIMDMulIntegerLatency = 12;
-static constexpr uint32_t kArm64SIMDReplicateOpLatency = 16;
-static constexpr uint32_t kArm64SIMDDivDoubleLatency = 60;
-static constexpr uint32_t kArm64SIMDDivFloatLatency = 30;
-static constexpr uint32_t kArm64SIMDTypeConversionInt2FPLatency = 10;
-
-class SchedulingLatencyVisitorARM64 final : public SchedulingLatencyVisitor {
- public:
- // Default visitor for instructions not handled specifically below.
- void VisitInstruction([[maybe_unused]] HInstruction*) override {
- last_visited_latency_ = kArm64IntegerOpLatency;
- }
-
-// We add a second unused parameter to be able to use this macro like the others
-// defined in `nodes.h`.
-#define FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(M) \
- M(ArrayGet , unused) \
- M(ArrayLength , unused) \
- M(ArraySet , unused) \
- M(BoundsCheck , unused) \
- M(Div , unused) \
- M(InstanceFieldGet , unused) \
- M(InstanceOf , unused) \
- M(LoadString , unused) \
- M(Mul , unused) \
- M(NewArray , unused) \
- M(NewInstance , unused) \
- M(Rem , unused) \
- M(StaticFieldGet , unused) \
- M(SuspendCheck , unused) \
- M(TypeConversion , unused) \
- M(VecReplicateScalar , unused) \
- M(VecExtractScalar , unused) \
- M(VecReduce , unused) \
- M(VecCnv , unused) \
- M(VecNeg , unused) \
- M(VecAbs , unused) \
- M(VecNot , unused) \
- M(VecAdd , unused) \
- M(VecHalvingAdd , unused) \
- M(VecSub , unused) \
- M(VecMul , unused) \
- M(VecDiv , unused) \
- M(VecMin , unused) \
- M(VecMax , unused) \
- M(VecAnd , unused) \
- M(VecAndNot , unused) \
- M(VecOr , unused) \
- M(VecXor , unused) \
- M(VecShl , unused) \
- M(VecShr , unused) \
- M(VecUShr , unused) \
- M(VecSetScalars , unused) \
- M(VecMultiplyAccumulate, unused) \
- M(VecLoad , unused) \
- M(VecStore , unused)
-
-#define FOR_EACH_SCHEDULED_ABSTRACT_INSTRUCTION(M) \
- M(BinaryOperation , unused) \
- M(Invoke , unused)
-
-#define FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(M) \
- M(BitwiseNegatedRight, unused) \
- M(MultiplyAccumulate, unused) \
- M(IntermediateAddress, unused) \
- M(IntermediateAddressIndex, unused) \
- M(DataProcWithShifterOp, unused)
-
-#define DECLARE_VISIT_INSTRUCTION(type, unused) \
- void Visit##type(H##type* instruction) override;
-
- FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
- FOR_EACH_SCHEDULED_ABSTRACT_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
- FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
- FOR_EACH_CONCRETE_INSTRUCTION_ARM64(DECLARE_VISIT_INSTRUCTION)
-
-#undef DECLARE_VISIT_INSTRUCTION
-
- private:
- void HandleSimpleArithmeticSIMD(HVecOperation *instr);
- void HandleVecAddress(HVecMemoryOperation* instruction, size_t size);
-};
-
class HSchedulerARM64 : public HScheduler {
public:
explicit HSchedulerARM64(SchedulingNodeSelector* selector)
- : HScheduler(&arm64_latency_visitor_, selector) {}
+ : HScheduler(selector) {}
~HSchedulerARM64() override {}
- bool IsSchedulable(const HInstruction* instruction) const override {
-#define CASE_INSTRUCTION_KIND(type, unused) case \
- HInstruction::InstructionKind::k##type:
- switch (instruction->GetKind()) {
- FOR_EACH_SCHEDULED_SHARED_INSTRUCTION(CASE_INSTRUCTION_KIND)
- return true;
- FOR_EACH_CONCRETE_INSTRUCTION_ARM64(CASE_INSTRUCTION_KIND)
- return true;
- FOR_EACH_SCHEDULED_COMMON_INSTRUCTION(CASE_INSTRUCTION_KIND)
- return true;
- default:
- return HScheduler::IsSchedulable(instruction);
- }
-#undef CASE_INSTRUCTION_KIND
- }
+ bool IsSchedulable(const HInstruction* instruction) const override;
// Treat as scheduling barriers those vector instructions whose live ranges exceed the vectorized
// loop boundaries. This is a workaround for the lack of notion of SIMD register in the compiler;
@@ -169,8 +45,13 @@ class HSchedulerARM64 : public HScheduler {
instr->IsVecReplicateScalar();
}
+ protected:
+ std::pair<SchedulingGraph, ScopedArenaVector<SchedulingNode*>> BuildSchedulingGraph(
+ HBasicBlock* block,
+ ScopedArenaAllocator* allocator,
+ const HeapLocationCollector* heap_location_collector) override;
+
private:
- SchedulingLatencyVisitorARM64 arm64_latency_visitor_;
DISALLOW_COPY_AND_ASSIGN(HSchedulerARM64);
};
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index c2b1fd6f7c..0b020f1460 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -406,15 +406,13 @@ TEST_F(SchedulerTest, ArrayAccessAliasingARM64) {
#if defined(ART_ENABLE_CODEGEN_arm)
TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- arm::HSchedulerARM scheduler(&critical_path_selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(&critical_path_selector, /*codegen=*/ nullptr);
TestBuildDependencyGraphAndSchedule(&scheduler);
}
TEST_F(SchedulerTest, ArrayAccessAliasingARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- arm::HSchedulerARM scheduler(&critical_path_selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(&critical_path_selector, /*codegen=*/ nullptr);
TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
}
#endif
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 18c945381d..2df0f34c7d 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -31,7 +31,6 @@ namespace art HIDDEN {
class SsaLivenessAnalysisTest : public OptimizingUnitTest {
protected:
void SetUp() override {
- TEST_SETUP_DISABLED_FOR_RISCV64();
OptimizingUnitTest::SetUp();
graph_ = CreateGraph();
compiler_options_ = CommonCompilerTest::CreateCompilerOptions(kRuntimeISA, "default");
@@ -43,11 +42,6 @@ class SsaLivenessAnalysisTest : public OptimizingUnitTest {
graph_->SetEntryBlock(entry_);
}
- void TearDown() override {
- TEST_TEARDOWN_DISABLED_FOR_RISCV64();
- OptimizingUnitTest::TearDown();
- }
-
protected:
HBasicBlock* CreateSuccessor(HBasicBlock* block) {
HGraph* graph = block->GetGraph();
@@ -64,7 +58,6 @@ class SsaLivenessAnalysisTest : public OptimizingUnitTest {
};
TEST_F(SsaLivenessAnalysisTest, TestReturnArg) {
- TEST_DISABLED_FOR_RISCV64();
HInstruction* arg = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kInt32);
entry_->AddInstruction(arg);
@@ -85,7 +78,6 @@ TEST_F(SsaLivenessAnalysisTest, TestReturnArg) {
}
TEST_F(SsaLivenessAnalysisTest, TestAput) {
- TEST_DISABLED_FOR_RISCV64();
HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
HInstruction* index = new (GetAllocator()) HParameterValue(
@@ -155,7 +147,6 @@ TEST_F(SsaLivenessAnalysisTest, TestAput) {
}
TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) {
- TEST_DISABLED_FOR_RISCV64();
HInstruction* array = new (GetAllocator()) HParameterValue(
graph_->GetDexFile(), dex::TypeIndex(0), 0, DataType::Type::kReference);
HInstruction* index = new (GetAllocator()) HParameterValue(
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 8c7b1c01a7..a3daa29b4e 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -110,7 +110,7 @@ class StackMapStream : public DeletableArenaObject<kArenaAllocStackMapStream> {
// Invokes the callback with pointer of each BitTableBuilder field.
template<typename Callback>
- void ForEachBitTable(Callback callback) {
+ void ForEachBitTable(Callback&& callback) {
size_t index = 0;
callback(index++, &stack_maps_);
callback(index++, &register_masks_);
diff --git a/compiler/optimizing/write_barrier_elimination.cc b/compiler/optimizing/write_barrier_elimination.cc
index 27348cd87d..537bc09f93 100644
--- a/compiler/optimizing/write_barrier_elimination.cc
+++ b/compiler/optimizing/write_barrier_elimination.cc
@@ -38,14 +38,14 @@ class WBEVisitor final : public HGraphVisitor {
// We clear the map to perform this optimization only in the same block. Doing it across blocks
// would entail non-trivial merging of states.
current_write_barriers_.clear();
- HGraphVisitor::VisitBasicBlock(block);
+ VisitNonPhiInstructions(block);
}
void VisitInstanceFieldSet(HInstanceFieldSet* instruction) override {
DCHECK(!instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()));
if (instruction->GetFieldType() != DataType::Type::kReference ||
- instruction->GetValue()->IsNullConstant()) {
+ HuntForOriginalReference(instruction->GetValue())->IsNullConstant()) {
instruction->SetWriteBarrierKind(WriteBarrierKind::kDontEmit);
return;
}
@@ -72,7 +72,7 @@ class WBEVisitor final : public HGraphVisitor {
DCHECK(!instruction->GetSideEffects().Includes(SideEffects::CanTriggerGC()));
if (instruction->GetFieldType() != DataType::Type::kReference ||
- instruction->GetValue()->IsNullConstant()) {
+ HuntForOriginalReference(instruction->GetValue())->IsNullConstant()) {
instruction->SetWriteBarrierKind(WriteBarrierKind::kDontEmit);
return;
}
@@ -100,7 +100,7 @@ class WBEVisitor final : public HGraphVisitor {
}
if (instruction->GetComponentType() != DataType::Type::kReference ||
- instruction->GetValue()->IsNullConstant()) {
+ HuntForOriginalReference(instruction->GetValue())->IsNullConstant()) {
instruction->SetWriteBarrierKind(WriteBarrierKind::kDontEmit);
return;
}
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index d9f56629ef..b3fc688cc9 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -67,7 +67,7 @@ static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
// VIXL will use the destination as a scratch register if
// the offset is not encodable as an immediate operand.
- ___ Ldr(temp_reg, MemOperand(r0, JNIEnvExt::SelfOffset(4).Int32Value()));
+ ___ Ldr(temp_reg, MemOperand(r0, JNIEnvExt::SelfOffset(kArmPointerSize).Int32Value()));
___ Ldr(pc, MemOperand(temp_reg, offset.Int32Value()));
break;
}
@@ -99,7 +99,7 @@ static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(
case kJniAbi: // Load via Thread* held in JNIEnv* in first argument (X0).
__ LoadRawPtr(Arm64ManagedRegister::FromXRegister(IP1),
Arm64ManagedRegister::FromXRegister(X0),
- Offset(JNIEnvExt::SelfOffset(8).Int32Value()));
+ Offset(JNIEnvExt::SelfOffset(kArm64PointerSize).Int32Value()));
__ JumpTo(Arm64ManagedRegister::FromXRegister(IP1), Offset(offset.Int32Value()),
Arm64ManagedRegister::FromXRegister(IP0));
@@ -134,9 +134,7 @@ static std::unique_ptr<const std::vector<uint8_t>> CreateTrampoline(ArenaAllocat
switch (abi) {
case kJniAbi: // Load via Thread* held in JNIEnv* in first argument (A0).
- __ Loadd(tmp,
- A0,
- JNIEnvExt::SelfOffset(static_cast<size_t>(kRiscv64PointerSize)).Int32Value());
+ __ Loadd(tmp, A0, JNIEnvExt::SelfOffset(kRiscv64PointerSize).Int32Value());
__ Loadd(tmp, tmp, offset.Int32Value());
__ Jr(tmp);
break;
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
index 9c2589138c..6844b1e656 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.cc
@@ -21,6 +21,8 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "indirect_reference_table.h"
+#include "jni/jni_env_ext.h"
+#include "jni/local_reference_table.h"
#include "lock_word.h"
#include "thread.h"
@@ -1001,7 +1003,7 @@ void ArmVIXLJNIMacroAssembler::DeliverPendingException() {
}
std::unique_ptr<JNIMacroLabel> ArmVIXLJNIMacroAssembler::CreateLabel() {
- return std::unique_ptr<JNIMacroLabel>(new ArmVIXLJNIMacroLabel());
+ return std::unique_ptr<JNIMacroLabel>(new (asm_.GetAllocator()) ArmVIXLJNIMacroLabel());
}
void ArmVIXLJNIMacroAssembler::Jump(JNIMacroLabel* label) {
@@ -1108,5 +1110,35 @@ void ArmVIXLJNIMacroAssembler::Load(ArmManagedRegister dest,
}
}
+void ArmVIXLJNIMacroAssembler::LoadLocalReferenceTableStates(ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg) {
+ constexpr size_t kLRTSegmentStateSize = sizeof(jni::LRTSegmentState);
+ DCHECK_EQ(kLRTSegmentStateSize, kRegSizeInBytes);
+ const MemberOffset previous_state_offset = JNIEnvExt::LrtPreviousStateOffset(kArmPointerSize);
+ const MemberOffset current_state_offset = JNIEnvExt::LrtSegmentStateOffset(kArmPointerSize);
+ DCHECK_EQ(previous_state_offset.SizeValue() + kLRTSegmentStateSize,
+ current_state_offset.SizeValue());
+
+ ___ Ldrd(AsVIXLRegister(previous_state_reg.AsArm()),
+ AsVIXLRegister(current_state_reg.AsArm()),
+ MemOperand(AsVIXLRegister(jni_env_reg.AsArm()), previous_state_offset.Int32Value()));
+}
+
+void ArmVIXLJNIMacroAssembler::StoreLocalReferenceTableStates(ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg) {
+ constexpr size_t kLRTSegmentStateSize = sizeof(jni::LRTSegmentState);
+ DCHECK_EQ(kLRTSegmentStateSize, kRegSizeInBytes);
+ const MemberOffset previous_state_offset = JNIEnvExt::LrtPreviousStateOffset(kArmPointerSize);
+ const MemberOffset current_state_offset = JNIEnvExt::LrtSegmentStateOffset(kArmPointerSize);
+ DCHECK_EQ(previous_state_offset.SizeValue() + kLRTSegmentStateSize,
+ current_state_offset.SizeValue());
+
+ ___ Strd(AsVIXLRegister(previous_state_reg.AsArm()),
+ AsVIXLRegister(current_state_reg.AsArm()),
+ MemOperand(AsVIXLRegister(jni_env_reg.AsArm()), previous_state_offset.Int32Value()));
+}
+
} // namespace arm
} // namespace art
diff --git a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
index f6df7f2c53..3f6512c2a8 100644
--- a/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
+++ b/compiler/utils/arm/jni_macro_assembler_arm_vixl.h
@@ -91,6 +91,14 @@ class ArmVIXLJNIMacroAssembler final
void GetCurrentThread(ManagedRegister dest) override;
void GetCurrentThread(FrameOffset dest_offset) override;
+ // Manipulating local reference table states.
+ void LoadLocalReferenceTableStates(ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg) override;
+ void StoreLocalReferenceTableStates(ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg) override;
+
// Decode JNI transition or local `jobject`. For (weak) global `jobject`, jump to slow path.
void DecodeJNITransitionOrLocalJObject(ManagedRegister reg,
JNIMacroLabel* slow_path,
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.cc b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
index 8ce44b6c63..50f6b4158d 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.cc
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.cc
@@ -18,6 +18,8 @@
#include "entrypoints/quick/quick_entrypoints.h"
#include "indirect_reference_table.h"
+#include "jni/jni_env_ext.h"
+#include "jni/local_reference_table.h"
#include "lock_word.h"
#include "managed_register_arm64.h"
#include "offsets.h"
@@ -797,7 +799,7 @@ void Arm64JNIMacroAssembler::DeliverPendingException() {
}
std::unique_ptr<JNIMacroLabel> Arm64JNIMacroAssembler::CreateLabel() {
- return std::unique_ptr<JNIMacroLabel>(new Arm64JNIMacroLabel());
+ return std::unique_ptr<JNIMacroLabel>(new (asm_.GetAllocator()) Arm64JNIMacroLabel());
}
void Arm64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
@@ -979,6 +981,39 @@ void Arm64JNIMacroAssembler::RemoveFrame(size_t frame_size,
cfi().DefCFAOffset(frame_size);
}
+void Arm64JNIMacroAssembler::LoadLocalReferenceTableStates(ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg) {
+ constexpr size_t kLRTSegmentStateSize = sizeof(jni::LRTSegmentState);
+ DCHECK_EQ(kLRTSegmentStateSize, kWRegSizeInBytes);
+ const MemberOffset previous_state_offset = JNIEnvExt::LrtPreviousStateOffset(kArm64PointerSize);
+ const MemberOffset current_state_offset = JNIEnvExt::LrtSegmentStateOffset(kArm64PointerSize);
+ DCHECK_EQ(previous_state_offset.SizeValue() + kLRTSegmentStateSize,
+ current_state_offset.SizeValue());
+
+ ___ Ldp(
+ reg_w(previous_state_reg.AsArm64().AsWRegister()),
+ reg_w(current_state_reg.AsArm64().AsWRegister()),
+ MemOperand(reg_x(jni_env_reg.AsArm64().AsXRegister()), previous_state_offset.Int32Value()));
+}
+
+void Arm64JNIMacroAssembler::StoreLocalReferenceTableStates(ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg) {
+ constexpr size_t kLRTSegmentStateSize = sizeof(jni::LRTSegmentState);
+ DCHECK_EQ(kLRTSegmentStateSize, kWRegSizeInBytes);
+ const MemberOffset previous_state_offset = JNIEnvExt::LrtPreviousStateOffset(kArm64PointerSize);
+ const MemberOffset current_state_offset = JNIEnvExt::LrtSegmentStateOffset(kArm64PointerSize);
+ DCHECK_EQ(previous_state_offset.SizeValue() + kLRTSegmentStateSize,
+ current_state_offset.SizeValue());
+
+ // Set the current segment state together with restoring the cookie.
+ ___ Stp(
+ reg_w(previous_state_reg.AsArm64().AsWRegister()),
+ reg_w(current_state_reg.AsArm64().AsWRegister()),
+ MemOperand(reg_x(jni_env_reg.AsArm64().AsXRegister()), previous_state_offset.Int32Value()));
+}
+
#undef ___
} // namespace arm64
diff --git a/compiler/utils/arm64/jni_macro_assembler_arm64.h b/compiler/utils/arm64/jni_macro_assembler_arm64.h
index 2836e0947d..0750c2655f 100644
--- a/compiler/utils/arm64/jni_macro_assembler_arm64.h
+++ b/compiler/utils/arm64/jni_macro_assembler_arm64.h
@@ -93,6 +93,14 @@ class Arm64JNIMacroAssembler final : public JNIMacroAssemblerFwd<Arm64Assembler,
void GetCurrentThread(ManagedRegister dest) override;
void GetCurrentThread(FrameOffset dest_offset) override;
+ // Manipulating local reference table states.
+ void LoadLocalReferenceTableStates(ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg) override;
+ void StoreLocalReferenceTableStates(ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg) override;
+
// Decode JNI transition or local `jobject`. For (weak) global `jobject`, jump to slow path.
void DecodeJNITransitionOrLocalJObject(ManagedRegister reg,
JNIMacroLabel* slow_path,
diff --git a/compiler/utils/assembler_test_base.h b/compiler/utils/assembler_test_base.h
index c147217b3d..c5345130b3 100644
--- a/compiler/utils/assembler_test_base.h
+++ b/compiler/utils/assembler_test_base.h
@@ -147,7 +147,7 @@ class AssemblerTestBase : public testing::Test {
"--compile",
"-target",
"riscv64-linux-gnu",
- "-march=rv64imafdv_zba_zbb",
+ "-march=rv64imafdcv_zba_zbb_zca_zcd_zcb",
// Force the assembler to fully emit branch instructions instead of leaving
// offsets unresolved with relocation information for the linker.
"-mno-relax"};
@@ -175,7 +175,7 @@ class AssemblerTestBase : public testing::Test {
"--no-print-imm-hex",
"--no-show-raw-insn",
// Disassemble Standard Extensions supported by the assembler.
- "--mattr=+F,+D,+A,+V,+Zba,+Zbb",
+ "--mattr=+F,+D,+A,+C,+V,+Zba,+Zbb,+Zca,+Zcd,+Zcb",
"-M",
"no-aliases"};
default:
diff --git a/compiler/utils/jni_macro_assembler.cc b/compiler/utils/jni_macro_assembler.cc
index 7a90a46f51..1806180980 100644
--- a/compiler/utils/jni_macro_assembler.cc
+++ b/compiler/utils/jni_macro_assembler.cc
@@ -38,6 +38,8 @@
#include "base/globals.h"
#include "base/memory_region.h"
#include "gc_root.h"
+#include "jni/jni_env_ext.h"
+#include "jni/local_reference_table.h"
#include "stack_reference.h"
namespace art HIDDEN {
@@ -129,4 +131,52 @@ template
void JNIMacroAssembler<PointerSize::k64>::LoadStackReference(ManagedRegister dest,
FrameOffset offs);
+template <PointerSize kPointerSize>
+void JNIMacroAssembler<kPointerSize>::LoadLocalReferenceTableStates(
+ ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg) {
+ constexpr size_t kLRTSegmentStateSize = sizeof(jni::LRTSegmentState);
+ const MemberOffset previous_state_offset = JNIEnvExt::LrtPreviousStateOffset(kPointerSize);
+ const MemberOffset current_state_offset = JNIEnvExt::LrtSegmentStateOffset(kPointerSize);
+
+ Load(previous_state_reg, jni_env_reg, previous_state_offset, kLRTSegmentStateSize);
+ Load(current_state_reg, jni_env_reg, current_state_offset, kLRTSegmentStateSize);
+}
+
+template
+void JNIMacroAssembler<PointerSize::k32>::LoadLocalReferenceTableStates(
+ ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg);
+template
+void JNIMacroAssembler<PointerSize::k64>::LoadLocalReferenceTableStates(
+ ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg);
+
+template <PointerSize kPointerSize>
+void JNIMacroAssembler<kPointerSize>::StoreLocalReferenceTableStates(
+ ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg) {
+ constexpr size_t kLRTSegmentStateSize = sizeof(jni::LRTSegmentState);
+ const MemberOffset previous_state_offset = JNIEnvExt::LrtPreviousStateOffset(kPointerSize);
+ const MemberOffset segment_state_offset = JNIEnvExt::LrtSegmentStateOffset(kPointerSize);
+
+ Store(jni_env_reg, previous_state_offset, previous_state_reg, kLRTSegmentStateSize);
+ Store(jni_env_reg, segment_state_offset, current_state_reg, kLRTSegmentStateSize);
+}
+
+template
+void JNIMacroAssembler<PointerSize::k32>::StoreLocalReferenceTableStates(
+ ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg);
+template
+void JNIMacroAssembler<PointerSize::k64>::StoreLocalReferenceTableStates(
+ ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg);
+
} // namespace art
diff --git a/compiler/utils/jni_macro_assembler.h b/compiler/utils/jni_macro_assembler.h
index 2d52eada08..0ffa50a53b 100644
--- a/compiler/utils/jni_macro_assembler.h
+++ b/compiler/utils/jni_macro_assembler.h
@@ -167,6 +167,17 @@ class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> {
virtual void GetCurrentThread(ManagedRegister dest) = 0;
virtual void GetCurrentThread(FrameOffset dest_offset) = 0;
+ // Manipulating local reference table states.
+ //
+ // These have a default implementation but they can be overridden to use register pair
+ // load/store instructions on architectures that support them (arm, arm64).
+ virtual void LoadLocalReferenceTableStates(ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg);
+ virtual void StoreLocalReferenceTableStates(ManagedRegister jni_env_reg,
+ ManagedRegister previous_state_reg,
+ ManagedRegister current_state_reg);
+
// Decode JNI transition or local `jobject`. For (weak) global `jobject`, jump to slow path.
virtual void DecodeJNITransitionOrLocalJObject(ManagedRegister reg,
JNIMacroLabel* slow_path,
@@ -250,7 +261,7 @@ class JNIMacroAssembler : public DeletableArenaObject<kArenaAllocAssembler> {
//
// It is only safe to use a label created
// via JNIMacroAssembler::CreateLabel with that same macro assembler.
-class JNIMacroLabel {
+class JNIMacroLabel : public DeletableArenaObject<kArenaAllocAssembler> {
public:
virtual ~JNIMacroLabel() = 0;
diff --git a/compiler/utils/riscv64/assembler_riscv64.cc b/compiler/utils/riscv64/assembler_riscv64.cc
index c98b919f41..eeb4537a31 100644
--- a/compiler/utils/riscv64/assembler_riscv64.cc
+++ b/compiler/utils/riscv64/assembler_riscv64.cc
@@ -61,18 +61,6 @@ void Riscv64Assembler::FinalizeCode() {
finalized_ = true;
}
-void Riscv64Assembler::Emit(uint32_t value) {
- if (overwriting_) {
- // Branches to labels are emitted into their placeholders here.
- buffer_.Store<uint32_t>(overwrite_location_, value);
- overwrite_location_ += sizeof(uint32_t);
- } else {
- // Other instructions are simply appended at the end here.
- AssemblerBuffer::EnsureCapacity ensured(&buffer_);
- buffer_.Emit<uint32_t>(value);
- }
-}
-
/////////////////////////////// RV64 VARIANTS extension ///////////////////////////////
//////////////////////////////// RV64 "I" Instructions ////////////////////////////////
@@ -792,6 +780,270 @@ void Riscv64Assembler::FClassD(XRegister rd, FRegister rs1) {
/////////////////////////////// RV64 "FD" Instructions END ///////////////////////////////
+/////////////////////////////// RV64 "C" Instructions START /////////////////////////////
+
+void Riscv64Assembler::CLwsp(XRegister rd, int32_t offset) {
+ DCHECK_NE(rd, Zero);
+
+ EmitCI(0b010u, rd, ExtractOffset52_76(offset), 0b10u);
+}
+
+void Riscv64Assembler::CLdsp(XRegister rd, int32_t offset) {
+ DCHECK_NE(rd, Zero);
+
+ EmitCI(0b011u, rd, ExtractOffset53_86(offset), 0b10u);
+}
+
+void Riscv64Assembler::CFLdsp(FRegister rd, int32_t offset) {
+ EmitCI(0b001u, rd, ExtractOffset53_86(offset), 0b10u);
+}
+
+void Riscv64Assembler::CSwsp(XRegister rs2, int32_t offset) {
+ EmitCSS(0b110u, ExtractOffset52_76(offset), rs2, 0b10u);
+}
+
+void Riscv64Assembler::CSdsp(XRegister rs2, int32_t offset) {
+ EmitCSS(0b111u, ExtractOffset53_86(offset), rs2, 0b10u);
+}
+
+void Riscv64Assembler::CFSdsp(FRegister rs2, int32_t offset) {
+ EmitCSS(0b101u, ExtractOffset53_86(offset), rs2, 0b10u);
+}
+
+void Riscv64Assembler::CLw(XRegister rd_s, XRegister rs1_s, int32_t offset) {
+ EmitCM(0b010u, ExtractOffset52_6(offset), rs1_s, rd_s, 0b00u);
+}
+
+void Riscv64Assembler::CLd(XRegister rd_s, XRegister rs1_s, int32_t offset) {
+ EmitCM(0b011u, ExtractOffset53_76(offset), rs1_s, rd_s, 0b00u);
+}
+
+void Riscv64Assembler::CFLd(FRegister rd_s, XRegister rs1_s, int32_t offset) {
+ EmitCM(0b001u, ExtractOffset53_76(offset), rs1_s, rd_s, 0b00u);
+}
+
+void Riscv64Assembler::CSw(XRegister rs2_s, XRegister rs1_s, int32_t offset) {
+ EmitCM(0b110u, ExtractOffset52_6(offset), rs1_s, rs2_s, 0b00u);
+}
+
+void Riscv64Assembler::CSd(XRegister rs2_s, XRegister rs1_s, int32_t offset) {
+ EmitCM(0b111u, ExtractOffset53_76(offset), rs1_s, rs2_s, 0b00u);
+}
+
+void Riscv64Assembler::CFSd(FRegister rs2_s, XRegister rs1_s, int32_t offset) {
+ EmitCM(0b101u, ExtractOffset53_76(offset), rs1_s, rs2_s, 0b00u);
+}
+
+void Riscv64Assembler::CLi(XRegister rd, int32_t imm) {
+ DCHECK_NE(rd, Zero);
+ DCHECK(IsInt<6>(imm));
+
+ EmitCI(0b010u, rd, EncodeInt6(imm), 0b01u);
+}
+
+void Riscv64Assembler::CLui(XRegister rd, uint32_t nzimm6) {
+ DCHECK_NE(rd, Zero);
+ DCHECK_NE(rd, SP);
+ DCHECK(IsImmCLuiEncodable(nzimm6));
+
+ EmitCI(0b011u, rd, nzimm6 & MaskLeastSignificant<uint32_t>(6), 0b01u);
+}
+
+void Riscv64Assembler::CAddi(XRegister rd, int32_t nzimm) {
+ DCHECK_NE(rd, Zero);
+ DCHECK_NE(nzimm, 0);
+
+ EmitCI(0b000u, rd, EncodeInt6(nzimm), 0b01u);
+}
+
+void Riscv64Assembler::CAddiw(XRegister rd, int32_t imm) {
+ DCHECK_NE(rd, Zero);
+
+ EmitCI(0b001u, rd, EncodeInt6(imm), 0b01u);
+}
+
+void Riscv64Assembler::CAddi16Sp(int32_t nzimm) {
+ DCHECK_NE(nzimm, 0);
+ DCHECK(IsAligned<16>(nzimm));
+
+ uint32_t unzimm = static_cast<uint32_t>(nzimm);
+
+ // nzimm[9]
+ uint32_t imms1 = BitFieldExtract(unzimm, 9, 1);
+ // nzimm[4|6|8:7|5]
+ uint32_t imms0 = (BitFieldExtract(unzimm, 4, 1) << 4) |
+ (BitFieldExtract(unzimm, 6, 1) << 3) |
+ (BitFieldExtract(unzimm, 7, 2) << 1) |
+ BitFieldExtract(unzimm, 5, 1);
+
+ EmitCI(0b011u, SP, BitFieldInsert(imms0, imms1, 5, 1), 0b01u);
+}
+
+void Riscv64Assembler::CAddi4Spn(XRegister rd_s, uint32_t nzuimm) {
+ DCHECK_NE(nzuimm, 0u);
+ DCHECK(IsAligned<4>(nzuimm));
+ DCHECK(IsUint<10>(nzuimm));
+
+ // nzuimm[5:4|9:6|2|3]
+ uint32_t uimm = (BitFieldExtract(nzuimm, 4, 2) << 6) |
+ (BitFieldExtract(nzuimm, 6, 4) << 2) |
+ (BitFieldExtract(nzuimm, 2, 1) << 1) |
+ BitFieldExtract(nzuimm, 3, 1);
+
+ EmitCIW(0b000u, uimm, rd_s, 0b00u);
+}
+
+void Riscv64Assembler::CSlli(XRegister rd, int32_t shamt) {
+ DCHECK_NE(shamt, 0);
+ DCHECK_NE(rd, Zero);
+
+ EmitCI(0b000u, rd, shamt, 0b10u);
+}
+
+void Riscv64Assembler::CSrli(XRegister rd_s, int32_t shamt) {
+ DCHECK_NE(shamt, 0);
+ DCHECK(IsUint<6>(shamt));
+
+ EmitCBArithmetic(0b100u, 0b00u, shamt, rd_s, 0b01u);
+}
+
+void Riscv64Assembler::CSrai(XRegister rd_s, int32_t shamt) {
+ DCHECK_NE(shamt, 0);
+ DCHECK(IsUint<6>(shamt));
+
+ EmitCBArithmetic(0b100u, 0b01u, shamt, rd_s, 0b01u);
+}
+
+void Riscv64Assembler::CAndi(XRegister rd_s, int32_t imm) {
+ DCHECK(IsInt<6>(imm));
+
+ EmitCBArithmetic(0b100u, 0b10u, imm, rd_s, 0b01u);
+}
+
+void Riscv64Assembler::CMv(XRegister rd, XRegister rs2) {
+ DCHECK_NE(rd, Zero);
+ DCHECK_NE(rs2, Zero);
+
+ EmitCR(0b1000u, rd, rs2, 0b10u);
+}
+
+void Riscv64Assembler::CAdd(XRegister rd, XRegister rs2) {
+ DCHECK_NE(rd, Zero);
+ DCHECK_NE(rs2, Zero);
+
+ EmitCR(0b1001u, rd, rs2, 0b10u);
+}
+
+void Riscv64Assembler::CAnd(XRegister rd_s, XRegister rs2_s) {
+ EmitCAReg(0b100011u, rd_s, 0b11u, rs2_s, 0b01u);
+}
+
+void Riscv64Assembler::COr(XRegister rd_s, XRegister rs2_s) {
+ EmitCAReg(0b100011u, rd_s, 0b10u, rs2_s, 0b01u);
+}
+
+void Riscv64Assembler::CXor(XRegister rd_s, XRegister rs2_s) {
+ EmitCAReg(0b100011u, rd_s, 0b01u, rs2_s, 0b01u);
+}
+
+void Riscv64Assembler::CSub(XRegister rd_s, XRegister rs2_s) {
+ EmitCAReg(0b100011u, rd_s, 0b00u, rs2_s, 0b01u);
+}
+
+void Riscv64Assembler::CAddw(XRegister rd_s, XRegister rs2_s) {
+ EmitCAReg(0b100111u, rd_s, 0b01u, rs2_s, 0b01u);
+}
+
+void Riscv64Assembler::CSubw(XRegister rd_s, XRegister rs2_s) {
+ EmitCAReg(0b100111u, rd_s, 0b00u, rs2_s, 0b01u);
+}
+
+// "Zcb" Standard Extension, part of "C", opcode = 0b00, 0b01, funct3 = 0b100.
+
+void Riscv64Assembler::CLbu(XRegister rd_s, XRegister rs1_s, int32_t offset) {
+ EmitCAReg(0b100000u, rs1_s, EncodeOffset0_1(offset), rd_s, 0b00u);
+}
+
+void Riscv64Assembler::CLhu(XRegister rd_s, XRegister rs1_s, int32_t offset) {
+ DCHECK(IsUint<2>(offset));
+ DCHECK_ALIGNED(offset, 2);
+ EmitCAReg(0b100001u, rs1_s, BitFieldExtract<uint32_t>(offset, 1, 1), rd_s, 0b00u);
+}
+
+void Riscv64Assembler::CLh(XRegister rd_s, XRegister rs1_s, int32_t offset) {
+ DCHECK(IsUint<2>(offset));
+ DCHECK_ALIGNED(offset, 2);
+ EmitCAReg(0b100001u, rs1_s, 0b10 | BitFieldExtract<uint32_t>(offset, 1, 1), rd_s, 0b00u);
+}
+
+void Riscv64Assembler::CSb(XRegister rs2_s, XRegister rs1_s, int32_t offset) {
+ EmitCAReg(0b100010u, rs1_s, EncodeOffset0_1(offset), rs2_s, 0b00u);
+}
+
+void Riscv64Assembler::CSh(XRegister rs2_s, XRegister rs1_s, int32_t offset) {
+ DCHECK(IsUint<2>(offset));
+ DCHECK_ALIGNED(offset, 2);
+ EmitCAReg(0b100011u, rs1_s, BitFieldExtract<uint32_t>(offset, 1, 1), rs2_s, 0b00u);
+}
+
+void Riscv64Assembler::CZext_b(XRegister rd_rs1_s) {
+ EmitCAImm(0b100111u, rd_rs1_s, 0b11u, 0b000u, 0b01u);
+}
+
+void Riscv64Assembler::CSext_b(XRegister rd_rs1_s) {
+ EmitCAImm(0b100111u, rd_rs1_s, 0b11u, 0b001u, 0b01u);
+}
+
+void Riscv64Assembler::CZext_h(XRegister rd_rs1_s) {
+ EmitCAImm(0b100111u, rd_rs1_s, 0b11u, 0b010u, 0b01u);
+}
+
+void Riscv64Assembler::CSext_h(XRegister rd_rs1_s) {
+ EmitCAImm(0b100111u, rd_rs1_s, 0b11u, 0b011u, 0b01u);
+}
+
+void Riscv64Assembler::CZext_w(XRegister rd_rs1_s) {
+ EmitCAImm(0b100111u, rd_rs1_s, 0b11u, 0b100u, 0b01u);
+}
+
+void Riscv64Assembler::CNot(XRegister rd_rs1_s) {
+ EmitCAImm(0b100111u, rd_rs1_s, 0b11u, 0b101u, 0b01u);
+}
+
+void Riscv64Assembler::CMul(XRegister rd_s, XRegister rs2_s) {
+ EmitCAReg(0b100111u, rd_s, 0b10u, rs2_s, 0b01u);
+}
+
+void Riscv64Assembler::CJ(int32_t offset) { EmitCJ(0b101u, offset, 0b01u); }
+
+void Riscv64Assembler::CJr(XRegister rs1) {
+ DCHECK_NE(rs1, Zero);
+
+ EmitCR(0b1000u, rs1, Zero, 0b10u);
+}
+
+void Riscv64Assembler::CJalr(XRegister rs1) {
+ DCHECK_NE(rs1, Zero);
+
+ EmitCR(0b1001u, rs1, Zero, 0b10u);
+}
+
+void Riscv64Assembler::CBeqz(XRegister rs1_s, int32_t offset) {
+ EmitCBBranch(0b110u, offset, rs1_s, 0b01u);
+}
+
+void Riscv64Assembler::CBnez(XRegister rs1_s, int32_t offset) {
+ EmitCBBranch(0b111u, offset, rs1_s, 0b01u);
+}
+
+void Riscv64Assembler::CEbreak() { EmitCR(0b1001u, Zero, Zero, 0b10u); }
+
+void Riscv64Assembler::CNop() { EmitCI(0b000u, Zero, 0u, 0b01u); }
+
+void Riscv64Assembler::CUnimp() { Emit16(0x0u); }
+
+/////////////////////////////// RV64 "C" Instructions END ///////////////////////////////
+
////////////////////////////// RV64 "Zba" Instructions START /////////////////////////////
void Riscv64Assembler::AddUw(XRegister rd, XRegister rs1, XRegister rs2) {
@@ -916,6 +1168,18 @@ void Riscv64Assembler::Rev8(XRegister rd, XRegister rs1) {
EmitR(0x35, 0x18, rs1, 0x5, rd, 0x13);
}
+void Riscv64Assembler::ZbbSextB(XRegister rd, XRegister rs1) {
+ EmitR(0x30, 0x4, rs1, 0x1, rd, 0x13);
+}
+
+void Riscv64Assembler::ZbbSextH(XRegister rd, XRegister rs1) {
+ EmitR(0x30, 0x5, rs1, 0x1, rd, 0x13);
+}
+
+void Riscv64Assembler::ZbbZextH(XRegister rd, XRegister rs1) {
+ EmitR(0x4, 0x0, rs1, 0x4, rd, 0x3b);
+}
+
/////////////////////////////// RV64 "Zbb" Instructions END //////////////////////////////
/////////////////////////////// RVV "VSet" Instructions START ////////////////////////////
@@ -5060,7 +5324,7 @@ void Riscv64Assembler::FLoadd(FRegister rd, Literal* literal) {
void Riscv64Assembler::Unimp() {
// TODO(riscv64): use 16-bit zero C.UNIMP once we support compression
- Emit(0xC0001073);
+ Emit32(0xC0001073);
}
/////////////////////////////// RV64 MACRO Instructions END ///////////////////////////////
@@ -5542,7 +5806,7 @@ void Riscv64Assembler::FinalizeLabeledBranch(Riscv64Label* label) {
// Branch forward (to a following label), distance is unknown.
// The first branch forward will contain 0, serving as the terminator of
// the list of forward-reaching branches.
- Emit(label->position_);
+ Emit32(label->position_);
length--;
// Now make the label object point to this branch
// (this forms a linked list of branches preceding this label).
@@ -5775,7 +6039,7 @@ void Riscv64Assembler::PromoteBranches() {
DCHECK(!overwriting_);
overwriting_ = true;
overwrite_location_ = first_literal_location;
- Emit(0); // Illegal instruction.
+ Emit32(0); // Illegal instruction.
overwriting_ = false;
// Increase target addresses in literal and address loads by 4 bytes in order for correct
// offsets from PC to be generated.
@@ -5838,7 +6102,7 @@ void Riscv64Assembler::EmitJumpTables() {
CHECK_EQ(buffer_.Load<uint32_t>(overwrite_location_), 0x1abe1234u);
// The table will contain target addresses relative to the table start.
uint32_t offset = GetLabelLocation(target) - start;
- Emit(offset);
+ Emit32(offset);
}
}
diff --git a/compiler/utils/riscv64/assembler_riscv64.h b/compiler/utils/riscv64/assembler_riscv64.h
index b67a6508d9..1696251bf6 100644
--- a/compiler/utils/riscv64/assembler_riscv64.h
+++ b/compiler/utils/riscv64/assembler_riscv64.h
@@ -490,6 +490,64 @@ class Riscv64Assembler final : public Assembler {
void FClassS(XRegister rd, FRegister rs1);
void FClassD(XRegister rd, FRegister rs1);
+ // "C" Standard Extension, Compresseed Instructions
+ void CLwsp(XRegister rd, int32_t offset);
+ void CLdsp(XRegister rd, int32_t offset);
+ void CFLdsp(FRegister rd, int32_t offset);
+ void CSwsp(XRegister rs2, int32_t offset);
+ void CSdsp(XRegister rs2, int32_t offset);
+ void CFSdsp(FRegister rs2, int32_t offset);
+
+ void CLw(XRegister rd_s, XRegister rs1_s, int32_t offset);
+ void CLd(XRegister rd_s, XRegister rs1_s, int32_t offset);
+ void CFLd(FRegister rd_s, XRegister rs1_s, int32_t offset);
+ void CSw(XRegister rs2_s, XRegister rs1_s, int32_t offset);
+ void CSd(XRegister rs2_s, XRegister rs1_s, int32_t offset);
+ void CFSd(FRegister rs2_s, XRegister rs1_s, int32_t offset);
+
+ void CLi(XRegister rd, int32_t imm);
+ void CLui(XRegister rd, uint32_t nzimm6);
+ void CAddi(XRegister rd, int32_t nzimm);
+ void CAddiw(XRegister rd, int32_t imm);
+ void CAddi16Sp(int32_t nzimm);
+ void CAddi4Spn(XRegister rd_s, uint32_t nzuimm);
+ void CSlli(XRegister rd, int32_t shamt);
+ void CSrli(XRegister rd_s, int32_t shamt);
+ void CSrai(XRegister rd_s, int32_t shamt);
+ void CAndi(XRegister rd_s, int32_t imm);
+ void CMv(XRegister rd, XRegister rs2);
+ void CAdd(XRegister rd, XRegister rs2);
+ void CAnd(XRegister rd_s, XRegister rs2_s);
+ void COr(XRegister rd_s, XRegister rs2_s);
+ void CXor(XRegister rd_s, XRegister rs2_s);
+ void CSub(XRegister rd_s, XRegister rs2_s);
+ void CAddw(XRegister rd_s, XRegister rs2_s);
+ void CSubw(XRegister rd_s, XRegister rs2_s);
+
+ // "Zcb" Standard Extension, part of "C", opcode = 0b00, 0b01, funct3 = 0b100.
+ void CLbu(XRegister rd_s, XRegister rs1_s, int32_t offset);
+ void CLhu(XRegister rd_s, XRegister rs1_s, int32_t offset);
+ void CLh(XRegister rd_s, XRegister rs1_s, int32_t offset);
+ void CSb(XRegister rd_s, XRegister rs1_s, int32_t offset);
+ void CSh(XRegister rd_s, XRegister rs1_s, int32_t offset);
+ void CZext_b(XRegister rd_rs1_s);
+ void CSext_b(XRegister rd_rs1_s);
+ void CZext_h(XRegister rd_rs1_s);
+ void CSext_h(XRegister rd_rs1_s);
+ void CZext_w(XRegister rd_rs1_s);
+ void CNot(XRegister rd_rs1_s);
+ void CMul(XRegister rd_s, XRegister rs2_s);
+
+ void CJ(int32_t offset);
+ void CJr(XRegister rs1);
+ void CJalr(XRegister rs1);
+ void CBeqz(XRegister rs1_s, int32_t offset);
+ void CBnez(XRegister rs1_s, int32_t offset);
+
+ void CEbreak();
+ void CNop();
+ void CUnimp();
+
// "Zba" Standard Extension, opcode = 0x1b, 0x33 or 0x3b, funct3 and funct7 varies.
void AddUw(XRegister rd, XRegister rs1, XRegister rs2);
void Sh1Add(XRegister rd, XRegister rs1, XRegister rs2);
@@ -500,9 +558,9 @@ class Riscv64Assembler final : public Assembler {
void Sh3AddUw(XRegister rd, XRegister rs1, XRegister rs2);
void SlliUw(XRegister rd, XRegister rs1, int32_t shamt);
- // "Zbb" Standard Extension, opcode = 0x13, 0x1b or 0x33, funct3 and funct7 varies.
- // Note: We do not support 32-bit sext.b, sext.h and zext.h from the Zbb extension.
- // (Neither does the clang-r498229's assembler which we currently test against.)
+ // "Zbb" Standard Extension, opcode = 0x13, 0x1b, 0x33 or 0x3b, funct3 and funct7 varies.
+ // Note: 32-bit sext.b, sext.h and zext.h from the Zbb extension are explicitly
+ // prefixed with "Zbb" to differentiate them from the utility macros.
void Andn(XRegister rd, XRegister rs1, XRegister rs2);
void Orn(XRegister rd, XRegister rs1, XRegister rs2);
void Xnor(XRegister rd, XRegister rs1, XRegister rs2);
@@ -524,6 +582,9 @@ class Riscv64Assembler final : public Assembler {
void Roriw(XRegister rd, XRegister rs1, int32_t shamt);
void OrcB(XRegister rd, XRegister rs1);
void Rev8(XRegister rd, XRegister rs1);
+ void ZbbSextB(XRegister rd, XRegister rs1);
+ void ZbbSextH(XRegister rd, XRegister rs1);
+ void ZbbZextH(XRegister rd, XRegister rs1);
////////////////////////////// RISC-V Vector Instructions START ///////////////////////////////
enum class LengthMultiplier : uint32_t {
@@ -1758,6 +1819,13 @@ class Riscv64Assembler final : public Assembler {
// and emit branches.
void FinalizeCode() override;
+ template <typename Reg>
+ static inline bool IsShortReg(Reg reg) {
+ static_assert(std::is_same_v<Reg, XRegister> || std::is_same_v<Reg, FRegister>);
+ uint32_t uv = enum_cast<uint32_t>(reg) - 8u;
+ return IsUint<3>(uv);
+ }
+
// Returns the current location of a label.
//
// This function must be used instead of `Riscv64Label::GetPosition()`
@@ -1952,7 +2020,23 @@ class Riscv64Assembler final : public Assembler {
void PatchCFI();
// Emit data (e.g. encoded instruction or immediate) to the instruction stream.
- void Emit(uint32_t value);
+ template <typename T>
+ void Emit(T value) {
+ static_assert(std::is_same_v<T, uint32_t> || std::is_same_v<T, uint16_t>,
+ "Only Integer types are allowed");
+ if (overwriting_) {
+ // Branches to labels are emitted into their placeholders here.
+ buffer_.Store<T>(overwrite_location_, value);
+ overwrite_location_ += sizeof(T);
+ } else {
+ // Other instructions are simply appended at the end here.
+ AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+ buffer_.Emit<T>(value);
+ }
+ }
+
+ void Emit16(uint32_t value) { Emit(dchecked_integral_cast<uint16_t>(value)); }
+ void Emit32(uint32_t value) { Emit(value); }
// Adjust base register and offset if needed for load/store with a large offset.
void AdjustBaseAndOffset(XRegister& base, int32_t& offset, ScratchRegisterScope& srs);
@@ -2030,9 +2114,92 @@ class Riscv64Assembler final : public Assembler {
return funct6 << 1 | enum_cast<uint32_t>(vm);
}
- static constexpr uint32_t EncodeInt5(const int32_t imm) {
- DCHECK(IsInt<5>(imm));
- return static_cast<uint32_t>(imm) & 0b11111u;
+ template <unsigned kWidth>
+ static constexpr uint32_t EncodeIntWidth(const int32_t imm) {
+ DCHECK(IsInt<kWidth>(imm));
+ return static_cast<uint32_t>(imm) & MaskLeastSignificant<uint32_t>(kWidth);
+ }
+
+ static constexpr uint32_t EncodeInt5(const int32_t imm) { return EncodeIntWidth<5>(imm); }
+ static constexpr uint32_t EncodeInt6(const int32_t imm) { return EncodeIntWidth<6>(imm); }
+
+ template <typename Reg>
+ static constexpr uint32_t EncodeShortReg(const Reg reg) {
+ DCHECK(IsShortReg(reg));
+ return enum_cast<uint32_t>(reg) - 8u;
+ }
+
+ // Rearrange given offset in the way {offset[0] | offset[1]}
+ static constexpr uint32_t EncodeOffset0_1(int32_t offset) {
+ uint32_t u_offset = static_cast<uint32_t>(offset);
+ DCHECK(IsUint<2>(u_offset));
+
+ return u_offset >> 1 | (u_offset & 1u) << 1;
+ }
+
+ // Rearrange given offset, scaled by 4, in the way {offset[5:2] | offset[7:6]}
+ static constexpr uint32_t ExtractOffset52_76(int32_t offset) {
+ DCHECK(IsAligned<4>(offset)) << "Offset should be scalable by 4";
+
+ uint32_t u_offset = static_cast<uint32_t>(offset);
+ DCHECK(IsUint<6 + 2>(u_offset));
+
+ uint32_t imm_52 = BitFieldExtract(u_offset, 2, 4);
+ uint32_t imm_76 = BitFieldExtract(u_offset, 6, 2);
+
+ return BitFieldInsert(imm_76, imm_52, 2, 4);
+ }
+
+ // Rearrange given offset, scaled by 8, in the way {offset[5:3] | offset[8:6]}
+ static constexpr uint32_t ExtractOffset53_86(int32_t offset) {
+ DCHECK(IsAligned<8>(offset)) << "Offset should be scalable by 8";
+
+ uint32_t u_offset = static_cast<uint32_t>(offset);
+ DCHECK(IsUint<6 + 3>(u_offset));
+
+ uint32_t imm_53 = BitFieldExtract(u_offset, 3, 3);
+ uint32_t imm_86 = BitFieldExtract(u_offset, 6, 3);
+
+ return BitFieldInsert(imm_86, imm_53, 3, 3);
+ }
+
+ // Rearrange given offset, scaled by 4, in the way {offset[5:2] | offset[6]}
+ static constexpr uint32_t ExtractOffset52_6(int32_t offset) {
+ DCHECK(IsAligned<4>(offset)) << "Offset should be scalable by 4";
+
+ uint32_t u_offset = static_cast<uint32_t>(offset);
+ DCHECK(IsUint<5 + 2>(u_offset));
+
+ uint32_t imm_52 = BitFieldExtract(u_offset, 2, 4);
+ uint32_t imm_6 = BitFieldExtract(u_offset, 6, 1);
+
+ return BitFieldInsert(imm_6, imm_52, 1, 4);
+ }
+
+ // Rearrange given offset, scaled by 8, in the way {offset[5:3], offset[7:6]}
+ static constexpr uint32_t ExtractOffset53_76(int32_t offset) {
+ DCHECK(IsAligned<8>(offset)) << "Offset should be scalable by 4";
+
+ uint32_t u_offset = static_cast<uint32_t>(offset);
+ DCHECK(IsUint<5 + 3>(u_offset));
+
+ uint32_t imm_53 = BitFieldExtract(u_offset, 3, 3);
+ uint32_t imm_76 = BitFieldExtract(u_offset, 6, 2);
+
+ return BitFieldInsert(imm_76, imm_53, 2, 3);
+ }
+
+ static constexpr bool IsImmCLuiEncodable(uint32_t uimm) {
+ // Instruction c.lui is odd and its immediate value is a bit tricky
+ // Its value is not a full 32 bits value, but its bits [31:12]
+ // (where the bit 17 marks the sign bit) shifted towards the bottom i.e. bits [19:0]
+ // are the meaningful ones. Since that we want a signed non-zero 6-bit immediate to
+ // keep values in the range [0, 0x1f], and the range [0xfffe0, 0xfffff] for negative values
+ // since the sign bit was bit 17 (which is now bit 5 and replicated in the higher bits too)
+ // Also encoding with immediate = 0 is reserved
+ // For more details please see 16.5 chapter is the specification
+
+ return uimm != 0u && (IsUint<5>(uimm) || IsUint<5>(uimm - 0xfffe0u));
}
// Emit helpers.
@@ -2053,7 +2220,7 @@ class Riscv64Assembler final : public Assembler {
DCHECK(IsUint<7>(opcode));
uint32_t encoding = static_cast<uint32_t>(imm12) << 20 | static_cast<uint32_t>(rs1) << 15 |
funct3 << 12 | static_cast<uint32_t>(rd) << 7 | opcode;
- Emit(encoding);
+ Emit32(encoding);
}
// R-type instruction:
@@ -2074,7 +2241,7 @@ class Riscv64Assembler final : public Assembler {
uint32_t encoding = funct7 << 25 | static_cast<uint32_t>(rs2) << 20 |
static_cast<uint32_t>(rs1) << 15 | funct3 << 12 |
static_cast<uint32_t>(rd) << 7 | opcode;
- Emit(encoding);
+ Emit32(encoding);
}
// R-type instruction variant for floating-point fused multiply-add/sub (F[N]MADD/ F[N]MSUB):
@@ -2098,7 +2265,7 @@ class Riscv64Assembler final : public Assembler {
static_cast<uint32_t>(rs2) << 20 | static_cast<uint32_t>(rs1) << 15 |
static_cast<uint32_t>(funct3) << 12 | static_cast<uint32_t>(rd) << 7 |
opcode;
- Emit(encoding);
+ Emit32(encoding);
}
// S-type instruction:
@@ -2119,7 +2286,7 @@ class Riscv64Assembler final : public Assembler {
static_cast<uint32_t>(rs2) << 20 | static_cast<uint32_t>(rs1) << 15 |
static_cast<uint32_t>(funct3) << 12 |
(static_cast<uint32_t>(imm12) & 0x1F) << 7 | opcode;
- Emit(encoding);
+ Emit32(encoding);
}
// I-type instruction variant for shifts (SLLI / SRLI / SRAI):
@@ -2144,7 +2311,7 @@ class Riscv64Assembler final : public Assembler {
uint32_t encoding = funct6 << 26 | static_cast<uint32_t>(imm6) << 20 |
static_cast<uint32_t>(rs1) << 15 | funct3 << 12 |
static_cast<uint32_t>(rd) << 7 | opcode;
- Emit(encoding);
+ Emit32(encoding);
}
// B-type instruction:
@@ -2166,7 +2333,7 @@ class Riscv64Assembler final : public Assembler {
static_cast<uint32_t>(rs2) << 20 | static_cast<uint32_t>(rs1) << 15 |
static_cast<uint32_t>(funct3) << 12 |
(imm12 & 0xfu) << 8 | (imm12 & 0x400u) >> (10 - 7) | opcode;
- Emit(encoding);
+ Emit32(encoding);
}
// U-type instruction:
@@ -2181,7 +2348,7 @@ class Riscv64Assembler final : public Assembler {
DCHECK(IsUint<5>(static_cast<uint32_t>(rd)));
DCHECK(IsUint<7>(opcode));
uint32_t encoding = imm20 << 12 | static_cast<uint32_t>(rd) << 7 | opcode;
- Emit(encoding);
+ Emit32(encoding);
}
// J-type instruction:
@@ -2200,7 +2367,224 @@ class Riscv64Assembler final : public Assembler {
uint32_t encoding = (imm20 & 0x80000u) << (31 - 19) | (imm20 & 0x03ffu) << 21 |
(imm20 & 0x400u) << (20 - 10) | (imm20 & 0x7f800u) << (12 - 11) |
static_cast<uint32_t>(rd) << 7 | opcode;
- Emit(encoding);
+ Emit32(encoding);
+ }
+
+ // Compressed Instruction Encodings
+
+ // CR-type instruction:
+ //
+ // 15 12 11 7 6 2 1 0
+ // ---------------------------------
+ // [ . . . | . . . . | . . . . | . ]
+ // [ func4 rd/rs1 rs2 op ]
+ // ---------------------------------
+ //
+ void EmitCR(uint32_t funct4, XRegister rd_rs1, XRegister rs2, uint32_t opcode) {
+ DCHECK(IsUint<4>(funct4));
+ DCHECK(IsUint<5>(static_cast<uint32_t>(rd_rs1)));
+ DCHECK(IsUint<5>(static_cast<uint32_t>(rs2)));
+ DCHECK(IsUint<2>(opcode));
+
+ uint32_t encoding = funct4 << 12 | static_cast<uint32_t>(rd_rs1) << 7 |
+ static_cast<uint32_t>(rs2) << 2 | opcode;
+ Emit16(encoding);
+ }
+
+ // CI-type instruction:
+ //
+ // 15 13 11 7 6 2 1 0
+ // ---------------------------------
+ // [ . . | | . . . . | . . . . | . ]
+ // [func3 imm rd/rs1 imm op ]
+ // ---------------------------------
+ //
+ template <typename Reg>
+ void EmitCI(uint32_t funct3, Reg rd_rs1, uint32_t imm6, uint32_t opcode) {
+ DCHECK(IsUint<3>(funct3));
+ DCHECK(IsUint<5>(static_cast<uint32_t>(rd_rs1)));
+ DCHECK(IsUint<6>(imm6));
+ DCHECK(IsUint<2>(opcode));
+
+ uint32_t immH1 = BitFieldExtract(imm6, 5, 1);
+ uint32_t immL5 = BitFieldExtract(imm6, 0, 5);
+
+ uint32_t encoding =
+ funct3 << 13 | immH1 << 12 | static_cast<uint32_t>(rd_rs1) << 7 | immL5 << 2 | opcode;
+ Emit16(encoding);
+ }
+
+ // CSS-type instruction:
+ //
+ // 15 13 12 7 6 2 1 0
+ // ---------------------------------
+ // [ . . | . . . . . | . . . . | . ]
+ // [func3 imm6 rs2 op ]
+ // ---------------------------------
+ //
+ template <typename Reg>
+ void EmitCSS(uint32_t funct3, uint32_t offset6, Reg rs2, uint32_t opcode) {
+ DCHECK(IsUint<3>(funct3));
+ DCHECK(IsUint<6>(offset6));
+ DCHECK(IsUint<5>(static_cast<uint32_t>(rs2)));
+ DCHECK(IsUint<2>(opcode));
+
+ uint32_t encoding = funct3 << 13 | offset6 << 7 | static_cast<uint32_t>(rs2) << 2 | opcode;
+ Emit16(encoding);
+ }
+
+ // CIW-type instruction:
+ //
+ // 15 13 12 5 4 2 1 0
+ // ---------------------------------
+ // [ . . | . . . . . . . | . . | . ]
+ // [func3 imm8 rd' op ]
+ // ---------------------------------
+ //
+ void EmitCIW(uint32_t funct3, uint32_t imm8, XRegister rd_s, uint32_t opcode) {
+ DCHECK(IsUint<3>(funct3));
+ DCHECK(IsUint<8>(imm8));
+ DCHECK(IsShortReg(rd_s)) << rd_s;
+ DCHECK(IsUint<2>(opcode));
+
+ uint32_t encoding = funct3 << 13 | imm8 << 5 | EncodeShortReg(rd_s) << 2 | opcode;
+ Emit16(encoding);
+ }
+
+ // CL/S-type instruction:
+ //
+ // 15 13 12 10 9 7 6 5 4 2 1 0
+ // ---------------------------------
+ // [ . . | . . | . . | . | . . | . ]
+ // [func3 imm rs1' imm rds2' op ]
+ // ---------------------------------
+ //
+ template <typename Reg>
+ void EmitCM(uint32_t funct3, uint32_t imm5, XRegister rs1_s, Reg rd_rs2_s, uint32_t opcode) {
+ DCHECK(IsUint<3>(funct3));
+ DCHECK(IsUint<5>(imm5));
+ DCHECK(IsShortReg(rs1_s)) << rs1_s;
+ DCHECK(IsShortReg(rd_rs2_s)) << rd_rs2_s;
+ DCHECK(IsUint<2>(opcode));
+
+ uint32_t immH3 = BitFieldExtract(imm5, 2, 3);
+ uint32_t immL2 = BitFieldExtract(imm5, 0, 2);
+
+ uint32_t encoding = funct3 << 13 | immH3 << 10 | EncodeShortReg(rs1_s) << 7 | immL2 << 5 |
+ EncodeShortReg(rd_rs2_s) << 2 | opcode;
+ Emit16(encoding);
+ }
+
+ // CA-type instruction:
+ //
+ // 15 10 9 7 6 5 4 2 1 0
+ // ---------------------------------
+ // [ . . . . . | . . | . | . . | . ]
+ // [ funct6 rds1' funct2 rs2' op]
+ // ---------------------------------
+ //
+ void EmitCA(
+ uint32_t funct6, XRegister rd_rs1_s, uint32_t funct2, uint32_t rs2_v, uint32_t opcode) {
+ DCHECK(IsUint<6>(funct6));
+ DCHECK(IsShortReg(rd_rs1_s)) << rd_rs1_s;
+ DCHECK(IsUint<2>(funct2));
+ DCHECK(IsUint<3>(rs2_v));
+ DCHECK(IsUint<2>(opcode));
+
+ uint32_t encoding =
+ funct6 << 10 | EncodeShortReg(rd_rs1_s) << 7 | funct2 << 5 | rs2_v << 2 | opcode;
+ Emit16(encoding);
+ }
+
+ void EmitCAReg(
+ uint32_t funct6, XRegister rd_rs1_s, uint32_t funct2, XRegister rs2_s, uint32_t opcode) {
+ DCHECK(IsShortReg(rs2_s)) << rs2_s;
+ EmitCA(funct6, rd_rs1_s, funct2, EncodeShortReg(rs2_s), opcode);
+ }
+
+ void EmitCAImm(
+ uint32_t funct6, XRegister rd_rs1_s, uint32_t funct2, uint32_t funct3, uint32_t opcode) {
+ EmitCA(funct6, rd_rs1_s, funct2, funct3, opcode);
+ }
+
+ // CB-type instruction:
+ //
+ // 15 13 12 10 9 7 6 2 1 0
+ // ---------------------------------
+ // [ . . | . . | . . | . . . . | . ]
+ // [func3 offset rs1' offset op ]
+ // ---------------------------------
+ //
+ void EmitCB(uint32_t funct3, int32_t offset8, XRegister rd_rs1_s, uint32_t opcode) {
+ DCHECK(IsUint<3>(funct3));
+ DCHECK(IsUint<8>(offset8));
+ DCHECK(IsShortReg(rd_rs1_s)) << rd_rs1_s;
+ DCHECK(IsUint<2>(opcode));
+
+ uint32_t offsetH3 = BitFieldExtract<uint32_t>(offset8, 5, 3);
+ uint32_t offsetL5 = BitFieldExtract<uint32_t>(offset8, 0, 5);
+
+ uint32_t encoding =
+ funct3 << 13 | offsetH3 << 10 | EncodeShortReg(rd_rs1_s) << 7 | offsetL5 << 2 | opcode;
+ Emit16(encoding);
+ }
+
+ // Wrappers for EmitCB with different imm bit permutation
+ void EmitCBBranch(uint32_t funct3, int32_t offset, XRegister rs1_s, uint32_t opcode) {
+ DCHECK(IsInt<9>(offset));
+ DCHECK_ALIGNED(offset, 2);
+
+ uint32_t u_offset = static_cast<uint32_t>(offset);
+
+ // offset[8|4:3]
+ uint32_t offsetH3 = (BitFieldExtract(u_offset, 8, 1) << 2) |
+ BitFieldExtract(u_offset, 3, 2);
+ // offset[7:6|2:1|5]
+ uint32_t offsetL5 = (BitFieldExtract(u_offset, 6, 2) << 3) |
+ (BitFieldExtract(u_offset, 1, 2) << 1) |
+ BitFieldExtract(u_offset, 5, 1);
+
+ EmitCB(funct3, BitFieldInsert(offsetL5, offsetH3, 5, 3), rs1_s, opcode);
+ }
+
+ void EmitCBArithmetic(
+ uint32_t funct3, uint32_t funct2, uint32_t imm, XRegister rd_s, uint32_t opcode) {
+ uint32_t imm_5 = BitFieldExtract(imm, 5, 1);
+ uint32_t immH3 = BitFieldInsert(funct2, imm_5, 2, 1);
+ uint32_t immL5 = BitFieldExtract(imm, 0, 5);
+
+ EmitCB(funct3, BitFieldInsert(immL5, immH3, 5, 3), rd_s, opcode);
+ }
+
+ // CJ-type instruction:
+ //
+ // 15 13 12 2 1 0
+ // ---------------------------------
+ // [ . . | . . . . . . . . . . | . ]
+ // [func3 jump target 11 op ]
+ // ---------------------------------
+ //
+ void EmitCJ(uint32_t funct3, int32_t offset, uint32_t opcode) {
+ DCHECK_ALIGNED(offset, 2);
+ DCHECK(IsInt<12>(offset)) << offset;
+ DCHECK(IsUint<3>(funct3));
+ DCHECK(IsUint<2>(opcode));
+
+ uint32_t uoffset = static_cast<uint32_t>(offset);
+ // offset[11|4|9:8|10|6|7|3:1|5]
+ uint32_t jumpt = (BitFieldExtract(uoffset, 11, 1) << 10) |
+ (BitFieldExtract(uoffset, 4, 1) << 9) |
+ (BitFieldExtract(uoffset, 8, 2) << 7) |
+ (BitFieldExtract(uoffset, 10, 1) << 6) |
+ (BitFieldExtract(uoffset, 6, 1) << 5) |
+ (BitFieldExtract(uoffset, 7, 1) << 4) |
+ (BitFieldExtract(uoffset, 1, 3) << 1) |
+ BitFieldExtract(uoffset, 5, 1);
+
+ DCHECK(IsUint<11>(jumpt));
+
+ uint32_t encoding = funct3 << 13 | jumpt << 2 | opcode;
+ Emit16(encoding);
}
ArenaVector<Branch> branches_;
diff --git a/compiler/utils/riscv64/assembler_riscv64_test.cc b/compiler/utils/riscv64/assembler_riscv64_test.cc
index a8c1fbd9be..87c7641576 100644
--- a/compiler/utils/riscv64/assembler_riscv64_test.cc
+++ b/compiler/utils/riscv64/assembler_riscv64_test.cc
@@ -52,33 +52,39 @@ class AssemblerRISCV64Test : public AssemblerTest<Riscv64Assembler,
InstructionSet GetIsa() override { return InstructionSet::kRiscv64; }
- // Clang's assembler takes advantage of certain extensions for emitting constants with `li`
- // but our assembler does not. For now, we use a simple `-march` to avoid the divergence.
- // TODO(riscv64): Implement these more efficient patterns in assembler.
- void SetUseSimpleMarch(bool value) {
- use_simple_march_ = value;
- }
+ class ScopedMarchOverride {
+ public:
+ ScopedMarchOverride(AssemblerRISCV64Test* test, const std::string& march)
+ : test_(test), old_override_(test->march_override_) {
+ test->march_override_ = march;
+ }
+
+ ~ScopedMarchOverride() {
+ test_->march_override_ = old_override_;
+ }
+
+ private:
+ AssemblerRISCV64Test* const test_;
+ std::optional<std::string> const old_override_;
+ };
+
+ class ScopedCSuppression {
+ public:
+ explicit ScopedCSuppression(AssemblerRISCV64Test* test)
+ : smo_(test, "-march=rv64imafdv_zba_zbb") {}
+
+ private:
+ ScopedMarchOverride smo_;
+ };
std::vector<std::string> GetAssemblerCommand() override {
std::vector<std::string> result = Base::GetAssemblerCommand();
- if (use_simple_march_) {
+ if (march_override_.has_value()) {
auto it = std::find_if(result.begin(),
result.end(),
[](const std::string& s) { return StartsWith(s, "-march="); });
CHECK(it != result.end());
- *it = "-march=rv64imafdv";
- }
- return result;
- }
-
- std::vector<std::string> GetDisassemblerCommand() override {
- std::vector<std::string> result = Base::GetDisassemblerCommand();
- if (use_simple_march_) {
- auto it = std::find_if(result.begin(),
- result.end(),
- [](const std::string& s) { return StartsWith(s, "--mattr="); });
- CHECK(it != result.end());
- *it = "--mattr=+F,+D,+A,+V";
+ *it = march_override_.value();
}
return result;
}
@@ -167,6 +173,20 @@ class AssemblerRISCV64Test : public AssemblerTest<Riscv64Assembler,
return ArrayRef<const XRegister>(kXRegisters);
}
+ ArrayRef<const XRegister> GetRegistersShort() {
+ static constexpr XRegister kXRegistersShort[] = {
+ S0,
+ S1,
+ A0,
+ A1,
+ A2,
+ A3,
+ A4,
+ A5,
+ };
+ return ArrayRef<const XRegister>(kXRegistersShort);
+ }
+
ArrayRef<const FRegister> GetFPRegisters() override {
static constexpr FRegister kFRegisters[] = {
FT0,
@@ -213,6 +233,20 @@ class AssemblerRISCV64Test : public AssemblerTest<Riscv64Assembler,
return ArrayRef<const VRegister>(kVRegisters);
}
+ ArrayRef<const FRegister> GetFPRegistersShort() {
+ static constexpr FRegister kFRegistersShort[] = {
+ FS0,
+ FS1,
+ FA0,
+ FA1,
+ FA2,
+ FA3,
+ FA4,
+ FA5,
+ };
+ return ArrayRef<const FRegister>(kFRegistersShort);
+ }
+
std::string GetSecondaryRegisterName(const XRegister& reg) override {
CHECK(secondary_register_names_.find(reg) != secondary_register_names_.end());
return secondary_register_names_[reg];
@@ -954,6 +988,186 @@ class AssemblerRISCV64Test : public AssemblerTest<Riscv64Assembler,
return str;
}
+ template <typename Reg, typename Imm>
+ std::string RepeatCTemplateRegImm(void (Riscv64Assembler::*f)(Reg, Imm),
+ ArrayRef<const Reg> registers,
+ std::string (Base::*GetName)(const Reg&),
+ int imm_bits,
+ int shift,
+ bool no_zero_imm,
+ const std::string& fmt) {
+ auto imms = CreateImmediateValuesBits(abs(imm_bits), /*as_uint=*/imm_bits > 0, shift);
+
+ CHECK(f != nullptr);
+ std::string str;
+ for (Reg reg : registers) {
+ for (int64_t imm_raw : imms) {
+ if (no_zero_imm && imm_raw == 0) {
+ continue;
+ }
+
+ Imm imm = CreateImmediate(imm_raw);
+ (GetAssembler()->*f)(reg, imm);
+
+ std::string base = fmt;
+ ReplaceReg(REG_TOKEN, (this->*GetName)(reg), &base);
+ ReplaceImm(imm, /*bias=*/0, /*multiplier=*/1, &base);
+ str += base;
+ str += "\n";
+ }
+ }
+ return str;
+ }
+
+ template <typename Imm>
+ std::string RepeatCRImm(void (Riscv64Assembler::*f)(XRegister, Imm),
+ bool is_short,
+ bool no_zero_reg,
+ bool no_zero_imm,
+ int imm_bits,
+ int shift,
+ const std::string& fmt) {
+ auto regs = is_short ? GetRegistersShort() : GetRegisters();
+ if (no_zero_reg) {
+ CHECK(!is_short);
+ CHECK_EQ(regs[0], Zero);
+ regs = regs.SubArray(1);
+ }
+ return RepeatCTemplateRegImm(
+ f, regs, &AssemblerRISCV64Test::GetRegisterName, imm_bits, shift, no_zero_imm, fmt);
+ }
+
+ template <typename Imm>
+ std::string RepeatCFImm(void (Riscv64Assembler::*f)(FRegister, Imm),
+ int imm_bits,
+ int shift,
+ const std::string& fmt) {
+ auto regs = GetFPRegisters();
+ return RepeatCTemplateRegImm(
+ f, regs, &AssemblerRISCV64Test::GetFPRegName, imm_bits, shift, /*no_zero_imm=*/false, fmt);
+ }
+
+ template <typename Reg1>
+ std::string RepeatTemplatedShortRegistersImm(void (Riscv64Assembler::*f)(Reg1,
+ XRegister,
+ int32_t),
+ ArrayRef<const Reg1> reg1_registers,
+ std::string (Base::*GetName1)(const Reg1&),
+ int imm_bits,
+ int shift,
+ bool no_zero_imm,
+ const std::string& fmt) {
+ CHECK(f != nullptr);
+ auto imms = CreateImmediateValuesBits(abs(imm_bits), imm_bits > 0, shift);
+ std::string str;
+ for (Reg1 reg1 : reg1_registers) {
+ for (XRegister reg2 : GetRegistersShort()) {
+ for (int64_t imm_raw : imms) {
+ if (no_zero_imm && imm_raw == 0) {
+ continue;
+ }
+
+ int32_t imm = CreateImmediate(imm_raw);
+ (GetAssembler()->*f)(reg1, reg2, imm);
+
+ std::string base = fmt;
+ ReplaceReg(REG1_TOKEN, (this->*GetName1)(reg1), &base);
+ ReplaceReg(REG2_TOKEN, GetRegisterName(reg2), &base);
+ ReplaceImm(imm, /*bias=*/0, /*multiplier=*/1, &base);
+ str += base;
+ str += "\n";
+ }
+ }
+ }
+ return str;
+ }
+
+ std::string RepeatCRRImm(void (Riscv64Assembler::*f)(XRegister, XRegister, int32_t),
+ int imm_bits,
+ int shift,
+ const std::string& fmt) {
+ return RepeatTemplatedShortRegistersImm(f,
+ GetRegistersShort(),
+ &AssemblerRISCV64Test::GetRegisterName,
+ imm_bits,
+ shift,
+ /*no_zero_imm=*/false,
+ fmt);
+ }
+
+ std::string RepeatCFRImm(void (Riscv64Assembler::*f)(FRegister, XRegister, int32_t),
+ int imm_bits,
+ int shift,
+ const std::string& fmt) {
+ return RepeatTemplatedShortRegistersImm(f,
+ GetFPRegistersShort(),
+ &AssemblerRISCV64Test::GetFPRegName,
+ imm_bits,
+ shift,
+ /*no_zero_imm=*/false,
+ fmt);
+ }
+
+ std::string RepeatCRRShort(void (Riscv64Assembler::*f)(XRegister, XRegister),
+ const std::string& fmt) {
+ return RepeatTemplatedRegisters(f,
+ GetRegistersShort(),
+ GetRegistersShort(),
+ &AssemblerRISCV64Test::GetRegisterName,
+ &AssemblerRISCV64Test::GetRegisterName,
+ fmt);
+ }
+
+ std::string RepeatCRRNonZero(void (Riscv64Assembler::*f)(XRegister, XRegister),
+ const std::string& fmt) {
+ auto regs = GetRegisters();
+ CHECK_EQ(regs[0], Zero);
+ auto regs_no_zero = regs.SubArray(1);
+ return RepeatTemplatedRegisters(f,
+ regs_no_zero,
+ regs_no_zero,
+ &AssemblerRISCV64Test::GetRegisterName,
+ &AssemblerRISCV64Test::GetRegisterName,
+ fmt);
+ }
+
+ std::string RepeatCRShort(void (Riscv64Assembler::*f)(XRegister), const std::string& fmt) {
+ return RepeatTemplatedRegister(
+ f, GetRegistersShort(), &AssemblerRISCV64Test::GetRegisterName, fmt);
+ }
+
+ template <typename Imm>
+ std::string RepeatImm(void (Riscv64Assembler::*f)(Imm),
+ bool no_zero_imm,
+ int imm_bits,
+ int shift,
+ const std::string& fmt) {
+ auto imms = CreateImmediateValuesBits(abs(imm_bits), imm_bits > 0, shift);
+ std::string str;
+ for (int64_t imm_raw : imms) {
+ if (no_zero_imm && imm_raw == 0) {
+ continue;
+ }
+
+ Imm imm = CreateImmediate(imm_raw);
+ (GetAssembler()->*f)(imm);
+
+ std::string base = fmt;
+ ReplaceImm(imm, /*bias=*/0, /*multiplier=*/1, &base);
+ str += base;
+ str += "\n";
+ }
+
+ return str;
+ }
+
+ std::string RepeatRNoZero(void (Riscv64Assembler::*f)(XRegister), const std::string& fmt) {
+ auto regs = GetRegisters();
+ CHECK_EQ(regs[0], Zero);
+ return RepeatTemplatedRegister(
+ f, regs.SubArray(1), &AssemblerRISCV64Test::GetRegisterName, fmt);
+ }
+
template <typename Reg1, typename Reg2>
std::string RepeatTemplatedRegistersRoundingMode(
void (Riscv64Assembler::*f)(Reg1, Reg2, FPRoundingMode),
@@ -1973,12 +2187,13 @@ class AssemblerRISCV64Test : public AssemblerTest<Riscv64Assembler,
std::map<XRegister, std::string, RISCV64CpuRegisterCompare> secondary_register_names_;
std::unique_ptr<const Riscv64InstructionSetFeatures> instruction_set_features_;
- bool use_simple_march_ = false;
+ std::optional<std::string> march_override_;
};
TEST_F(AssemblerRISCV64Test, Toolchain) { EXPECT_TRUE(CheckTools()); }
TEST_F(AssemblerRISCV64Test, Lui) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRIb(&Riscv64Assembler::Lui, 20, "lui {reg}, {imm}"), "Lui");
}
@@ -1987,21 +2202,25 @@ TEST_F(AssemblerRISCV64Test, Auipc) {
}
TEST_F(AssemblerRISCV64Test, Jal) {
+ ScopedCSuppression scs(this);
// TODO(riscv64): Change "-19, 2" to "-20, 1" for "C" Standard Extension.
DriverStr(RepeatRIbS(&Riscv64Assembler::Jal, -19, 2, "jal {reg}, {imm}\n"), "Jal");
}
TEST_F(AssemblerRISCV64Test, Jalr) {
+ ScopedCSuppression scs(this);
// TODO(riscv64): Change "-11, 2" to "-12, 1" for "C" Standard Extension.
DriverStr(RepeatRRIb(&Riscv64Assembler::Jalr, -12, "jalr {reg1}, {reg2}, {imm}\n"), "Jalr");
}
TEST_F(AssemblerRISCV64Test, Beq) {
+ ScopedCSuppression scs(this);
// TODO(riscv64): Change "-11, 2" to "-12, 1" for "C" Standard Extension.
DriverStr(RepeatRRIbS(&Riscv64Assembler::Beq, -11, 2, "beq {reg1}, {reg2}, {imm}\n"), "Beq");
}
TEST_F(AssemblerRISCV64Test, Bne) {
+ ScopedCSuppression scs(this);
// TODO(riscv64): Change "-11, 2" to "-12, 1" for "C" Standard Extension.
DriverStr(RepeatRRIbS(&Riscv64Assembler::Bne, -11, 2, "bne {reg1}, {reg2}, {imm}\n"), "Bne");
}
@@ -2027,50 +2246,62 @@ TEST_F(AssemblerRISCV64Test, Bgeu) {
}
TEST_F(AssemblerRISCV64Test, Lb) {
+ // Note: There is no 16-bit instruction for `Lb()`.
DriverStr(RepeatRRIb(&Riscv64Assembler::Lb, -12, "lb {reg1}, {imm}({reg2})"), "Lb");
}
TEST_F(AssemblerRISCV64Test, Lh) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Lh, -12, "lh {reg1}, {imm}({reg2})"), "Lh");
}
TEST_F(AssemblerRISCV64Test, Lw) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Lw, -12, "lw {reg1}, {imm}({reg2})"), "Lw");
}
TEST_F(AssemblerRISCV64Test, Ld) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Ld, -12, "ld {reg1}, {imm}({reg2})"), "Ld");
}
TEST_F(AssemblerRISCV64Test, Lbu) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Lbu, -12, "lbu {reg1}, {imm}({reg2})"), "Lbu");
}
TEST_F(AssemblerRISCV64Test, Lhu) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Lhu, -12, "lhu {reg1}, {imm}({reg2})"), "Lhu");
}
TEST_F(AssemblerRISCV64Test, Lwu) {
+ // Note: There is no 16-bit instruction for `Lwu()`.
DriverStr(RepeatRRIb(&Riscv64Assembler::Lwu, -12, "lwu {reg1}, {imm}({reg2})"), "Lwu");
}
TEST_F(AssemblerRISCV64Test, Sb) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Sb, -12, "sb {reg1}, {imm}({reg2})"), "Sb");
}
TEST_F(AssemblerRISCV64Test, Sh) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Sh, -12, "sh {reg1}, {imm}({reg2})"), "Sh");
}
TEST_F(AssemblerRISCV64Test, Sw) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Sw, -12, "sw {reg1}, {imm}({reg2})"), "Sw");
}
TEST_F(AssemblerRISCV64Test, Sd) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Sd, -12, "sd {reg1}, {imm}({reg2})"), "Sd");
}
TEST_F(AssemblerRISCV64Test, Addi) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Addi, -12, "addi {reg1}, {reg2}, {imm}"), "Addi");
}
@@ -2091,26 +2322,32 @@ TEST_F(AssemblerRISCV64Test, Ori) {
}
TEST_F(AssemblerRISCV64Test, Andi) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Andi, -12, "andi {reg1}, {reg2}, {imm}"), "Andi");
}
TEST_F(AssemblerRISCV64Test, Slli) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Slli, 6, "slli {reg1}, {reg2}, {imm}"), "Slli");
}
TEST_F(AssemblerRISCV64Test, Srli) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Srli, 6, "srli {reg1}, {reg2}, {imm}"), "Srli");
}
TEST_F(AssemblerRISCV64Test, Srai) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Srai, 6, "srai {reg1}, {reg2}, {imm}"), "Srai");
}
TEST_F(AssemblerRISCV64Test, Add) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRR(&Riscv64Assembler::Add, "add {reg1}, {reg2}, {reg3}"), "Add");
}
TEST_F(AssemblerRISCV64Test, Sub) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRR(&Riscv64Assembler::Sub, "sub {reg1}, {reg2}, {reg3}"), "Sub");
}
@@ -2123,14 +2360,17 @@ TEST_F(AssemblerRISCV64Test, Sltu) {
}
TEST_F(AssemblerRISCV64Test, Xor) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRR(&Riscv64Assembler::Xor, "xor {reg1}, {reg2}, {reg3}"), "Xor");
}
TEST_F(AssemblerRISCV64Test, Or) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRR(&Riscv64Assembler::Or, "or {reg1}, {reg2}, {reg3}"), "Or");
}
TEST_F(AssemblerRISCV64Test, And) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRR(&Riscv64Assembler::And, "and {reg1}, {reg2}, {reg3}"), "And");
}
@@ -2147,6 +2387,7 @@ TEST_F(AssemblerRISCV64Test, Sra) {
}
TEST_F(AssemblerRISCV64Test, Addiw) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRIb(&Riscv64Assembler::Addiw, -12, "addiw {reg1}, {reg2}, {imm}"), "Addiw");
}
@@ -2166,10 +2407,12 @@ TEST_F(AssemblerRISCV64Test, Sraiw) {
}
TEST_F(AssemblerRISCV64Test, Addw) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRR(&Riscv64Assembler::Addw, "addw {reg1}, {reg2}, {reg3}"), "Addw");
}
TEST_F(AssemblerRISCV64Test, Subw) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRR(&Riscv64Assembler::Subw, "subw {reg1}, {reg2}, {reg3}"), "Subw");
}
@@ -2191,6 +2434,7 @@ TEST_F(AssemblerRISCV64Test, Ecall) {
}
TEST_F(AssemblerRISCV64Test, Ebreak) {
+ ScopedCSuppression scs(this);
__ Ebreak();
DriverStr("ebreak\n", "Ebreak");
}
@@ -2241,6 +2485,7 @@ TEST_F(AssemblerRISCV64Test, FenceI) {
}
TEST_F(AssemblerRISCV64Test, Mul) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRRR(&Riscv64Assembler::Mul, "mul {reg1}, {reg2}, {reg3}"), "Mul");
}
@@ -2433,18 +2678,22 @@ TEST_F(AssemblerRISCV64Test, Csrrci) {
}
TEST_F(AssemblerRISCV64Test, FLw) {
+ // Note: 16-bit variants of `flw` are not available on riscv64.
DriverStr(RepeatFRIb(&Riscv64Assembler::FLw, -12, "flw {reg1}, {imm}({reg2})"), "FLw");
}
TEST_F(AssemblerRISCV64Test, FLd) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatFRIb(&Riscv64Assembler::FLd, -12, "fld {reg1}, {imm}({reg2})"), "FLw");
}
TEST_F(AssemblerRISCV64Test, FSw) {
+ // Note: 16-bit variants of `fsw` are not available on riscv64.
DriverStr(RepeatFRIb(&Riscv64Assembler::FSw, 2, "fsw {reg1}, {imm}({reg2})"), "FSw");
}
TEST_F(AssemblerRISCV64Test, FSd) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatFRIb(&Riscv64Assembler::FSd, 2, "fsd {reg1}, {imm}({reg2})"), "FSd");
}
@@ -2874,7 +3123,384 @@ TEST_F(AssemblerRISCV64Test, FClassD) {
DriverStr(RepeatrF(&Riscv64Assembler::FClassD, "fclass.d {reg1}, {reg2}"), "FClassD");
}
+TEST_F(AssemblerRISCV64Test, CLwsp) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CLwsp,
+ /*is_short=*/false,
+ /*no_zero_reg=*/true,
+ /*no_zero_imm=*/false,
+ /*imm_bits=*/6,
+ /*shift=*/2,
+ "c.lwsp {reg}, {imm}(sp)"),
+ "CLwsp");
+}
+
+TEST_F(AssemblerRISCV64Test, CLdsp) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CLdsp,
+ /*is_short=*/false,
+ /*no_zero_reg=*/true,
+ /*no_zero_imm=*/false,
+ /*imm_bits=*/6,
+ /*shift=*/3,
+ "c.ldsp {reg}, {imm}(sp)"),
+ "CLdsp");
+}
+
+TEST_F(AssemblerRISCV64Test, CFLdsp) {
+ DriverStr(RepeatCFImm(
+ &Riscv64Assembler::CFLdsp, /*imm_bits=*/6, /*shift=*/3, "c.fldsp {reg}, {imm}(sp)"),
+ "CFLdsp");
+}
+
+TEST_F(AssemblerRISCV64Test, CSwsp) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CSwsp,
+ /*is_short=*/false,
+ /*no_zero_reg=*/false,
+ /*no_zero_imm=*/false,
+ /*imm_bits=*/6,
+ /*shift=*/2,
+ "c.swsp {reg}, {imm}(sp)"),
+ "CLwsp");
+}
+
+TEST_F(AssemblerRISCV64Test, CSdsp) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CSdsp,
+ /*is_short=*/false,
+ /*no_zero_reg=*/false,
+ /*no_zero_imm=*/false,
+ /*imm_bits=*/6,
+ /*shift=*/3,
+ "c.sdsp {reg}, {imm}(sp)"),
+ "CLdsp");
+}
+
+TEST_F(AssemblerRISCV64Test, CFSdsp) {
+ DriverStr(RepeatCFImm(
+ &Riscv64Assembler::CFSdsp, /*imm_bits=*/6, /*shift=*/3, "c.fsdsp {reg}, {imm}(sp)"),
+ "CFLdsp");
+}
+
+TEST_F(AssemblerRISCV64Test, CLw) {
+ DriverStr(RepeatCRRImm(
+ &Riscv64Assembler::CLw, /*imm_bits=*/5, /*shift=*/2, "c.lw {reg1}, {imm}({reg2})"),
+ "CLw");
+}
+
+TEST_F(AssemblerRISCV64Test, CLd) {
+ DriverStr(RepeatCRRImm(
+ &Riscv64Assembler::CLd, /*imm_bits=*/5, /*shift=*/3, "c.ld {reg1}, {imm}({reg2})"),
+ "CLd");
+}
+
+TEST_F(AssemblerRISCV64Test, CFLd) {
+ DriverStr(RepeatCFRImm(&Riscv64Assembler::CFLd,
+ /*imm_bits=*/5,
+ /*shift=*/3,
+ "c.fld {reg1}, {imm}({reg2})"),
+ "CFLd");
+}
+
+TEST_F(AssemblerRISCV64Test, CSw) {
+ DriverStr(RepeatCRRImm(
+ &Riscv64Assembler::CSw, /*imm_bits=*/5, /*shift=*/2, "c.sw {reg1}, {imm}({reg2})"),
+ "CSw");
+}
+
+TEST_F(AssemblerRISCV64Test, CSd) {
+ DriverStr(RepeatCRRImm(
+ &Riscv64Assembler::CSd, /*imm_bits=*/5, /*shift=*/3, "c.sd {reg1}, {imm}({reg2})"),
+ "CSd");
+}
+
+TEST_F(AssemblerRISCV64Test, CFSd) {
+ DriverStr(RepeatCFRImm(&Riscv64Assembler::CFSd,
+ /*imm_bits=*/5,
+ /*shift=*/3,
+ "c.fsd {reg1}, {imm}({reg2})"),
+ "CFSd");
+}
+
+TEST_F(AssemblerRISCV64Test, CLi) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CLi,
+ /*is_short=*/false,
+ /*no_zero_reg=*/true,
+ /*no_zero_imm=*/false,
+ /*imm_bits=*/-6,
+ /*shift=*/0,
+ "c.li {reg}, {imm}"),
+ "CLi");
+}
+
+TEST_F(AssemblerRISCV64Test, CLui) {
+ std::string str;
+ auto imms = CreateImmediateValuesBits(/*imm_bits=*/5, /*as_uint=*/true);
+ for (uint32_t v = 0xfffe0; v <= 0xfffff; ++v) {
+ imms.push_back(v);
+ }
+
+ for (XRegister reg : GetRegisters()) {
+ for (int64_t imm_raw : imms) {
+ if (imm_raw == 0) {
+ continue;
+ }
+
+ if (reg == Zero || reg == SP) {
+ continue;
+ }
+
+ uint32_t imm = CreateImmediate(imm_raw);
+ GetAssembler()->CLui(reg, imm);
+
+ std::string base = "c.lui {reg}, {imm}";
+ ReplaceReg(REG_TOKEN, GetRegisterName(reg), &base);
+ ReplaceImm(imm, /*bias=*/0, /*multiplier=*/1, &base);
+ str += base;
+ str += "\n";
+ }
+ }
+
+ DriverStr(str, "CLui");
+}
+
+TEST_F(AssemblerRISCV64Test, CAddi) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CAddi,
+ /*is_short=*/false,
+ /*no_zero_reg=*/true,
+ /*no_zero_imm=*/true,
+ /*imm_bits=*/-6,
+ /*shift=*/0,
+ "c.addi {reg}, {imm}"),
+ "CAddi");
+}
+
+TEST_F(AssemblerRISCV64Test, CAddiw) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CAddiw,
+ /*is_short=*/false,
+ /*no_zero_reg=*/true,
+ /*no_zero_imm=*/false,
+ /*imm_bits=*/-6,
+ /*shift=*/0,
+ "c.addiw {reg}, {imm}"),
+ "CAddiw");
+}
+
+TEST_F(AssemblerRISCV64Test, CAddi16Sp) {
+ DriverStr(RepeatImm(&Riscv64Assembler::CAddi16Sp,
+ /*no_zero_imm=*/true,
+ /*imm_bits=*/-6,
+ /*shift=*/4,
+ "c.addi16sp sp, {imm}"),
+ "CAddi16Sp");
+}
+
+TEST_F(AssemblerRISCV64Test, CAddi4Spn) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CAddi4Spn,
+ /*is_short=*/true,
+ /*no_zero_reg=*/false,
+ /*no_zero_imm=*/true,
+ /*imm_bits=*/8,
+ /*shift=*/2,
+ "c.addi4spn {reg}, sp, {imm}"),
+ "CAddi4Spn");
+}
+
+TEST_F(AssemblerRISCV64Test, CSlli) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CSlli,
+ /*is_short=*/false,
+ /*no_zero_reg=*/true,
+ /*no_zero_imm=*/true,
+ /*imm_bits=*/6,
+ /*shift=*/0,
+ "c.slli {reg}, {imm}"),
+ "CSlli");
+}
+
+TEST_F(AssemblerRISCV64Test, CSRli) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CSrli,
+ /*is_short=*/true,
+ /*no_zero_reg=*/false,
+ /*no_zero_imm=*/true,
+ /*imm_bits=*/6,
+ /*shift=*/0,
+ "c.srli {reg}, {imm}"),
+ "CSRli");
+}
+
+TEST_F(AssemblerRISCV64Test, CSRai) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CSrai,
+ /*is_short=*/true,
+ /*no_zero_reg=*/false,
+ /*no_zero_imm=*/true,
+ /*imm_bits=*/6,
+ /*shift=*/0,
+ "c.srai {reg}, {imm}"),
+ "CSRai");
+}
+
+TEST_F(AssemblerRISCV64Test, CAndi) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CAndi,
+ /*is_short=*/true,
+ /*no_zero_reg=*/false,
+ /*no_zero_imm=*/false,
+ /*imm_bits=*/-6,
+ /*shift=*/0,
+ "c.andi {reg}, {imm}"),
+ "CAndi");
+}
+
+TEST_F(AssemblerRISCV64Test, CMv) {
+ DriverStr(RepeatCRRNonZero(&Riscv64Assembler::CMv, "c.mv {reg1}, {reg2}"), "CMv");
+}
+
+TEST_F(AssemblerRISCV64Test, CAdd) {
+ DriverStr(RepeatCRRNonZero(&Riscv64Assembler::CAdd, "c.add {reg1}, {reg2}"), "CAdd");
+}
+
+TEST_F(AssemblerRISCV64Test, CAnd) {
+ DriverStr(RepeatCRRShort(&Riscv64Assembler::CAnd, "c.and {reg1}, {reg2}"), "CAnd");
+}
+
+TEST_F(AssemblerRISCV64Test, COr) {
+ DriverStr(RepeatCRRShort(&Riscv64Assembler::COr, "c.or {reg1}, {reg2}"), "COr");
+}
+
+TEST_F(AssemblerRISCV64Test, CXor) {
+ DriverStr(RepeatCRRShort(&Riscv64Assembler::CXor, "c.xor {reg1}, {reg2}"), "CXor");
+}
+
+TEST_F(AssemblerRISCV64Test, CSub) {
+ DriverStr(RepeatCRRShort(&Riscv64Assembler::CSub, "c.sub {reg1}, {reg2}"), "CSub");
+}
+
+TEST_F(AssemblerRISCV64Test, CAddw) {
+ DriverStr(RepeatCRRShort(&Riscv64Assembler::CAddw, "c.addw {reg1}, {reg2}"), "CAddw");
+}
+
+TEST_F(AssemblerRISCV64Test, CSubw) {
+ DriverStr(RepeatCRRShort(&Riscv64Assembler::CSubw, "c.subw {reg1}, {reg2}"), "CSubw");
+}
+
+TEST_F(AssemblerRISCV64Test, CLbu) {
+ DriverStr(RepeatCRRImm(&Riscv64Assembler::CLbu,
+ /*imm_bits=*/2,
+ /*shift=*/0,
+ "c.lbu {reg1}, {imm}({reg2})"),
+ "CLbu");
+}
+
+TEST_F(AssemblerRISCV64Test, CLhu) {
+ DriverStr(RepeatCRRImm(&Riscv64Assembler::CLhu,
+ /*imm_bits=*/1,
+ /*shift=*/1,
+ "c.lhu {reg1}, {imm}({reg2})"),
+ "CLhu");
+}
+
+TEST_F(AssemblerRISCV64Test, CLh) {
+ DriverStr(RepeatCRRImm(&Riscv64Assembler::CLh,
+ /*imm_bits=*/1,
+ /*shift=*/1,
+ "c.lh {reg1}, {imm}({reg2})"),
+ "CLh");
+}
+
+TEST_F(AssemblerRISCV64Test, CSb) {
+ DriverStr(RepeatCRRImm(&Riscv64Assembler::CSb,
+ /*imm_bits=*/2,
+ /*shift=*/0,
+ "c.sb {reg1}, {imm}({reg2})"),
+ "CSb");
+}
+
+TEST_F(AssemblerRISCV64Test, CSh) {
+ DriverStr(RepeatCRRImm(&Riscv64Assembler::CSh,
+ /*imm_bits=*/1,
+ /*shift=*/1,
+ "c.sh {reg1}, {imm}({reg2})"),
+ "CSh");
+}
+
+TEST_F(AssemblerRISCV64Test, CZext_b) {
+ DriverStr(RepeatCRShort(&Riscv64Assembler::CZext_b, "c.zext.b {reg}"), "CZext_b");
+}
+
+TEST_F(AssemblerRISCV64Test, CSext_b) {
+ DriverStr(RepeatCRShort(&Riscv64Assembler::CSext_b, "c.sext.b {reg}"), "CSext_b");
+}
+
+TEST_F(AssemblerRISCV64Test, CZext_h) {
+ DriverStr(RepeatCRShort(&Riscv64Assembler::CZext_h, "c.zext.h {reg}"), "CZext_h");
+}
+
+TEST_F(AssemblerRISCV64Test, CSext_h) {
+ DriverStr(RepeatCRShort(&Riscv64Assembler::CSext_h, "c.sext.h {reg}"), "CSext_h");
+}
+
+TEST_F(AssemblerRISCV64Test, CZext_w) {
+ DriverStr(RepeatCRShort(&Riscv64Assembler::CZext_w, "c.zext.w {reg}"), "CZext_w");
+}
+
+TEST_F(AssemblerRISCV64Test, CNot) {
+ DriverStr(RepeatCRShort(&Riscv64Assembler::CNot, "c.not {reg}"), "CNot");
+}
+
+TEST_F(AssemblerRISCV64Test, CMul) {
+ DriverStr(RepeatCRRShort(&Riscv64Assembler::CMul, "c.mul {reg1}, {reg2}"), "CMul");
+}
+
+TEST_F(AssemblerRISCV64Test, CJ) {
+ DriverStr(
+ RepeatImm(
+ &Riscv64Assembler::CJ, /*no_zero_imm=*/false, /*imm_bits=*/-11, /*shift=*/1, "c.j {imm}"),
+ "CJ");
+}
+
+TEST_F(AssemblerRISCV64Test, CJr) {
+ DriverStr(RepeatRNoZero(&Riscv64Assembler::CJr, "c.jr {reg}"), "CJr");
+}
+
+TEST_F(AssemblerRISCV64Test, CJalr) {
+ DriverStr(RepeatRNoZero(&Riscv64Assembler::CJalr, "c.jalr {reg}"), "CJalr");
+}
+
+TEST_F(AssemblerRISCV64Test, CBeqz) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CBeqz,
+ /*is_short=*/true,
+ /*no_zero_reg=*/false,
+ /*no_zero_imm=*/false,
+ /*imm_bits=*/-8,
+ /*shift=*/1,
+ "c.beqz {reg}, {imm}"),
+ "CBeqz");
+}
+
+TEST_F(AssemblerRISCV64Test, CBnez) {
+ DriverStr(RepeatCRImm(&Riscv64Assembler::CBnez,
+ /*is_short=*/true,
+ /*no_zero_reg=*/false,
+ /*no_zero_imm=*/false,
+ /*imm_bits=*/-8,
+ /*shift=*/1,
+ "c.bnez {reg}, {imm}"),
+ "CBnez");
+}
+
+TEST_F(AssemblerRISCV64Test, CEbreak) {
+ __ CEbreak();
+ DriverStr("c.ebreak", "CEbreak");
+}
+
+TEST_F(AssemblerRISCV64Test, CNop) {
+ __ CNop();
+ DriverStr("c.nop", "CNop");
+}
+
+TEST_F(AssemblerRISCV64Test, CUnimp) {
+ __ CUnimp();
+ DriverStr("c.unimp", "CUnimp");
+}
+
TEST_F(AssemblerRISCV64Test, AddUw) {
+ ScopedCSuppression scs(this); // Avoid `c.zext.w`.
DriverStr(RepeatRRR(&Riscv64Assembler::AddUw, "add.uw {reg1}, {reg2}, {reg3}"), "AddUw");
}
@@ -2991,6 +3617,21 @@ TEST_F(AssemblerRISCV64Test, Rev8) {
DriverStr(RepeatRR(&Riscv64Assembler::Rev8, "rev8 {reg1}, {reg2}"), "Rev8");
}
+TEST_F(AssemblerRISCV64Test, ZbbSextB) {
+ ScopedCSuppression scs(this);
+ DriverStr(RepeatRR(&Riscv64Assembler::ZbbSextB, "sext.b {reg1}, {reg2}"), "ZbbSextB");
+}
+
+TEST_F(AssemblerRISCV64Test, ZbbSextH) {
+ ScopedCSuppression scs(this);
+ DriverStr(RepeatRR(&Riscv64Assembler::ZbbSextH, "sext.h {reg1}, {reg2}"), "ZbbSextH");
+}
+
+TEST_F(AssemblerRISCV64Test, ZbbZextH) {
+ ScopedCSuppression scs(this);
+ DriverStr(RepeatRR(&Riscv64Assembler::ZbbZextH, "zext.h {reg1}, {reg2}"), "ZbbZextH");
+}
+
// Vector Instructions
TEST_F(AssemblerRISCV64Test, VSetvl) {
@@ -7203,22 +7844,25 @@ TEST_F(AssemblerRISCV64Test, VId_v) {
// Pseudo instructions.
TEST_F(AssemblerRISCV64Test, Nop) {
+ ScopedCSuppression scs(this);
__ Nop();
DriverStr("addi zero,zero,0", "Nop");
}
TEST_F(AssemblerRISCV64Test, Li) {
- SetUseSimpleMarch(true);
+ ScopedMarchOverride smo(this, "-march=rv64imafd");
TestLoadConst64("Li",
/*can_use_tmp=*/ false,
[&](XRegister rd, int64_t value) { __ Li(rd, value); });
}
TEST_F(AssemblerRISCV64Test, Mv) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRR(&Riscv64Assembler::Mv, "addi {reg1}, {reg2}, 0"), "Mv");
}
TEST_F(AssemblerRISCV64Test, Not) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRR(&Riscv64Assembler::Not, "xori {reg1}, {reg2}, -1"), "Not");
}
@@ -7231,6 +7875,7 @@ TEST_F(AssemblerRISCV64Test, NegW) {
}
TEST_F(AssemblerRISCV64Test, SextB) {
+ ScopedCSuppression scs(this);
// Note: SEXT.B from the Zbb extension is not supported.
DriverStr(RepeatRR(&Riscv64Assembler::SextB,
"slli {reg1}, {reg2}, 56\n"
@@ -7239,6 +7884,7 @@ TEST_F(AssemblerRISCV64Test, SextB) {
}
TEST_F(AssemblerRISCV64Test, SextH) {
+ ScopedCSuppression scs(this);
// Note: SEXT.H from the Zbb extension is not supported.
DriverStr(RepeatRR(&Riscv64Assembler::SextH,
"slli {reg1}, {reg2}, 48\n"
@@ -7247,14 +7893,17 @@ TEST_F(AssemblerRISCV64Test, SextH) {
}
TEST_F(AssemblerRISCV64Test, SextW) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRR(&Riscv64Assembler::SextW, "addiw {reg1}, {reg2}, 0\n"), "SextW");
}
TEST_F(AssemblerRISCV64Test, ZextB) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRR(&Riscv64Assembler::ZextB, "andi {reg1}, {reg2}, 255"), "ZextB");
}
TEST_F(AssemblerRISCV64Test, ZextH) {
+ ScopedCSuppression scs(this);
// Note: ZEXT.H from the Zbb extension is not supported.
DriverStr(RepeatRR(&Riscv64Assembler::ZextH,
"slli {reg1}, {reg2}, 48\n"
@@ -7263,6 +7912,7 @@ TEST_F(AssemblerRISCV64Test, ZextH) {
}
TEST_F(AssemblerRISCV64Test, ZextW) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRR(&Riscv64Assembler::ZextW,
"slli {reg1}, {reg2}, 32\n"
"srli {reg1}, {reg1}, 32"),
@@ -7310,11 +7960,13 @@ TEST_F(AssemblerRISCV64Test, FNegD) {
}
TEST_F(AssemblerRISCV64Test, Beqz) {
+ ScopedCSuppression scs(this);
// TODO(riscv64): Change "-11, 2" to "-12, 1" for "C" Standard Extension.
DriverStr(RepeatRIbS(&Riscv64Assembler::Beqz, -11, 2, "beq {reg}, zero, {imm}\n"), "Beqz");
}
TEST_F(AssemblerRISCV64Test, Bnez) {
+ ScopedCSuppression scs(this);
// TODO(riscv64): Change "-11, 2" to "-12, 1" for "C" Standard Extension.
DriverStr(RepeatRIbS(&Riscv64Assembler::Bnez, -11, 2, "bne {reg}, zero, {imm}\n"), "Bnez");
}
@@ -7360,6 +8012,7 @@ TEST_F(AssemblerRISCV64Test, Bleu) {
}
TEST_F(AssemblerRISCV64Test, J) {
+ ScopedCSuppression scs(this);
// TODO(riscv64): Change "-19, 2" to "-20, 1" for "C" Standard Extension.
DriverStr(RepeatIbS<int32_t>(&Riscv64Assembler::J, -19, 2, "j {imm}\n"), "J");
}
@@ -7370,18 +8023,22 @@ TEST_F(AssemblerRISCV64Test, JalRA) {
}
TEST_F(AssemblerRISCV64Test, Jr) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatR(&Riscv64Assembler::Jr, "jr {reg}\n"), "Jr");
}
TEST_F(AssemblerRISCV64Test, JalrRA) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatR(&Riscv64Assembler::Jalr, "jalr {reg}\n"), "JalrRA");
}
TEST_F(AssemblerRISCV64Test, Jalr0) {
+ ScopedCSuppression scs(this);
DriverStr(RepeatRR(&Riscv64Assembler::Jalr, "jalr {reg1}, {reg2}\n"), "Jalr0");
}
TEST_F(AssemblerRISCV64Test, Ret) {
+ ScopedCSuppression scs(this);
__ Ret();
DriverStr("ret\n", "Ret");
}
@@ -7434,6 +8091,7 @@ TEST_F(AssemblerRISCV64Test, Csrci) {
}
TEST_F(AssemblerRISCV64Test, LoadConst32) {
+ ScopedCSuppression scs(this);
// `LoadConst32()` emits the same code sequences as `Li()` for 32-bit values.
ScratchRegisterScope srs(GetAssembler());
srs.ExcludeXRegister(TMP);
@@ -7442,13 +8100,14 @@ TEST_F(AssemblerRISCV64Test, LoadConst32) {
}
TEST_F(AssemblerRISCV64Test, LoadConst64) {
- SetUseSimpleMarch(true);
+ ScopedMarchOverride smo(this, "-march=rv64imafd");
TestLoadConst64("LoadConst64",
/*can_use_tmp=*/ true,
[&](XRegister rd, int64_t value) { __ LoadConst64(rd, value); });
}
TEST_F(AssemblerRISCV64Test, AddConst32) {
+ ScopedCSuppression scs(this);
auto emit_op = [&](XRegister rd, XRegister rs1, int64_t value) {
__ AddConst32(rd, rs1, dchecked_integral_cast<int32_t>(value));
};
@@ -7456,7 +8115,7 @@ TEST_F(AssemblerRISCV64Test, AddConst32) {
}
TEST_F(AssemblerRISCV64Test, AddConst64) {
- SetUseSimpleMarch(true);
+ ScopedMarchOverride smo(this, "-march=rv64imafd");
auto emit_op = [&](XRegister rd, XRegister rs1, int64_t value) {
__ AddConst64(rd, rs1, value);
};
@@ -7464,38 +8123,47 @@ TEST_F(AssemblerRISCV64Test, AddConst64) {
}
TEST_F(AssemblerRISCV64Test, BcondForward3KiB) {
+ ScopedCSuppression scs(this);
TestBcondForward("BcondForward3KiB", 3 * KB, "1", GetPrintBcond());
}
TEST_F(AssemblerRISCV64Test, BcondForward3KiBBare) {
+ ScopedCSuppression scs(this);
TestBcondForward("BcondForward3KiB", 3 * KB, "1", GetPrintBcond(), /*is_bare=*/ true);
}
TEST_F(AssemblerRISCV64Test, BcondBackward3KiB) {
+ ScopedCSuppression scs(this);
TestBcondBackward("BcondBackward3KiB", 3 * KB, "1", GetPrintBcond());
}
TEST_F(AssemblerRISCV64Test, BcondBackward3KiBBare) {
+ ScopedCSuppression scs(this);
TestBcondBackward("BcondBackward3KiB", 3 * KB, "1", GetPrintBcond(), /*is_bare=*/ true);
}
TEST_F(AssemblerRISCV64Test, BcondForward5KiB) {
+ ScopedCSuppression scs(this);
TestBcondForward("BcondForward5KiB", 5 * KB, "1", GetPrintBcondOppositeAndJ("2"));
}
TEST_F(AssemblerRISCV64Test, BcondBackward5KiB) {
+ ScopedCSuppression scs(this);
TestBcondBackward("BcondBackward5KiB", 5 * KB, "1", GetPrintBcondOppositeAndJ("2"));
}
TEST_F(AssemblerRISCV64Test, BcondForward2MiB) {
+ ScopedCSuppression scs(this);
TestBcondForward("BcondForward2MiB", 2 * MB, "1", GetPrintBcondOppositeAndTail("2", "3"));
}
TEST_F(AssemblerRISCV64Test, BcondBackward2MiB) {
+ ScopedCSuppression scs(this);
TestBcondBackward("BcondBackward2MiB", 2 * MB, "1", GetPrintBcondOppositeAndTail("2", "3"));
}
TEST_F(AssemblerRISCV64Test, BeqA0A1MaxOffset13Forward) {
+ ScopedCSuppression scs(this);
TestBeqA0A1Forward("BeqA0A1MaxOffset13Forward",
MaxOffset13ForwardDistance() - /*BEQ*/ 4u,
"1",
@@ -7503,6 +8171,7 @@ TEST_F(AssemblerRISCV64Test, BeqA0A1MaxOffset13Forward) {
}
TEST_F(AssemblerRISCV64Test, BeqA0A1MaxOffset13ForwardBare) {
+ ScopedCSuppression scs(this);
TestBeqA0A1Forward("BeqA0A1MaxOffset13ForwardBare",
MaxOffset13ForwardDistance() - /*BEQ*/ 4u,
"1",
@@ -7511,6 +8180,7 @@ TEST_F(AssemblerRISCV64Test, BeqA0A1MaxOffset13ForwardBare) {
}
TEST_F(AssemblerRISCV64Test, BeqA0A1MaxOffset13Backward) {
+ ScopedCSuppression scs(this);
TestBeqA0A1Backward("BeqA0A1MaxOffset13Forward",
MaxOffset13BackwardDistance(),
"1",
@@ -7518,6 +8188,7 @@ TEST_F(AssemblerRISCV64Test, BeqA0A1MaxOffset13Backward) {
}
TEST_F(AssemblerRISCV64Test, BeqA0A1MaxOffset13BackwardBare) {
+ ScopedCSuppression scs(this);
TestBeqA0A1Backward("BeqA0A1MaxOffset13ForwardBare",
MaxOffset13BackwardDistance(),
"1",
@@ -7526,6 +8197,7 @@ TEST_F(AssemblerRISCV64Test, BeqA0A1MaxOffset13BackwardBare) {
}
TEST_F(AssemblerRISCV64Test, BeqA0A1OverMaxOffset13Forward) {
+ ScopedCSuppression scs(this);
TestBeqA0A1Forward("BeqA0A1OverMaxOffset13Forward",
MaxOffset13ForwardDistance() - /*BEQ*/ 4u + /*Exceed max*/ 4u,
"1",
@@ -7533,6 +8205,7 @@ TEST_F(AssemblerRISCV64Test, BeqA0A1OverMaxOffset13Forward) {
}
TEST_F(AssemblerRISCV64Test, BeqA0A1OverMaxOffset13Backward) {
+ ScopedCSuppression scs(this);
TestBeqA0A1Backward("BeqA0A1OverMaxOffset13Forward",
MaxOffset13BackwardDistance() + /*Exceed max*/ 4u,
"1",
@@ -7540,6 +8213,7 @@ TEST_F(AssemblerRISCV64Test, BeqA0A1OverMaxOffset13Backward) {
}
TEST_F(AssemblerRISCV64Test, BeqA0A1MaxOffset21Forward) {
+ ScopedCSuppression scs(this);
TestBeqA0A1Forward("BeqA0A1MaxOffset21Forward",
MaxOffset21ForwardDistance() - /*J*/ 4u,
"1",
@@ -7547,6 +8221,7 @@ TEST_F(AssemblerRISCV64Test, BeqA0A1MaxOffset21Forward) {
}
TEST_F(AssemblerRISCV64Test, BeqA0A1MaxOffset21Backward) {
+ ScopedCSuppression scs(this);
TestBeqA0A1Backward("BeqA0A1MaxOffset21Backward",
MaxOffset21BackwardDistance() - /*BNE*/ 4u,
"1",
@@ -7554,6 +8229,7 @@ TEST_F(AssemblerRISCV64Test, BeqA0A1MaxOffset21Backward) {
}
TEST_F(AssemblerRISCV64Test, BeqA0A1OverMaxOffset21Forward) {
+ ScopedCSuppression scs(this);
TestBeqA0A1Forward("BeqA0A1OverMaxOffset21Forward",
MaxOffset21ForwardDistance() - /*J*/ 4u + /*Exceed max*/ 4u,
"1",
@@ -7561,6 +8237,7 @@ TEST_F(AssemblerRISCV64Test, BeqA0A1OverMaxOffset21Forward) {
}
TEST_F(AssemblerRISCV64Test, BeqA0A1OverMaxOffset21Backward) {
+ ScopedCSuppression scs(this);
TestBeqA0A1Backward("BeqA0A1OverMaxOffset21Backward",
MaxOffset21BackwardDistance() - /*BNE*/ 4u + /*Exceed max*/ 4u,
"1",
@@ -7568,15 +8245,18 @@ TEST_F(AssemblerRISCV64Test, BeqA0A1OverMaxOffset21Backward) {
}
TEST_F(AssemblerRISCV64Test, BeqA0A1AlmostCascade) {
+ ScopedCSuppression scs(this);
TestBeqA0A1MaybeCascade("BeqA0A1AlmostCascade", /*cascade=*/ false, GetPrintBcond());
}
TEST_F(AssemblerRISCV64Test, BeqA0A1Cascade) {
+ ScopedCSuppression scs(this);
TestBeqA0A1MaybeCascade(
"BeqA0A1AlmostCascade", /*cascade=*/ true, GetPrintBcondOppositeAndJ("1"));
}
TEST_F(AssemblerRISCV64Test, BcondElimination) {
+ ScopedCSuppression scs(this);
Riscv64Label label;
__ Bind(&label);
__ Nop();
@@ -7591,6 +8271,7 @@ TEST_F(AssemblerRISCV64Test, BcondElimination) {
}
TEST_F(AssemblerRISCV64Test, BcondUnconditional) {
+ ScopedCSuppression scs(this);
Riscv64Label label;
__ Bind(&label);
__ Nop();
@@ -7609,54 +8290,67 @@ TEST_F(AssemblerRISCV64Test, BcondUnconditional) {
}
TEST_F(AssemblerRISCV64Test, JalRdForward3KiB) {
+ ScopedCSuppression scs(this);
TestJalRdForward("JalRdForward3KiB", 3 * KB, "1", GetPrintJalRd());
}
TEST_F(AssemblerRISCV64Test, JalRdForward3KiBBare) {
+ ScopedCSuppression scs(this);
TestJalRdForward("JalRdForward3KiB", 3 * KB, "1", GetPrintJalRd(), /*is_bare=*/ true);
}
TEST_F(AssemblerRISCV64Test, JalRdBackward3KiB) {
+ ScopedCSuppression scs(this);
TestJalRdBackward("JalRdBackward3KiB", 3 * KB, "1", GetPrintJalRd());
}
TEST_F(AssemblerRISCV64Test, JalRdBackward3KiBBare) {
+ ScopedCSuppression scs(this);
TestJalRdBackward("JalRdBackward3KiB", 3 * KB, "1", GetPrintJalRd(), /*is_bare=*/ true);
}
TEST_F(AssemblerRISCV64Test, JalRdForward2MiB) {
+ ScopedCSuppression scs(this);
TestJalRdForward("JalRdForward2MiB", 2 * MB, "1", GetPrintCallRd("2"));
}
TEST_F(AssemblerRISCV64Test, JalRdBackward2MiB) {
+ ScopedCSuppression scs(this);
TestJalRdBackward("JalRdBackward2MiB", 2 * MB, "1", GetPrintCallRd("2"));
}
TEST_F(AssemblerRISCV64Test, JForward3KiB) {
+ ScopedCSuppression scs(this);
TestBuncondForward("JForward3KiB", 3 * KB, "1", GetEmitJ(), GetPrintJ());
}
TEST_F(AssemblerRISCV64Test, JForward3KiBBare) {
+ ScopedCSuppression scs(this);
TestBuncondForward("JForward3KiB", 3 * KB, "1", GetEmitJ(/*is_bare=*/ true), GetPrintJ());
}
TEST_F(AssemblerRISCV64Test, JBackward3KiB) {
+ ScopedCSuppression scs(this);
TestBuncondBackward("JBackward3KiB", 3 * KB, "1", GetEmitJ(), GetPrintJ());
}
TEST_F(AssemblerRISCV64Test, JBackward3KiBBare) {
+ ScopedCSuppression scs(this);
TestBuncondBackward("JBackward3KiB", 3 * KB, "1", GetEmitJ(/*is_bare=*/ true), GetPrintJ());
}
TEST_F(AssemblerRISCV64Test, JForward2MiB) {
+ ScopedCSuppression scs(this);
TestBuncondForward("JForward2MiB", 2 * MB, "1", GetEmitJ(), GetPrintTail("2"));
}
TEST_F(AssemblerRISCV64Test, JBackward2MiB) {
+ ScopedCSuppression scs(this);
TestBuncondBackward("JBackward2MiB", 2 * MB, "1", GetEmitJ(), GetPrintTail("2"));
}
TEST_F(AssemblerRISCV64Test, JMaxOffset21Forward) {
+ ScopedCSuppression scs(this);
TestBuncondForward("JMaxOffset21Forward",
MaxOffset21ForwardDistance() - /*J*/ 4u,
"1",
@@ -7665,6 +8359,7 @@ TEST_F(AssemblerRISCV64Test, JMaxOffset21Forward) {
}
TEST_F(AssemblerRISCV64Test, JMaxOffset21ForwardBare) {
+ ScopedCSuppression scs(this);
TestBuncondForward("JMaxOffset21Forward",
MaxOffset21ForwardDistance() - /*J*/ 4u,
"1",
@@ -7673,6 +8368,7 @@ TEST_F(AssemblerRISCV64Test, JMaxOffset21ForwardBare) {
}
TEST_F(AssemblerRISCV64Test, JMaxOffset21Backward) {
+ ScopedCSuppression scs(this);
TestBuncondBackward("JMaxOffset21Backward",
MaxOffset21BackwardDistance(),
"1",
@@ -7681,6 +8377,7 @@ TEST_F(AssemblerRISCV64Test, JMaxOffset21Backward) {
}
TEST_F(AssemblerRISCV64Test, JMaxOffset21BackwardBare) {
+ ScopedCSuppression scs(this);
TestBuncondBackward("JMaxOffset21Backward",
MaxOffset21BackwardDistance(),
"1",
@@ -7689,6 +8386,7 @@ TEST_F(AssemblerRISCV64Test, JMaxOffset21BackwardBare) {
}
TEST_F(AssemblerRISCV64Test, JOverMaxOffset21Forward) {
+ ScopedCSuppression scs(this);
TestBuncondForward("JOverMaxOffset21Forward",
MaxOffset21ForwardDistance() - /*J*/ 4u + /*Exceed max*/ 4u,
"1",
@@ -7697,6 +8395,7 @@ TEST_F(AssemblerRISCV64Test, JOverMaxOffset21Forward) {
}
TEST_F(AssemblerRISCV64Test, JOverMaxOffset21Backward) {
+ ScopedCSuppression scs(this);
TestBuncondBackward("JMaxOffset21Backward",
MaxOffset21BackwardDistance() + /*Exceed max*/ 4u,
"1",
@@ -7705,22 +8404,27 @@ TEST_F(AssemblerRISCV64Test, JOverMaxOffset21Backward) {
}
TEST_F(AssemblerRISCV64Test, CallForward3KiB) {
+ ScopedCSuppression scs(this);
TestBuncondForward("CallForward3KiB", 3 * KB, "1", GetEmitJal(), GetPrintJal());
}
TEST_F(AssemblerRISCV64Test, CallBackward3KiB) {
+ ScopedCSuppression scs(this);
TestBuncondBackward("CallBackward3KiB", 3 * KB, "1", GetEmitJal(), GetPrintJal());
}
TEST_F(AssemblerRISCV64Test, CallForward2MiB) {
+ ScopedCSuppression scs(this);
TestBuncondForward("CallForward2MiB", 2 * MB, "1", GetEmitJal(), GetPrintCall("2"));
}
TEST_F(AssemblerRISCV64Test, CallBackward2MiB) {
+ ScopedCSuppression scs(this);
TestBuncondBackward("CallBackward2MiB", 2 * MB, "1", GetEmitJal(), GetPrintCall("2"));
}
TEST_F(AssemblerRISCV64Test, CallMaxOffset21Forward) {
+ ScopedCSuppression scs(this);
TestBuncondForward("CallMaxOffset21Forward",
MaxOffset21ForwardDistance() - /*J*/ 4u,
"1",
@@ -7729,6 +8433,7 @@ TEST_F(AssemblerRISCV64Test, CallMaxOffset21Forward) {
}
TEST_F(AssemblerRISCV64Test, CallMaxOffset21Backward) {
+ ScopedCSuppression scs(this);
TestBuncondBackward("CallMaxOffset21Backward",
MaxOffset21BackwardDistance(),
"1",
@@ -7737,6 +8442,7 @@ TEST_F(AssemblerRISCV64Test, CallMaxOffset21Backward) {
}
TEST_F(AssemblerRISCV64Test, CallOverMaxOffset21Forward) {
+ ScopedCSuppression scs(this);
TestBuncondForward("CallOverMaxOffset21Forward",
MaxOffset21ForwardDistance() - /*J*/ 4u + /*Exceed max*/ 4u,
"1",
@@ -7745,6 +8451,7 @@ TEST_F(AssemblerRISCV64Test, CallOverMaxOffset21Forward) {
}
TEST_F(AssemblerRISCV64Test, CallOverMaxOffset21Backward) {
+ ScopedCSuppression scs(this);
TestBuncondBackward("CallMaxOffset21Backward",
MaxOffset21BackwardDistance() + /*Exceed max*/ 4u,
"1",
@@ -7753,66 +8460,82 @@ TEST_F(AssemblerRISCV64Test, CallOverMaxOffset21Backward) {
}
TEST_F(AssemblerRISCV64Test, Loadb) {
+ ScopedCSuppression scs(this); // Suppress 16-bit instructions for address formation.
TestLoadStoreArbitraryOffset("Loadb", "lb", &Riscv64Assembler::Loadb, /*is_store=*/ false);
}
TEST_F(AssemblerRISCV64Test, Loadh) {
+ ScopedCSuppression scs(this);
TestLoadStoreArbitraryOffset("Loadh", "lh", &Riscv64Assembler::Loadh, /*is_store=*/ false);
}
TEST_F(AssemblerRISCV64Test, Loadw) {
+ ScopedCSuppression scs(this);
TestLoadStoreArbitraryOffset("Loadw", "lw", &Riscv64Assembler::Loadw, /*is_store=*/ false);
}
TEST_F(AssemblerRISCV64Test, Loadd) {
+ ScopedCSuppression scs(this);
TestLoadStoreArbitraryOffset("Loadd", "ld", &Riscv64Assembler::Loadd, /*is_store=*/ false);
}
TEST_F(AssemblerRISCV64Test, Loadbu) {
+ ScopedCSuppression scs(this);
TestLoadStoreArbitraryOffset("Loadbu", "lbu", &Riscv64Assembler::Loadbu, /*is_store=*/ false);
}
TEST_F(AssemblerRISCV64Test, Loadhu) {
+ ScopedCSuppression scs(this);
TestLoadStoreArbitraryOffset("Loadhu", "lhu", &Riscv64Assembler::Loadhu, /*is_store=*/ false);
}
TEST_F(AssemblerRISCV64Test, Loadwu) {
+ ScopedCSuppression scs(this); // Suppress 16-bit instructions for address formation.
TestLoadStoreArbitraryOffset("Loadwu", "lwu", &Riscv64Assembler::Loadwu, /*is_store=*/ false);
}
TEST_F(AssemblerRISCV64Test, Storeb) {
+ ScopedCSuppression scs(this);
TestLoadStoreArbitraryOffset("Storeb", "sb", &Riscv64Assembler::Storeb, /*is_store=*/ true);
}
TEST_F(AssemblerRISCV64Test, Storeh) {
+ ScopedCSuppression scs(this);
TestLoadStoreArbitraryOffset("Storeh", "sh", &Riscv64Assembler::Storeh, /*is_store=*/ true);
}
TEST_F(AssemblerRISCV64Test, Storew) {
+ ScopedCSuppression scs(this);
TestLoadStoreArbitraryOffset("Storew", "sw", &Riscv64Assembler::Storew, /*is_store=*/ true);
}
TEST_F(AssemblerRISCV64Test, Stored) {
+ ScopedCSuppression scs(this);
TestLoadStoreArbitraryOffset("Stored", "sd", &Riscv64Assembler::Stored, /*is_store=*/ true);
}
TEST_F(AssemblerRISCV64Test, FLoadw) {
+ ScopedCSuppression scs(this); // Suppress 16-bit instructions for address formation.
TestFPLoadStoreArbitraryOffset("FLoadw", "flw", &Riscv64Assembler::FLoadw);
}
TEST_F(AssemblerRISCV64Test, FLoadd) {
+ ScopedCSuppression scs(this);
TestFPLoadStoreArbitraryOffset("FLoadd", "fld", &Riscv64Assembler::FLoadd);
}
TEST_F(AssemblerRISCV64Test, FStorew) {
+ ScopedCSuppression scs(this); // Suppress 16-bit instructions for address formation.
TestFPLoadStoreArbitraryOffset("FStorew", "fsw", &Riscv64Assembler::FStorew);
}
TEST_F(AssemblerRISCV64Test, FStored) {
+ ScopedCSuppression scs(this);
TestFPLoadStoreArbitraryOffset("FStored", "fsd", &Riscv64Assembler::FStored);
}
TEST_F(AssemblerRISCV64Test, Unimp) {
+ ScopedCSuppression scs(this);
__ Unimp();
DriverStr("unimp\n", "Unimp");
}
@@ -7842,14 +8565,17 @@ TEST_F(AssemblerRISCV64Test, LoadLabelAddress) {
}
TEST_F(AssemblerRISCV64Test, LoadLiteralWithPaddingForLong) {
+ ScopedCSuppression scs(this);
TestLoadLiteral("LoadLiteralWithPaddingForLong", /*with_padding_for_long=*/ true);
}
TEST_F(AssemblerRISCV64Test, LoadLiteralWithoutPaddingForLong) {
+ ScopedCSuppression scs(this);
TestLoadLiteral("LoadLiteralWithoutPaddingForLong", /*with_padding_for_long=*/ false);
}
TEST_F(AssemblerRISCV64Test, JumpTable) {
+ ScopedCSuppression scs(this);
std::string expected;
expected += EmitNops(sizeof(uint32_t));
Riscv64Label targets[4];
diff --git a/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc b/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
index 9d3a29d252..00e1f54d03 100644
--- a/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
+++ b/compiler/utils/riscv64/jni_macro_assembler_riscv64.cc
@@ -548,7 +548,7 @@ void Riscv64JNIMacroAssembler::DeliverPendingException() {
}
std::unique_ptr<JNIMacroLabel> Riscv64JNIMacroAssembler::CreateLabel() {
- return std::unique_ptr<JNIMacroLabel>(new Riscv64JNIMacroLabel());
+ return std::unique_ptr<JNIMacroLabel>(new (asm_.GetAllocator()) Riscv64JNIMacroLabel());
}
void Riscv64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
diff --git a/compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc b/compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc
index be6feeb9de..9717930d58 100644
--- a/compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc
+++ b/compiler/utils/riscv64/jni_macro_assembler_riscv64_test.cc
@@ -46,6 +46,18 @@ class JniMacroAssemblerRiscv64Test : public AssemblerTestBase {
protected:
InstructionSet GetIsa() override { return InstructionSet::kRiscv64; }
+ std::vector<std::string> GetAssemblerCommand() override {
+ std::vector<std::string> result = AssemblerTestBase::GetAssemblerCommand();
+ if (march_override_.has_value()) {
+ auto it = std::find_if(result.begin(),
+ result.end(),
+ [](const std::string& s) { return StartsWith(s, "-march="); });
+ CHECK(it != result.end());
+ *it = march_override_.value();
+ }
+ return result;
+ }
+
void DriverStr(const std::string& assembly_text, const std::string& test_name) {
assembler_.FinalizeCode();
size_t cs = assembler_.CodeSize();
@@ -76,6 +88,9 @@ class JniMacroAssemblerRiscv64Test : public AssemblerTestBase {
MallocArenaPool pool_;
ArenaAllocator allocator_;
Riscv64JNIMacroAssembler assembler_;
+
+ // TODO: Implement auto-compression and remove this override.
+ std::optional<std::string> march_override_ = "-march=rv64imafdv_zba_zbb";
};
TEST_F(JniMacroAssemblerRiscv64Test, StackFrame) {
diff --git a/compiler/utils/x86/jni_macro_assembler_x86.cc b/compiler/utils/x86/jni_macro_assembler_x86.cc
index dfdbc183f1..3ee0530dc2 100644
--- a/compiler/utils/x86/jni_macro_assembler_x86.cc
+++ b/compiler/utils/x86/jni_macro_assembler_x86.cc
@@ -542,7 +542,7 @@ void X86JNIMacroAssembler::DeliverPendingException() {
}
std::unique_ptr<JNIMacroLabel> X86JNIMacroAssembler::CreateLabel() {
- return std::unique_ptr<JNIMacroLabel>(new X86JNIMacroLabel());
+ return std::unique_ptr<JNIMacroLabel>(new (asm_.GetAllocator()) X86JNIMacroLabel());
}
void X86JNIMacroAssembler::Jump(JNIMacroLabel* label) {
diff --git a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
index e9e6dbdae7..1b9cfa640e 100644
--- a/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
+++ b/compiler/utils/x86_64/jni_macro_assembler_x86_64.cc
@@ -610,7 +610,7 @@ void X86_64JNIMacroAssembler::DeliverPendingException() {
}
std::unique_ptr<JNIMacroLabel> X86_64JNIMacroAssembler::CreateLabel() {
- return std::unique_ptr<JNIMacroLabel>(new X86_64JNIMacroLabel());
+ return std::unique_ptr<JNIMacroLabel>(new (asm_.GetAllocator()) X86_64JNIMacroLabel());
}
void X86_64JNIMacroAssembler::Jump(JNIMacroLabel* label) {
diff --git a/dex2oat/dex2oat_image_test.cc b/dex2oat/dex2oat_image_test.cc
index 9ba540b973..e276638372 100644
--- a/dex2oat/dex2oat_image_test.cc
+++ b/dex2oat/dex2oat_image_test.cc
@@ -250,7 +250,6 @@ TEST_F(Dex2oatImageTest, TestModesAndFilters) {
}
TEST_F(Dex2oatImageTest, TestExtension) {
- TEST_DISABLED_FOR_RISCV64();
std::string error_msg;
MemMap reservation = ReserveCoreImageAddressSpace(&error_msg);
ASSERT_TRUE(reservation.IsValid()) << error_msg;
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 9c6a381ac3..4a8a4f3c65 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -1498,7 +1498,6 @@ TEST_F(Dex2oatVerifierAbort, HardFail) {
class Dex2oatDedupeCode : public Dex2oatTest {};
TEST_F(Dex2oatDedupeCode, DedupeTest) {
- TEST_DISABLED_FOR_RISCV64();
// Use MyClassNatives. It has lots of native methods that will produce deduplicate-able code.
std::unique_ptr<const DexFile> dex(OpenTestDexFile("MyClassNatives"));
std::string out_dir = GetScratchDir();
diff --git a/dex2oat/driver/compiler_driver_test.cc b/dex2oat/driver/compiler_driver_test.cc
index a82de055a9..759426a1d3 100644
--- a/dex2oat/driver/compiler_driver_test.cc
+++ b/dex2oat/driver/compiler_driver_test.cc
@@ -261,7 +261,6 @@ class CompilerDriverProfileTest : public CompilerDriverTest {
};
TEST_F(CompilerDriverProfileTest, ProfileGuidedCompilation) {
- TEST_DISABLED_FOR_RISCV64();
Thread* self = Thread::Current();
jobject class_loader;
{
diff --git a/dex2oat/verifier_deps_test.cc b/dex2oat/verifier_deps_test.cc
index 0b72382b21..a1e93b750e 100644
--- a/dex2oat/verifier_deps_test.cc
+++ b/dex2oat/verifier_deps_test.cc
@@ -584,7 +584,6 @@ TEST_F(VerifierDepsTest, VerifyDeps) {
}
TEST_F(VerifierDepsTest, CompilerDriver) {
- TEST_DISABLED_FOR_RISCV64();
SetupCompilerDriver();
// Test both multi-dex and single-dex configuration.
diff --git a/disassembler/disassembler_riscv64.cc b/disassembler/disassembler_riscv64.cc
index b92d1287b2..09e9faf7d7 100644
--- a/disassembler/disassembler_riscv64.cc
+++ b/disassembler/disassembler_riscv64.cc
@@ -76,11 +76,21 @@ class DisassemblerRiscv64::Printer {
kOpCFG = 0b111,
};
+ class ScopedNewLinePrinter {
+ std::ostream& os_;
+
+ public:
+ explicit ScopedNewLinePrinter(std::ostream& os) : os_(os) {}
+ ~ScopedNewLinePrinter() { os_ << '\n'; }
+ };
+
static const char* XRegName(uint32_t regno);
static const char* FRegName(uint32_t regno);
static const char* VRegName(uint32_t regno);
static const char* RoundingModeName(uint32_t rm);
+ // Regular instruction immediate utils
+
static int32_t Decode32Imm12(uint32_t insn32) {
uint32_t sign = (insn32 >> 31);
uint32_t imm12 = (insn32 >> 20);
@@ -99,12 +109,89 @@ class DisassemblerRiscv64::Printer {
return static_cast<int32_t>(imm) - static_cast<int32_t>(bit11 << 12); // Sign-extend.
}
+ // Compressed instruction immediate utils
+
+ // Extracts the offset from a compressed instruction
+ // where `offset[5:3]` is in bits `[12:10]` and `offset[2|6]` is in bits `[6:5]`
+ static uint32_t Decode16CMOffsetW(uint32_t insn16) {
+ DCHECK(IsUint<16>(insn16));
+ return BitFieldExtract(insn16, 5, 1) << 6 | BitFieldExtract(insn16, 10, 3) << 3 |
+ BitFieldExtract(insn16, 6, 1) << 2;
+ }
+
+ // Extracts the offset from a compressed instruction
+ // where `offset[5:3]` is in bits `[12:10]` and `offset[7:6]` is in bits `[6:5]`
+ static uint32_t Decode16CMOffsetD(uint32_t insn16) {
+ DCHECK(IsUint<16>(insn16));
+ return BitFieldExtract(insn16, 5, 2) << 6 | BitFieldExtract(insn16, 10, 3) << 3;
+ }
+
+ // Re-orders raw immediatate into real value
+ // where `imm[5:3]` is in bits `[5:3]` and `imm[8:6]` is in bits `[2:0]`
+ static uint32_t Uimm6ToOffsetD16(uint32_t uimm6) {
+ DCHECK(IsUint<6>(uimm6));
+ return (BitFieldExtract(uimm6, 3, 3) << 3) | (BitFieldExtract(uimm6, 0, 3) << 6);
+ }
+
+ // Re-orders raw immediatate to form real value
+ // where `imm[5:2]` is in bits `[5:2]` and `imm[7:6]` is in bits `[1:0]`
+ static uint32_t Uimm6ToOffsetW16(uint32_t uimm6) {
+ DCHECK(IsUint<6>(uimm6));
+ return (BitFieldExtract(uimm6, 2, 4) << 2) | (BitFieldExtract(uimm6, 0, 2) << 6);
+ }
+
+ // Re-orders raw immediatate to form real value
+ // where `imm[1]` is in bit `[0]` and `imm[0]` is in bit `[1]`
+ static uint32_t Uimm2ToOffset10(uint32_t uimm2) {
+ DCHECK(IsUint<2>(uimm2));
+ return (uimm2 >> 1) | (uimm2 & 0x1u) << 1;
+ }
+
+ // Re-orders raw immediatate to form real value
+ // where `imm[1]` is in bit `[0]` and `imm[0]` is `0`
+ static uint32_t Uimm2ToOffset1(uint32_t uimm2) {
+ DCHECK(IsUint<2>(uimm2));
+ return (uimm2 & 0x1u) << 1;
+ }
+
+ template <size_t kWidth>
+ static constexpr int32_t SignExtendBits(uint32_t bits) {
+ static_assert(kWidth < BitSizeOf<uint32_t>());
+ const uint32_t sign_bit = (bits >> kWidth) & 1u;
+ return static_cast<int32_t>(bits) - static_cast<int32_t>(sign_bit << kWidth);
+ }
+
+ // Extracts the immediate from a compressed instruction
+ // where `imm[5]` is in bit `[12]` and `imm[4:0]` is in bits `[6:2]`
+ // and performs sign-extension if required
+ template <typename T>
+ static T Decode16Imm6(uint32_t insn16) {
+ DCHECK(IsUint<16>(insn16));
+ static_assert(std::is_integral_v<T>, "T must be integral");
+ const T bits =
+ BitFieldInsert(BitFieldExtract(insn16, 2, 5), BitFieldExtract(insn16, 12, 1), 5, 1);
+ const T checked_bits = dchecked_integral_cast<T>(bits);
+ if (std::is_unsigned_v<T>) {
+ return checked_bits;
+ }
+ return SignExtendBits<6>(checked_bits);
+ }
+
+ // Regular instruction register utils
+
static uint32_t GetRd(uint32_t insn32) { return (insn32 >> 7) & 0x1fu; }
static uint32_t GetRs1(uint32_t insn32) { return (insn32 >> 15) & 0x1fu; }
static uint32_t GetRs2(uint32_t insn32) { return (insn32 >> 20) & 0x1fu; }
static uint32_t GetRs3(uint32_t insn32) { return insn32 >> 27; }
static uint32_t GetRoundingMode(uint32_t insn32) { return (insn32 >> 12) & 7u; }
+ // Compressed instruction register utils
+
+ static uint32_t GetRs1Short16(uint32_t insn16) { return BitFieldExtract(insn16, 7, 3) + 8u; }
+ static uint32_t GetRs2Short16(uint32_t insn16) { return BitFieldExtract(insn16, 2, 3) + 8u; }
+ static uint32_t GetRs1_16(uint32_t insn16) { return BitFieldExtract(insn16, 7, 5); }
+ static uint32_t GetRs2_16(uint32_t insn16) { return BitFieldExtract(insn16, 2, 5); }
+
void PrintBranchOffset(int32_t offset);
void PrintLoadStoreAddress(uint32_t rs1, int32_t offset);
@@ -1548,10 +1635,319 @@ void DisassemblerRiscv64::Printer::Dump32(const uint8_t* insn) {
void DisassemblerRiscv64::Printer::Dump16(const uint8_t* insn) {
uint32_t insn16 = static_cast<uint32_t>(insn[0]) + (static_cast<uint32_t>(insn[1]) << 8);
+ ScopedNewLinePrinter nl(os_);
CHECK_NE(insn16 & 3u, 3u);
- // TODO(riscv64): Disassemble instructions from the "C" extension.
- os_ << disassembler_->FormatInstructionPointer(insn)
- << StringPrintf(": %04x \t<unknown16>\n", insn16);
+ os_ << disassembler_->FormatInstructionPointer(insn) << StringPrintf(": %04x \t", insn16);
+
+ uint32_t funct3 = BitFieldExtract(insn16, 13, 3);
+ int32_t offset = -1;
+
+ switch (insn16 & 3u) {
+ case 0b00u: // Quadrant 0
+ switch (funct3) {
+ case 0b000u:
+ if (insn16 == 0u) {
+ os_ << "c.unimp";
+ } else {
+ uint32_t nzuimm = BitFieldExtract(insn16, 5, 8);
+ if (nzuimm != 0u) {
+ uint32_t decoded =
+ BitFieldExtract(nzuimm, 0, 1) << 3 | BitFieldExtract(nzuimm, 1, 1) << 2 |
+ BitFieldExtract(nzuimm, 2, 4) << 6 | BitFieldExtract(nzuimm, 6, 2) << 4;
+ os_ << "c.addi4spn " << XRegName(GetRs2Short16(insn16)) << ", sp, " << decoded;
+ } else {
+ os_ << "<unknown16>";
+ }
+ }
+ return;
+ case 0b001u:
+ offset = Decode16CMOffsetD(insn16);
+ os_ << "c.fld " << FRegName(GetRs2Short16(insn16));
+ break;
+ case 0b010u:
+ offset = Decode16CMOffsetW(insn16);
+ os_ << "c.lw " << XRegName(GetRs2Short16(insn16));
+ break;
+ case 0b011u:
+ offset = Decode16CMOffsetD(insn16);
+ os_ << "c.ld " << XRegName(GetRs2Short16(insn16));
+ break;
+ case 0b100u: {
+ uint32_t opcode2 = BitFieldExtract(insn16, 10, 3);
+ uint32_t imm = BitFieldExtract(insn16, 5, 2);
+ switch (opcode2) {
+ case 0b000:
+ offset = Uimm2ToOffset10(imm);
+ os_ << "c.lbu " << XRegName(GetRs2Short16(insn16));
+ break;
+ case 0b001:
+ offset = Uimm2ToOffset1(imm);
+ os_ << (BitFieldExtract(imm, 1, 1) == 0u ? "c.lhu " : "c.lh ");
+ os_ << XRegName(GetRs2Short16(insn16));
+ break;
+ case 0b010:
+ offset = Uimm2ToOffset10(imm);
+ os_ << "c.sb " << XRegName(GetRs2Short16(insn16));
+ break;
+ case 0b011:
+ if (BitFieldExtract(imm, 1, 1) == 0u) {
+ offset = Uimm2ToOffset1(imm);
+ os_ << "c.sh " << XRegName(GetRs2Short16(insn16));
+ break;
+ }
+ FALLTHROUGH_INTENDED;
+ default:
+ os_ << "<unknown16>";
+ return;
+ }
+ break;
+ }
+ case 0b101u:
+ offset = Decode16CMOffsetD(insn16);
+ os_ << "c.fsd " << FRegName(GetRs2Short16(insn16));
+ break;
+ case 0b110u:
+ offset = Decode16CMOffsetW(insn16);
+ os_ << "c.sw " << XRegName(GetRs2Short16(insn16));
+ break;
+ case 0b111u:
+ offset = Decode16CMOffsetD(insn16);
+ os_ << "c.sd " << XRegName(GetRs2Short16(insn16));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ os_ << ", ";
+ PrintLoadStoreAddress(GetRs1Short16(insn16), offset);
+ return;
+ case 0b01u: // Quadrant 1
+ switch (funct3) {
+ case 0b000u: {
+ uint32_t rd = GetRs1_16(insn16);
+ if (rd == 0) {
+ if (Decode16Imm6<uint32_t>(insn16) != 0u) {
+ os_ << "<hint16>";
+ } else {
+ os_ << "c.nop";
+ }
+ } else {
+ int32_t imm = Decode16Imm6<int32_t>(insn16);
+ if (imm != 0) {
+ os_ << "c.addi " << XRegName(rd) << ", " << imm;
+ } else {
+ os_ << "<hint16>";
+ }
+ }
+ break;
+ }
+ case 0b001u: {
+ uint32_t rd = GetRs1_16(insn16);
+ if (rd != 0) {
+ os_ << "c.addiw " << XRegName(rd) << ", " << Decode16Imm6<int32_t>(insn16);
+ } else {
+ os_ << "<unknown16>";
+ }
+ break;
+ }
+ case 0b010u: {
+ uint32_t rd = GetRs1_16(insn16);
+ if (rd != 0) {
+ os_ << "c.li " << XRegName(rd) << ", " << Decode16Imm6<int32_t>(insn16);
+ } else {
+ os_ << "<hint16>";
+ }
+ break;
+ }
+ case 0b011u: {
+ uint32_t rd = GetRs1_16(insn16);
+ uint32_t imm6_bits = Decode16Imm6<uint32_t>(insn16);
+ if (imm6_bits != 0u) {
+ if (rd == 2) {
+ int32_t nzimm =
+ BitFieldExtract(insn16, 6, 1) << 4 | BitFieldExtract(insn16, 2, 1) << 5 |
+ BitFieldExtract(insn16, 5, 1) << 6 | BitFieldExtract(insn16, 3, 2) << 7 |
+ BitFieldExtract(insn16, 12, 1) << 9;
+ os_ << "c.addi16sp sp, " << SignExtendBits<10>(nzimm);
+ } else if (rd != 0) {
+ // sign-extend bits and mask with 0xfffff as llvm-objdump does
+ uint32_t mask = MaskLeastSignificant<uint32_t>(20);
+ os_ << "c.lui " << XRegName(rd) << ", " << (SignExtendBits<6>(imm6_bits) & mask);
+ } else {
+ os_ << "<hint16>";
+ }
+ } else {
+ os_ << "<unknown16>";
+ }
+ break;
+ }
+ case 0b100u: {
+ uint32_t funct2 = BitFieldExtract(insn16, 10, 2);
+ switch (funct2) {
+ case 0b00: {
+ int32_t nzuimm = Decode16Imm6<uint32_t>(insn16);
+ if (nzuimm != 0) {
+ os_ << "c.srli " << XRegName(GetRs1Short16(insn16)) << ", " << nzuimm;
+ } else {
+ os_ << "<hint16>";
+ }
+ break;
+ }
+ case 0b01: {
+ int32_t nzuimm = Decode16Imm6<uint32_t>(insn16);
+ if (nzuimm != 0) {
+ os_ << "c.srai " << XRegName(GetRs1Short16(insn16)) << ", " << nzuimm;
+ } else {
+ os_ << "<hint16>";
+ }
+ break;
+ }
+ case 0b10:
+ os_ << "c.andi " << XRegName(GetRs1Short16(insn16)) << ", "
+ << Decode16Imm6<int32_t>(insn16);
+ break;
+ case 0b11: {
+ constexpr static const char* mnemonics[] = {
+ "c.sub", "c.xor", "c.or", "c.and", "c.subw", "c.addw", "c.mul", nullptr
+ };
+ uint32_t opc = BitFieldInsert(
+ BitFieldExtract(insn16, 5, 2), BitFieldExtract(insn16, 12, 1), 2, 1);
+ DCHECK(IsUint<3>(opc));
+ const char* mnem = mnemonics[opc];
+ if (mnem != nullptr) {
+ os_ << mnem << " " << XRegName(GetRs1Short16(insn16)) << ", "
+ << XRegName(GetRs2Short16(insn16));
+ } else {
+ constexpr static const char* zbc_mnemonics[] = {
+ "c.zext.b", "c.sext.b", "c.zext.h", "c.sext.h",
+ "c.zext.w", "c.not", nullptr, nullptr,
+ };
+ mnem = zbc_mnemonics[BitFieldExtract(insn16, 2, 3)];
+ if (mnem != nullptr) {
+ os_ << mnem << " " << XRegName(GetRs1Short16(insn16));
+ } else {
+ os_ << "<unknown16>";
+ }
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case 0b101u: {
+ int32_t disp = BitFieldExtract(insn16, 3, 3) << 1 | BitFieldExtract(insn16, 11, 1) << 4 |
+ BitFieldExtract(insn16, 2, 1) << 5 | BitFieldExtract(insn16, 7, 1) << 6 |
+ BitFieldExtract(insn16, 6, 1) << 7 | BitFieldExtract(insn16, 9, 2) << 8 |
+ BitFieldExtract(insn16, 8, 1) << 10 | BitFieldExtract(insn16, 12, 1) << 11;
+ os_ << "c.j ";
+ PrintBranchOffset(SignExtendBits<12>(disp));
+ break;
+ }
+ case 0b110u:
+ case 0b111u: {
+ int32_t disp = BitFieldExtract(insn16, 3, 2) << 1 | BitFieldExtract(insn16, 10, 2) << 3 |
+ BitFieldExtract(insn16, 2, 1) << 5 | BitFieldExtract(insn16, 5, 2) << 6 |
+ BitFieldExtract(insn16, 12, 1) << 8;
+
+ os_ << (funct3 == 0b110u ? "c.beqz " : "c.bnez ");
+ os_ << XRegName(GetRs1Short16(insn16)) << ", ";
+ PrintBranchOffset(SignExtendBits<9>(disp));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case 0b10u: // Quadrant 2
+ switch (funct3) {
+ case 0b000u: {
+ uint32_t nzuimm = Decode16Imm6<uint32_t>(insn16);
+ uint32_t rd = GetRs1_16(insn16);
+ if (rd == 0 || nzuimm == 0) {
+ os_ << "<hint16>";
+ } else {
+ os_ << "c.slli " << XRegName(rd) << ", " << nzuimm;
+ }
+ return;
+ }
+ case 0b001u: {
+ offset = Uimm6ToOffsetD16(Decode16Imm6<uint32_t>(insn16));
+ os_ << "c.fldsp " << FRegName(GetRs1_16(insn16));
+ break;
+ }
+ case 0b010u: {
+ uint32_t rd = GetRs1_16(insn16);
+ if (rd != 0) {
+ offset = Uimm6ToOffsetW16(Decode16Imm6<uint32_t>(insn16));
+ os_ << "c.lwsp " << XRegName(GetRs1_16(insn16));
+ } else {
+ os_ << "<unknown16>";
+ return;
+ }
+ break;
+ }
+ case 0b011u: {
+ uint32_t rd = GetRs1_16(insn16);
+ if (rd != 0) {
+ offset = Uimm6ToOffsetD16(Decode16Imm6<uint32_t>(insn16));
+ os_ << "c.ldsp " << XRegName(GetRs1_16(insn16));
+ } else {
+ os_ << "<unknown16>";
+ return;
+ }
+ break;
+ }
+ case 0b100u: {
+ uint32_t rd_rs1 = GetRs1_16(insn16);
+ uint32_t rs2 = GetRs2_16(insn16);
+ uint32_t b12 = BitFieldExtract(insn16, 12, 1);
+ if (b12 == 0) {
+ if (rd_rs1 != 0 && rs2 != 0) {
+ os_ << "c.mv " << XRegName(rd_rs1) << ", " << XRegName(rs2);
+ } else if (rd_rs1 != 0) {
+ os_ << "c.jr " << XRegName(rd_rs1);
+ } else if (rs2 != 0) {
+ os_ << "<hint16>";
+ } else {
+ os_ << "<unknown16>";
+ }
+ } else {
+ if (rd_rs1 != 0 && rs2 != 0) {
+ os_ << "c.add " << XRegName(rd_rs1) << ", " << XRegName(rs2);
+ } else if (rd_rs1 != 0) {
+ os_ << "c.jalr " << XRegName(rd_rs1);
+ } else if (rs2 != 0) {
+ os_ << "<hint16>";
+ } else {
+ os_ << "c.ebreak";
+ }
+ }
+ return;
+ }
+ case 0b101u:
+ offset = BitFieldExtract(insn16, 7, 3) << 6 | BitFieldExtract(insn16, 10, 3) << 3;
+ os_ << "c.fsdsp " << FRegName(GetRs2_16(insn16));
+ break;
+ case 0b110u:
+ offset = BitFieldExtract(insn16, 7, 2) << 6 | BitFieldExtract(insn16, 9, 4) << 2;
+ os_ << "c.swsp " << XRegName(GetRs2_16(insn16));
+ break;
+ case 0b111u:
+ offset = BitFieldExtract(insn16, 7, 3) << 6 | BitFieldExtract(insn16, 10, 3) << 3;
+ os_ << "c.sdsp " << XRegName(GetRs2_16(insn16));
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ os_ << ", ";
+ PrintLoadStoreAddress(/* sp */ 2, offset);
+
+ break;
+ default:
+ UNREACHABLE();
+ }
}
void DisassemblerRiscv64::Printer::Dump2Byte(const uint8_t* data) {
diff --git a/imgdiag/imgdiag_test.cc b/imgdiag/imgdiag_test.cc
index 3ea7093278..9dd7953a24 100644
--- a/imgdiag/imgdiag_test.cc
+++ b/imgdiag/imgdiag_test.cc
@@ -115,7 +115,6 @@ TEST_F(ImgDiagTest, ImageDiffPidSelf) {
// because it's root read-only.
TEST_F(ImgDiagTest, DISABLED_ImageDiffPidSelf) {
#endif
- TEST_DISABLED_FOR_RISCV64();
// Invoke 'img_diag' against the current process.
// This should succeed because we have a runtime and so it should
// be able to map in the boot.art and do a diff for it.
diff --git a/libartbase/base/common_art_test.h b/libartbase/base/common_art_test.h
index 64e251e16c..c490e085cd 100644
--- a/libartbase/base/common_art_test.h
+++ b/libartbase/base/common_art_test.h
@@ -339,24 +339,4 @@ std::vector<pid_t> GetPidByName(const std::string& process_name);
GTEST_SKIP() << "WARNING: TEST DISABLED FOR MEMORY TOOL WITH HEAP POISONING"; \
}
-#define TEST_DISABLED_FOR_RISCV64() \
- if (kRuntimeISA == InstructionSet::kRiscv64) { \
- GTEST_SKIP() << "WARNING: TEST DISABLED FOR RISCV64"; \
- }
-
-// Don't print messages on setup to avoid getting multiple "test disabled" messages for one test.
-// Setup phase may need to be disabled as some test rely on having boot image / compiler / other
-// things that are not implemented for RISC-V.
-#define TEST_SETUP_DISABLED_FOR_RISCV64() \
- if (kRuntimeISA == InstructionSet::kRiscv64) { \
- GTEST_SKIP(); \
- }
-
-// Don't print messages on teardown to avoid getting multiple "test disabled" messages for one test.
-// Teardown phase may need to be disabled to match the disabled setup phase for some tests.
-#define TEST_TEARDOWN_DISABLED_FOR_RISCV64() \
- if (kRuntimeISA == InstructionSet::kRiscv64) { \
- GTEST_SKIP(); \
- }
-
#endif // ART_LIBARTBASE_BASE_COMMON_ART_TEST_H_
diff --git a/libartbase/base/safe_map.h b/libartbase/base/safe_map.h
index fa13fe0f68..c489bfd866 100644
--- a/libartbase/base/safe_map.h
+++ b/libartbase/base/safe_map.h
@@ -149,7 +149,7 @@ class SafeMap {
}
template <typename CreateFn>
- V& GetOrCreate(const K& k, CreateFn create) {
+ V& GetOrCreate(const K& k, CreateFn&& create) {
static_assert(std::is_same_v<V, std::invoke_result_t<CreateFn>>,
"Argument `create` should return a value of type V.");
auto lb = lower_bound(k);
diff --git a/libartbase/base/sdk_version.h b/libartbase/base/sdk_version.h
index d39aa95b5d..b955ab0322 100644
--- a/libartbase/base/sdk_version.h
+++ b/libartbase/base/sdk_version.h
@@ -38,6 +38,7 @@ enum class SdkVersion : uint32_t {
kS = 31u,
kS_V2 = 32u,
kT = 33u,
+ kU = 34u,
kMax = std::numeric_limits<uint32_t>::max(),
};
diff --git a/libartpalette/apex/palette_test.cc b/libartpalette/apex/palette_test.cc
index c2b7304d8d..63072c491b 100644
--- a/libartpalette/apex/palette_test.cc
+++ b/libartpalette/apex/palette_test.cc
@@ -54,16 +54,10 @@ bool PaletteSetTaskProfilesIsSupported(palette_status_t res) {
} // namespace
-// TODO(riscv64): remove `namespace art` when `TEST_DISABLED_FOR_RISCV64` is no longer needed.
-namespace art {
-
class PaletteClientTest : public testing::Test {};
TEST_F(PaletteClientTest, SchedPriority) {
- // On RISC-V tests run in Android-like chroot on a Linux VM => some syscalls work differently.
- TEST_DISABLED_FOR_RISCV64();
-
- int32_t tid = ::GetTid();
+ int32_t tid = GetTid();
int32_t saved_priority;
EXPECT_EQ(PALETTE_STATUS_OK, PaletteSchedGetPriority(tid, &saved_priority));
@@ -87,9 +81,6 @@ TEST_F(PaletteClientTest, Ashmem) {
#ifndef ART_TARGET_ANDROID
GTEST_SKIP() << "ashmem is only supported on Android";
#else
- // On RISC-V tests run in Android-like chroot on a Linux VM => some syscalls work differently.
- TEST_DISABLED_FOR_RISCV64();
-
int fd;
EXPECT_EQ(PALETTE_STATUS_OK, PaletteAshmemCreateRegion("ashmem-test", 4096, &fd));
EXPECT_EQ(PALETTE_STATUS_OK, PaletteAshmemSetProtRegion(fd, PROT_READ | PROT_EXEC));
@@ -175,5 +166,3 @@ TEST_F(PaletteClientTest, SetTaskProfilesCpp) {
}
#endif
}
-
-} // namespace art
diff --git a/libartservice/service/api/system-server-current.txt b/libartservice/service/api/system-server-current.txt
index f316065ed8..05163ebc70 100644
--- a/libartservice/service/api/system-server-current.txt
+++ b/libartservice/service/api/system-server-current.txt
@@ -19,6 +19,7 @@ package com.android.server.art {
method @NonNull public com.android.server.art.model.DexoptStatus getDexoptStatus(@NonNull com.android.server.pm.PackageManagerLocal.FilteredSnapshot, @NonNull String);
method @NonNull public com.android.server.art.model.DexoptStatus getDexoptStatus(@NonNull com.android.server.pm.PackageManagerLocal.FilteredSnapshot, @NonNull String, int);
method public int handleShellCommand(@NonNull android.os.Binder, @NonNull android.os.ParcelFileDescriptor, @NonNull android.os.ParcelFileDescriptor, @NonNull android.os.ParcelFileDescriptor, @NonNull String[]);
+ method public void onApexStaged(@NonNull String[]);
method public void onBoot(@NonNull String, @Nullable java.util.concurrent.Executor, @Nullable java.util.function.Consumer<com.android.server.art.model.OperationProgress>);
method public void printShellCommandHelp(@NonNull java.io.PrintWriter);
method public void removeDexoptDoneCallback(@NonNull com.android.server.art.ArtManagerLocal.DexoptDoneCallback);
diff --git a/libartservice/service/java/com/android/server/art/ArtManagerLocal.java b/libartservice/service/java/com/android/server/art/ArtManagerLocal.java
index 69cba7d860..db82bfd982 100644
--- a/libartservice/service/java/com/android/server/art/ArtManagerLocal.java
+++ b/libartservice/service/java/com/android/server/art/ArtManagerLocal.java
@@ -853,6 +853,25 @@ public final class ArtManagerLocal {
}
/**
+ * Notifies ART Service that there are apexes staged for installation on next reboot (see
+ * <a href="https://source.android.com/docs/core/ota/apex#apex-manager">the update sequence of
+ * an APEX</a>). ART Service may use this to schedule a pre-reboot dexopt job. This might change
+ * in the future.
+ *
+ * This immediately returns after scheduling the job and doesn't wait for the job to run.
+ *
+ * @param stagedApexModuleNames The <b>module names</b> of the staged apexes, corresponding to
+ * the directory beneath /apex, e.g., {@code com.android.art} (not the <b>package
+ * names</b>, e.g., {@code com.google.android.art}).
+ */
+ @SuppressLint("UnflaggedApi") // Flag support for mainline is not available.
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ public void onApexStaged(@NonNull String[] stagedApexModuleNames) {
+ // TODO(b/311377497): Check system requirements.
+ mInjector.getPreRebootDexoptJob().schedule();
+ }
+
+ /**
* Dumps the dexopt state of all packages in text format for debugging purposes.
*
* There are no stability guarantees for the output format.
@@ -1024,6 +1043,17 @@ public final class ArtManagerLocal {
return mInjector.getBackgroundDexoptJob();
}
+ /**
+ * Should be used by {@link BackgroundDexoptJobService} ONLY.
+ *
+ * @hide
+ */
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ @NonNull
+ PreRebootDexoptJob getPreRebootDexoptJob() {
+ return mInjector.getPreRebootDexoptJob();
+ }
+
@RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
@Nullable
private DexoptResult maybeDowngradePackages(
@@ -1354,6 +1384,7 @@ public final class ArtManagerLocal {
@Nullable private final PackageManagerLocal mPackageManagerLocal;
@Nullable private final Config mConfig;
@Nullable private BackgroundDexoptJob mBgDexoptJob = null;
+ @Nullable private PreRebootDexoptJob mPrDexoptJob = null;
/** For compatibility with S and T. New code should not use this. */
@Deprecated
@@ -1443,6 +1474,15 @@ public final class ArtManagerLocal {
@RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
@NonNull
+ public synchronized PreRebootDexoptJob getPreRebootDexoptJob() {
+ if (mPrDexoptJob == null) {
+ mPrDexoptJob = new PreRebootDexoptJob(mContext);
+ }
+ return mPrDexoptJob;
+ }
+
+ @RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+ @NonNull
public UserManager getUserManager() {
return Objects.requireNonNull(mContext.getSystemService(UserManager.class));
}
diff --git a/libartservice/service/java/com/android/server/art/BackgroundDexoptJob.java b/libartservice/service/java/com/android/server/art/BackgroundDexoptJob.java
index 9aeb2828fb..289c7cd235 100644
--- a/libartservice/service/java/com/android/server/art/BackgroundDexoptJob.java
+++ b/libartservice/service/java/com/android/server/art/BackgroundDexoptJob.java
@@ -41,6 +41,7 @@ import com.android.internal.annotations.GuardedBy;
import com.android.internal.annotations.VisibleForTesting;
import com.android.server.LocalManagerRegistry;
import com.android.server.art.model.ArtFlags;
+import com.android.server.art.model.ArtServiceJobInterface;
import com.android.server.art.model.Config;
import com.android.server.art.model.DexoptResult;
import com.android.server.art.model.OperationProgress;
@@ -59,7 +60,7 @@ import java.util.function.Consumer;
/** @hide */
@RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
-public class BackgroundDexoptJob {
+public class BackgroundDexoptJob implements ArtServiceJobInterface {
private static final String TAG = ArtManagerLocal.TAG;
/**
@@ -68,7 +69,7 @@ public class BackgroundDexoptJob {
*/
private static final String JOB_PKG_NAME = Utils.PLATFORM_PACKAGE_NAME;
/** An arbitrary number. Must be unique among all jobs owned by the system uid. */
- private static final int JOB_ID = 27873780;
+ public static final int JOB_ID = 27873780;
@VisibleForTesting public static final long JOB_INTERVAL_MS = TimeUnit.DAYS.toMillis(1);
@@ -89,6 +90,7 @@ public class BackgroundDexoptJob {
}
/** Handles {@link BackgroundDexoptJobService#onStartJob(JobParameters)}. */
+ @Override
public boolean onStartJob(
@NonNull BackgroundDexoptJobService jobService, @NonNull JobParameters params) {
start().thenAcceptAsync(result -> {
@@ -113,6 +115,7 @@ public class BackgroundDexoptJob {
}
/** Handles {@link BackgroundDexoptJobService#onStopJob(JobParameters)}. */
+ @Override
public boolean onStopJob(@NonNull JobParameters params) {
synchronized (this) {
mLastStopReason = Optional.of(params.getStopReason());
@@ -124,7 +127,7 @@ public class BackgroundDexoptJob {
/** Handles {@link ArtManagerLocal#scheduleBackgroundDexoptJob()}. */
public @ScheduleStatus int schedule() {
- if (this != BackgroundDexoptJobService.getJob()) {
+ if (this != BackgroundDexoptJobService.getJob(JOB_ID)) {
throw new IllegalStateException("This job cannot be scheduled");
}
@@ -164,7 +167,7 @@ public class BackgroundDexoptJob {
/** Handles {@link ArtManagerLocal#unscheduleBackgroundDexoptJob()}. */
public void unschedule() {
- if (this != BackgroundDexoptJobService.getJob()) {
+ if (this != BackgroundDexoptJobService.getJob(JOB_ID)) {
throw new IllegalStateException("This job cannot be unscheduled");
}
diff --git a/libartservice/service/java/com/android/server/art/BackgroundDexoptJobService.java b/libartservice/service/java/com/android/server/art/BackgroundDexoptJobService.java
index 41425ee5cc..8bcc4b98be 100644
--- a/libartservice/service/java/com/android/server/art/BackgroundDexoptJobService.java
+++ b/libartservice/service/java/com/android/server/art/BackgroundDexoptJobService.java
@@ -24,6 +24,7 @@ import android.os.Build;
import androidx.annotation.RequiresApi;
import com.android.server.LocalManagerRegistry;
+import com.android.server.art.model.ArtServiceJobInterface;
/**
* Entry point for the callback from the job scheduler. This class is instantiated by the system
@@ -35,16 +36,21 @@ import com.android.server.LocalManagerRegistry;
public class BackgroundDexoptJobService extends JobService {
@Override
public boolean onStartJob(@NonNull JobParameters params) {
- return getJob().onStartJob(this, params);
+ return getJob(params.getJobId()).onStartJob(this, params);
}
@Override
public boolean onStopJob(@NonNull JobParameters params) {
- return getJob().onStopJob(params);
+ return getJob(params.getJobId()).onStopJob(params);
}
@NonNull
- static BackgroundDexoptJob getJob() {
- return LocalManagerRegistry.getManager(ArtManagerLocal.class).getBackgroundDexoptJob();
+ static ArtServiceJobInterface getJob(int jobId) {
+ if (jobId == BackgroundDexoptJob.JOB_ID) {
+ return LocalManagerRegistry.getManager(ArtManagerLocal.class).getBackgroundDexoptJob();
+ } else if (jobId == PreRebootDexoptJob.JOB_ID) {
+ return LocalManagerRegistry.getManager(ArtManagerLocal.class).getPreRebootDexoptJob();
+ }
+ throw new IllegalArgumentException("Unknown job ID " + jobId);
}
}
diff --git a/libartservice/service/java/com/android/server/art/PreRebootDexoptJob.java b/libartservice/service/java/com/android/server/art/PreRebootDexoptJob.java
new file mode 100644
index 0000000000..74bf913e62
--- /dev/null
+++ b/libartservice/service/java/com/android/server/art/PreRebootDexoptJob.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.server.art;
+
+import static com.android.server.art.model.ArtFlags.ScheduleStatus;
+
+import android.annotation.NonNull;
+import android.app.job.JobParameters;
+import android.content.Context;
+import android.os.Build;
+
+import androidx.annotation.RequiresApi;
+
+import com.android.internal.annotations.VisibleForTesting;
+import com.android.server.art.model.ArtFlags;
+import com.android.server.art.model.ArtServiceJobInterface;
+
+/** @hide */
+@RequiresApi(Build.VERSION_CODES.UPSIDE_DOWN_CAKE)
+public class PreRebootDexoptJob implements ArtServiceJobInterface {
+ private static final String TAG = ArtManagerLocal.TAG;
+
+ /**
+ * "android" is the package name for a <service> declared in
+ * frameworks/base/core/res/AndroidManifest.xml
+ */
+ private static final String JOB_PKG_NAME = Utils.PLATFORM_PACKAGE_NAME;
+ /** An arbitrary number. Must be unique among all jobs owned by the system uid. */
+ public static final int JOB_ID = 27873781;
+
+ @NonNull private final Injector mInjector;
+
+ public PreRebootDexoptJob(@NonNull Context context) {
+ this(new Injector(context));
+ }
+
+ @VisibleForTesting
+ public PreRebootDexoptJob(@NonNull Injector injector) {
+ mInjector = injector;
+ }
+
+ @Override
+ public boolean onStartJob(
+ @NonNull BackgroundDexoptJobService jobService, @NonNull JobParameters params) {
+ // "true" means the job will continue running until `jobFinished` is called.
+ return false;
+ }
+
+ @Override
+ public boolean onStopJob(@NonNull JobParameters params) {
+ // "true" means to execute again in the same interval with the default retry policy.
+ return true;
+ }
+
+ public @ScheduleStatus int schedule() {
+ // TODO(b/311377497): Schedule the job.
+ return ArtFlags.SCHEDULE_SUCCESS;
+ }
+
+ /**
+ * Injector pattern for testing purpose.
+ *
+ * @hide
+ */
+ @VisibleForTesting
+ public static class Injector {
+ @NonNull private final Context mContext;
+
+ Injector(@NonNull Context context) {
+ mContext = context;
+ }
+ }
+}
diff --git a/libartservice/service/java/com/android/server/art/model/ArtServiceJobInterface.java b/libartservice/service/java/com/android/server/art/model/ArtServiceJobInterface.java
new file mode 100644
index 0000000000..569996c566
--- /dev/null
+++ b/libartservice/service/java/com/android/server/art/model/ArtServiceJobInterface.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.server.art.model;
+
+import android.annotation.NonNull;
+import android.app.job.JobParameters;
+
+import com.android.server.art.BackgroundDexoptJobService;
+
+/** @hide */
+public interface ArtServiceJobInterface {
+ boolean onStartJob(
+ @NonNull BackgroundDexoptJobService jobService, @NonNull JobParameters params);
+
+ boolean onStopJob(@NonNull JobParameters params);
+}
diff --git a/libartservice/service/javatests/com/android/server/art/BackgroundDexoptJobTest.java b/libartservice/service/javatests/com/android/server/art/BackgroundDexoptJobTest.java
index c61461d0c9..85ae079c77 100644
--- a/libartservice/service/javatests/com/android/server/art/BackgroundDexoptJobTest.java
+++ b/libartservice/service/javatests/com/android/server/art/BackgroundDexoptJobTest.java
@@ -16,6 +16,7 @@
package com.android.server.art;
+import static com.android.server.art.BackgroundDexoptJob.JOB_ID;
import static com.android.server.art.model.Config.Callback;
import static com.android.server.art.model.DexoptResult.DexoptResultStatus;
import static com.android.server.art.model.DexoptResult.PackageDexoptResult;
@@ -103,7 +104,7 @@ public class BackgroundDexoptJobTest {
lenient().when(mInjector.getJobScheduler()).thenReturn(mJobScheduler);
mBackgroundDexoptJob = new BackgroundDexoptJob(mInjector);
- lenient().when(BackgroundDexoptJobService.getJob()).thenReturn(mBackgroundDexoptJob);
+ lenient().when(BackgroundDexoptJobService.getJob(JOB_ID)).thenReturn(mBackgroundDexoptJob);
lenient()
.doAnswer(invocation -> {
diff --git a/libdexfile/dex/code_item_accessors-inl.h b/libdexfile/dex/code_item_accessors-inl.h
index b74046071f..4586a0ff7a 100644
--- a/libdexfile/dex/code_item_accessors-inl.h
+++ b/libdexfile/dex/code_item_accessors-inl.h
@@ -202,20 +202,22 @@ template<typename NewLocalVisitor>
inline bool CodeItemDebugInfoAccessor::DecodeDebugLocalInfo(
bool is_static,
uint32_t method_idx,
- const NewLocalVisitor& new_local) const {
+ NewLocalVisitor&& new_local) const {
return dex_file_->DecodeDebugLocalInfo(RegistersSize(),
InsSize(),
InsnsSizeInCodeUnits(),
DebugInfoOffset(),
is_static,
method_idx,
- new_local);
+ std::forward<NewLocalVisitor>(new_local));
}
template <typename Visitor>
-inline uint32_t CodeItemDebugInfoAccessor::VisitParameterNames(const Visitor& visitor) const {
+inline uint32_t CodeItemDebugInfoAccessor::VisitParameterNames(Visitor&& visitor) const {
const uint8_t* stream = dex_file_->GetDebugInfoStream(DebugInfoOffset());
- return (stream != nullptr) ? DexFile::DecodeDebugInfoParameterNames(&stream, visitor) : 0u;
+ return (stream != nullptr) ?
+ DexFile::DecodeDebugInfoParameterNames(&stream, std::forward<Visitor>(visitor)) :
+ 0u;
}
inline bool CodeItemDebugInfoAccessor::GetLineNumForPc(const uint32_t address,
@@ -233,13 +235,13 @@ inline bool CodeItemDebugInfoAccessor::GetLineNumForPc(const uint32_t address,
}
template <typename Visitor>
-inline bool CodeItemDebugInfoAccessor::DecodeDebugPositionInfo(const Visitor& visitor) const {
+inline bool CodeItemDebugInfoAccessor::DecodeDebugPositionInfo(Visitor&& visitor) const {
return dex_file_->DecodeDebugPositionInfo(
dex_file_->GetDebugInfoStream(DebugInfoOffset()),
[this](uint32_t idx) {
return dex_file_->StringDataByIdx(dex::StringIndex(idx));
},
- visitor);
+ std::forward<Visitor>(visitor));
}
} // namespace art
diff --git a/libdexfile/dex/code_item_accessors.h b/libdexfile/dex/code_item_accessors.h
index 24296c8f1d..5952b2d7ea 100644
--- a/libdexfile/dex/code_item_accessors.h
+++ b/libdexfile/dex/code_item_accessors.h
@@ -164,15 +164,15 @@ class CodeItemDebugInfoAccessor : public CodeItemDataAccessor {
template<typename NewLocalVisitor>
bool DecodeDebugLocalInfo(bool is_static,
uint32_t method_idx,
- const NewLocalVisitor& new_local) const;
+ NewLocalVisitor&& new_local) const;
// Visit each parameter in the debug information. Returns the line number.
// The argument of the Visitor is dex::StringIndex.
template <typename Visitor>
- uint32_t VisitParameterNames(const Visitor& visitor) const;
+ uint32_t VisitParameterNames(Visitor&& visitor) const;
template <typename Visitor>
- bool DecodeDebugPositionInfo(const Visitor& visitor) const;
+ bool DecodeDebugPositionInfo(Visitor&& visitor) const;
bool GetLineNumForPc(const uint32_t pc, uint32_t* line_num) const;
diff --git a/libdexfile/dex/dex_file-inl.h b/libdexfile/dex/dex_file-inl.h
index 9291f6ee9e..b01b004e5b 100644
--- a/libdexfile/dex/dex_file-inl.h
+++ b/libdexfile/dex/dex_file-inl.h
@@ -427,8 +427,8 @@ bool DexFile::DecodeDebugLocalInfo(uint32_t registers_size,
template<typename DexDebugNewPosition, typename IndexToStringData>
bool DexFile::DecodeDebugPositionInfo(const uint8_t* stream,
- const IndexToStringData& index_to_string_data,
- const DexDebugNewPosition& position_functor) {
+ IndexToStringData&& index_to_string_data,
+ DexDebugNewPosition&& position_functor) {
if (stream == nullptr) {
return false;
}
@@ -514,7 +514,7 @@ inline IterationRange<ClassIterator> DexFile::GetClasses() const {
// Returns the line number
template <typename Visitor>
inline uint32_t DexFile::DecodeDebugInfoParameterNames(const uint8_t** debug_info,
- const Visitor& visitor) {
+ Visitor&& visitor) {
uint32_t line = DecodeUnsignedLeb128(debug_info);
const uint32_t parameters_size = DecodeUnsignedLeb128(debug_info);
for (uint32_t i = 0; i < parameters_size; ++i) {
diff --git a/libdexfile/dex/dex_file.h b/libdexfile/dex/dex_file.h
index a7d288120d..6bc6e4f4ca 100644
--- a/libdexfile/dex/dex_file.h
+++ b/libdexfile/dex/dex_file.h
@@ -787,8 +787,8 @@ class DexFile {
// Returns false if there is no debugging information or if it cannot be decoded.
template<typename DexDebugNewPosition, typename IndexToStringData>
static bool DecodeDebugPositionInfo(const uint8_t* stream,
- const IndexToStringData& index_to_string_data,
- const DexDebugNewPosition& position_functor);
+ IndexToStringData&& index_to_string_data,
+ DexDebugNewPosition&& position_functor);
const char* GetSourceFile(const dex::ClassDef& class_def) const {
if (!class_def.source_file_idx_.IsValid()) {
@@ -893,7 +893,7 @@ class DexFile {
template <typename Visitor>
static uint32_t DecodeDebugInfoParameterNames(const uint8_t** debug_info,
- const Visitor& visitor);
+ Visitor&& visitor);
static inline bool StringEquals(const DexFile* df1, dex::StringIndex sidx1,
const DexFile* df2, dex::StringIndex sidx2);
@@ -935,7 +935,17 @@ class DexFile {
// This is different to the "data section" in the standard dex header.
ArrayRef<const uint8_t> const data_;
- // Typically the dex file name when available, alternatively some identifying string.
+ // The full absolute path to the dex file, if it was loaded from disk.
+ //
+ // Can also be a path to a multidex container (typically apk), followed by
+ // DexFileLoader.kMultiDexSeparator (i.e. '!') and the file inside the
+ // container.
+ //
+ // On host this may not be an absolute path.
+ //
+ // On device libnativeloader uses this to determine the location of the java
+ // package or shared library, which decides where to load native libraries
+ // from.
//
// The ClassLinker will use this to match DexFiles the boot class
// path to DexCache::GetLocation when loading from an image.
diff --git a/libdexfile/dex/dex_file_loader.h b/libdexfile/dex/dex_file_loader.h
index 6530303542..ec7f8ba977 100644
--- a/libdexfile/dex/dex_file_loader.h
+++ b/libdexfile/dex/dex_file_loader.h
@@ -327,6 +327,17 @@ class DexFileLoader {
const File* file_ = &kInvalidFile;
std::optional<File> owned_file_; // May be used as backing storage for 'file_'.
std::shared_ptr<DexFileContainer> root_container_;
+
+ // The full absolute path to the dex file, if it was loaded from disk.
+ //
+ // Can also be a path to a multidex container (typically apk), followed by
+ // kMultiDexSeparator and the file inside the container.
+ //
+ // On host this may not be an absolute path.
+ //
+ // On device libnativeloader uses this to determine the location of the java
+ // package or shared library, which decides where to load native libraries
+ // from.
const std::string location_;
};
diff --git a/libdexfile/dex/dex_file_verifier.cc b/libdexfile/dex/dex_file_verifier.cc
index 8c25b5a4cf..ba3472015d 100644
--- a/libdexfile/dex/dex_file_verifier.cc
+++ b/libdexfile/dex/dex_file_verifier.cc
@@ -20,11 +20,11 @@
#include <bitset>
#include <limits>
#include <memory>
+#include <stack>
#include "android-base/logging.h"
#include "android-base/macros.h"
#include "android-base/stringprintf.h"
-
#include "base/hash_map.h"
#include "base/leb128.h"
#include "base/safe_map.h"
@@ -291,9 +291,21 @@ class DexFileVerifier {
bool CheckStaticFieldTypes(const dex::ClassDef& class_def);
bool CheckPadding(uint32_t aligned_offset, DexFile::MapItemType type);
+
+ // The encoded values, arrays and annotations are allowed to be very deeply nested,
+ // so use heap todo-list instead of stack recursion (the work is done in LIFO order).
+ struct ToDoItem {
+ uint32_t array_size = 0; // CheckArrayElement.
+ uint32_t annotation_size = 0; // CheckAnnotationElement.
+ uint32_t last_idx = kDexNoIndex; // CheckAnnotationElement.
+ };
+ using ToDoList = std::stack<ToDoItem>;
bool CheckEncodedValue();
bool CheckEncodedArray();
+ bool CheckArrayElement();
bool CheckEncodedAnnotation();
+ bool CheckAnnotationElement(/*inout*/ uint32_t* last_idx);
+ bool FlushToDoList();
bool CheckIntraTypeIdItem();
bool CheckIntraProtoIdItem();
@@ -454,6 +466,9 @@ class DexFileVerifier {
// Class definition indexes, valid only if corresponding `defined_classes_[.]` is true.
std::vector<uint16_t> defined_class_indexes_;
+
+ // Used by CheckEncodedValue to avoid recursion. Field so we can reuse allocated memory.
+ ToDoList todo_;
};
template <typename ExtraCheckFn>
@@ -1184,12 +1199,15 @@ bool DexFileVerifier::CheckEncodedValue() {
bool DexFileVerifier::CheckEncodedArray() {
DECODE_UNSIGNED_CHECKED_FROM(ptr_, size);
+ todo_.emplace(ToDoItem{.array_size = size});
+ return true;
+}
- for (; size != 0u; --size) {
- if (!CheckEncodedValue()) {
- failure_reason_ = StringPrintf("Bad encoded_array value: %s", failure_reason_.c_str());
- return false;
- }
+// Always called directly from FlushToDoList, which avoids recursion.
+bool DexFileVerifier::CheckArrayElement() {
+ if (!CheckEncodedValue()) {
+ failure_reason_ = StringPrintf("Bad encoded_array value: %s", failure_reason_.c_str());
+ return false;
}
return true;
}
@@ -1201,25 +1219,44 @@ bool DexFileVerifier::CheckEncodedAnnotation() {
}
DECODE_UNSIGNED_CHECKED_FROM(ptr_, size);
- uint32_t last_idx = 0;
+ todo_.emplace(ToDoItem{.annotation_size = size, .last_idx = kDexNoIndex});
+ return true;
+}
- for (uint32_t i = 0; i < size; i++) {
- DECODE_UNSIGNED_CHECKED_FROM(ptr_, idx);
- if (!CheckIndex(idx, header_->string_ids_size_, "annotation_element name_idx")) {
- return false;
- }
+// Always called directly from FlushToDoList, which avoids recursion.
+bool DexFileVerifier::CheckAnnotationElement(/*inout*/ uint32_t* last_idx) {
+ DECODE_UNSIGNED_CHECKED_FROM(ptr_, idx);
+ if (!CheckIndex(idx, header_->string_ids_size_, "annotation_element name_idx")) {
+ return false;
+ }
- if (UNLIKELY(last_idx >= idx && i != 0)) {
- ErrorStringPrintf("Out-of-order annotation_element name_idx: %x then %x",
- last_idx, idx);
- return false;
- }
+ if (UNLIKELY(*last_idx >= idx && *last_idx != kDexNoIndex)) {
+ ErrorStringPrintf("Out-of-order annotation_element name_idx: %x then %x", *last_idx, idx);
+ return false;
+ }
+ *last_idx = idx;
- if (!CheckEncodedValue()) {
- return false;
- }
+ return CheckEncodedValue();
+}
- last_idx = idx;
+// Keep processing the rest of the to-do list until we are finished or encounter an error.
+bool DexFileVerifier::FlushToDoList() {
+ while (!todo_.empty()) {
+ ToDoItem& item = todo_.top();
+ DCHECK(item.array_size == 0u || item.annotation_size == 0u);
+ if (item.array_size > 0) {
+ item.array_size--;
+ if (!CheckArrayElement()) {
+ return false;
+ }
+ } else if (item.annotation_size > 0) {
+ item.annotation_size--;
+ if (!CheckAnnotationElement(&item.last_idx)) {
+ return false;
+ }
+ } else {
+ todo_.pop();
+ }
}
return true;
}
@@ -1970,7 +2007,8 @@ bool DexFileVerifier::CheckIntraAnnotationItem() {
return false;
}
- if (!CheckEncodedAnnotation()) {
+ CHECK(todo_.empty());
+ if (!CheckEncodedAnnotation() || !FlushToDoList()) {
return false;
}
@@ -2222,7 +2260,8 @@ bool DexFileVerifier::CheckIntraSectionIterate(uint32_t section_count) {
break;
}
case DexFile::kDexTypeEncodedArrayItem: {
- if (!CheckEncodedArray()) {
+ CHECK(todo_.empty());
+ if (!CheckEncodedArray() || !FlushToDoList()) {
return false;
}
break;
@@ -3465,6 +3504,7 @@ bool DexFileVerifier::Verify() {
return false;
}
+ CHECK(todo_.empty()); // No unprocessed work left over.
return true;
}
diff --git a/libelffile/elf/elf_debug_reader.h b/libelffile/elf/elf_debug_reader.h
index fc7ad5654b..05aa339c13 100644
--- a/libelffile/elf/elf_debug_reader.h
+++ b/libelffile/elf/elf_debug_reader.h
@@ -119,7 +119,7 @@ class ElfDebugReader {
}
template <typename VisitSym>
- void VisitFunctionSymbols(VisitSym visit_sym) {
+ void VisitFunctionSymbols(VisitSym&& visit_sym) {
const Elf_Shdr* symtab = GetSection(".symtab");
const Elf_Shdr* strtab = GetSection(".strtab");
const Elf_Shdr* text = GetSection(".text");
@@ -135,12 +135,12 @@ class ElfDebugReader {
}
}
if (gnu_debugdata_reader_ != nullptr) {
- gnu_debugdata_reader_->VisitFunctionSymbols(visit_sym);
+ gnu_debugdata_reader_->VisitFunctionSymbols(std::forward<VisitSym>(visit_sym));
}
}
template <typename VisitSym>
- void VisitDynamicSymbols(VisitSym visit_sym) {
+ void VisitDynamicSymbols(VisitSym&& visit_sym) {
const Elf_Shdr* dynsym = GetSection(".dynsym");
const Elf_Shdr* dynstr = GetSection(".dynstr");
if (dynsym != nullptr && dynstr != nullptr) {
@@ -153,7 +153,7 @@ class ElfDebugReader {
}
template <typename VisitCIE, typename VisitFDE>
- void VisitDebugFrame(VisitCIE visit_cie, VisitFDE visit_fde) {
+ void VisitDebugFrame(VisitCIE&& visit_cie, VisitFDE&& visit_fde) {
const Elf_Shdr* debug_frame = GetSection(".debug_frame");
if (debug_frame != nullptr) {
for (size_t offset = 0; offset < debug_frame->sh_size;) {
@@ -169,7 +169,8 @@ class ElfDebugReader {
}
}
if (gnu_debugdata_reader_ != nullptr) {
- gnu_debugdata_reader_->VisitDebugFrame(visit_cie, visit_fde);
+ gnu_debugdata_reader_->VisitDebugFrame(std::forward<VisitCIE>(visit_cie),
+ std::forward<VisitFDE>(visit_fde));
}
}
diff --git a/libnativeloader/Android.bp b/libnativeloader/Android.bp
index 16449ac745..e9c26c592c 100644
--- a/libnativeloader/Android.bp
+++ b/libnativeloader/Android.bp
@@ -158,6 +158,7 @@ art_cc_test {
"native_loader_test.cpp",
],
srcs: [
+ "library_namespaces_test.cpp",
"native_loader_api_test.c",
"native_loader_test.cpp",
"open_system_library.cpp",
diff --git a/libnativeloader/library_namespaces.cpp b/libnativeloader/library_namespaces.cpp
index 1e29f4e457..e2b27294f9 100644
--- a/libnativeloader/library_namespaces.cpp
+++ b/libnativeloader/library_namespaces.cpp
@@ -16,23 +16,28 @@
#if defined(ART_TARGET_ANDROID)
+#define LOG_TAG "nativeloader"
+
#include "library_namespaces.h"
#include <dirent.h>
#include <dlfcn.h>
+#include <stdio.h>
+#include <algorithm>
+#include <optional>
#include <regex>
#include <string>
+#include <string_view>
#include <vector>
-#include <android-base/file.h>
-#include <android-base/logging.h>
-#include <android-base/macros.h>
-#include <android-base/result.h>
-#include <android-base/strings.h>
-#include <android-base/stringprintf.h>
-#include <nativehelper/scoped_utf_chars.h>
-
+#include "android-base/file.h"
+#include "android-base/logging.h"
+#include "android-base/macros.h"
+#include "android-base/result.h"
+#include "android-base/stringprintf.h"
+#include "android-base/strings.h"
+#include "nativehelper/scoped_utf_chars.h"
#include "nativeloader/dlext_namespaces.h"
#include "public_libraries.h"
#include "utils.h"
@@ -41,20 +46,9 @@ namespace android::nativeloader {
namespace {
-constexpr const char* kApexPath = "/apex/";
+using ::android::base::Error;
-// The device may be configured to have the vendor libraries loaded to a separate namespace.
-// For historical reasons this namespace was named sphal but effectively it is intended
-// to use to load vendor libraries to separate namespace with controlled interface between
-// vendor and system namespaces.
-constexpr const char* kVendorNamespaceName = "sphal";
-// Similar to sphal namespace, product namespace provides some product libraries.
-constexpr const char* kProductNamespaceName = "product";
-
-// vndk namespace for unbundled vendor apps
-constexpr const char* kVndkNamespaceName = "vndk";
-// vndk_product namespace for unbundled product apps
-constexpr const char* kVndkProductNamespaceName = "vndk_product";
+constexpr const char* kApexPath = "/apex/";
// clns-XX is a linker namespace that is created for normal apps installed in
// the data partition. To be specific, it is created for the app classloader.
@@ -89,15 +83,8 @@ constexpr const char* kVendorLibPath = "/vendor/" LIB;
// a symlink to the other.
constexpr const char* kProductLibPath = "/product/" LIB ":/system/product/" LIB;
-const std::regex kVendorDexPathRegex("(^|:)(/system)?/vendor/");
-const std::regex kProductDexPathRegex("(^|:)(/system)?/product/");
-
-// Define origin partition of APK
-using ApkOrigin = enum {
- APK_ORIGIN_DEFAULT = 0,
- APK_ORIGIN_VENDOR = 1, // Includes both /vendor and /system/vendor
- APK_ORIGIN_PRODUCT = 2, // Includes both /product and /system/product
-};
+const std::regex kVendorPathRegex("(/system)?/vendor/.*");
+const std::regex kProductPathRegex("(/system)?/product/.*");
jobject GetParentClassLoader(JNIEnv* env, jobject class_loader) {
jclass class_loader_class = env->FindClass("java/lang/ClassLoader");
@@ -107,22 +94,44 @@ jobject GetParentClassLoader(JNIEnv* env, jobject class_loader) {
return env->CallObjectMethod(class_loader, get_parent);
}
-ApkOrigin GetApkOriginFromDexPath(const std::string& dex_path) {
- ApkOrigin apk_origin = APK_ORIGIN_DEFAULT;
- if (std::regex_search(dex_path, kVendorDexPathRegex)) {
- apk_origin = APK_ORIGIN_VENDOR;
- }
- if (std::regex_search(dex_path, kProductDexPathRegex)) {
- LOG_ALWAYS_FATAL_IF(apk_origin == APK_ORIGIN_VENDOR,
- "Dex path contains both vendor and product partition : %s",
- dex_path.c_str());
+} // namespace
- apk_origin = APK_ORIGIN_PRODUCT;
+ApiDomain GetApiDomainFromPath(const std::string_view path) {
+ if (std::regex_match(path.begin(), path.end(), kVendorPathRegex)) {
+ return API_DOMAIN_VENDOR;
+ }
+ if (is_product_treblelized() && std::regex_match(path.begin(), path.end(), kProductPathRegex)) {
+ return API_DOMAIN_PRODUCT;
}
- return apk_origin;
+ return API_DOMAIN_DEFAULT;
}
-} // namespace
+// Returns the API domain for a ':'-separated list of paths, or an error if they
+// match more than one.
+Result<ApiDomain> GetApiDomainFromPathList(const std::string& path_list) {
+ ApiDomain result = API_DOMAIN_DEFAULT;
+ size_t start_pos = 0;
+ while (true) {
+ size_t end_pos = path_list.find(':', start_pos);
+ ApiDomain api_domain =
+ GetApiDomainFromPath(std::string_view(path_list).substr(start_pos, end_pos));
+ // Allow mixing API_DOMAIN_DEFAULT with any other domain. That's a bit lax,
+ // since the default e.g. includes /data, which strictly speaking is a
+ // separate domain. However, we keep it this way to not risk compat issues
+ // until we actually need all domains.
+ if (api_domain != API_DOMAIN_DEFAULT) {
+ if (result != API_DOMAIN_DEFAULT && result != api_domain) {
+ return Error() << "Path list crosses partition boundaries: " << path_list;
+ }
+ result = api_domain;
+ }
+ if (end_pos == std::string::npos) {
+ break;
+ }
+ start_pos = end_pos + 1;
+ }
+ return result;
+}
void LibraryNamespaces::Initialize() {
// Once public namespace is initialized there is no
@@ -170,7 +179,7 @@ static const std::string filter_public_libraries(
}
std::vector<std::string> filtered;
std::vector<std::string> orig = android::base::Split(public_libraries, ":");
- for (const auto& lib : uses_libraries) {
+ for (const std::string& lib : uses_libraries) {
if (std::find(orig.begin(), orig.end(), lib) != orig.end()) {
filtered.emplace_back(lib);
}
@@ -178,32 +187,29 @@ static const std::string filter_public_libraries(
return android::base::Join(filtered, ":");
}
-Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t target_sdk_version,
- jobject class_loader, bool is_shared,
- jstring dex_path_j,
- jstring java_library_path,
- jstring java_permitted_path,
- jstring uses_library_list) {
+Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env,
+ uint32_t target_sdk_version,
+ jobject class_loader,
+ ApiDomain api_domain,
+ bool is_shared,
+ const std::string& dex_path,
+ jstring library_path_j,
+ jstring permitted_path_j,
+ jstring uses_library_list_j) {
std::string library_path; // empty string by default.
- std::string dex_path;
- if (java_library_path != nullptr) {
- ScopedUtfChars library_path_utf_chars(env, java_library_path);
+ if (library_path_j != nullptr) {
+ ScopedUtfChars library_path_utf_chars(env, library_path_j);
library_path = library_path_utf_chars.c_str();
}
- if (dex_path_j != nullptr) {
- ScopedUtfChars dex_path_chars(env, dex_path_j);
- dex_path = dex_path_chars.c_str();
- }
-
std::vector<std::string> uses_libraries;
- if (uses_library_list != nullptr) {
- ScopedUtfChars names(env, uses_library_list);
+ if (uses_library_list_j != nullptr) {
+ ScopedUtfChars names(env, uses_library_list_j);
uses_libraries = android::base::Split(names.c_str(), ":");
} else {
- // uses_library_list could be nullptr when System.loadLibrary is called from a
- // custom classloader. In that case, we don't know the list of public
+ // uses_library_list_j could be nullptr when System.loadLibrary is called
+ // from a custom classloader. In that case, we don't know the list of public
// libraries because we don't know which apk the classloader is for. Only
// choices we can have are 1) allowing all public libs (as before), or 2)
// not allowing all but NDK libs. Here we take #1 because #2 would surprise
@@ -214,8 +220,6 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
uses_libraries.emplace_back(LIBRARY_ALL);
}
- ApkOrigin apk_origin = GetApkOriginFromDexPath(dex_path);
-
// (http://b/27588281) This is a workaround for apps using custom
// classloaders and calling System.load() with an absolute path which
// is outside of the classloader library search path.
@@ -224,8 +228,8 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
// under /data and /mnt/expand
std::string permitted_path = kAlwaysPermittedDirectories;
- if (java_permitted_path != nullptr) {
- ScopedUtfChars path(env, java_permitted_path);
+ if (permitted_path_j != nullptr) {
+ ScopedUtfChars path(env, permitted_path_j);
if (path.c_str() != nullptr && path.size() > 0) {
permitted_path = permitted_path + ":" + path.c_str();
}
@@ -236,13 +240,13 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
std::string system_exposed_libraries = default_public_libraries();
std::string namespace_name = kClassloaderNamespaceName;
- ApkOrigin unbundled_app_origin = APK_ORIGIN_DEFAULT;
- const char* apk_origin_msg = "other apk"; // Only for debug logging.
+ ApiDomain unbundled_app_domain = API_DOMAIN_DEFAULT;
+ const char* api_domain_msg = "other apk"; // Only for debug logging.
if (!is_shared) {
- if (apk_origin == APK_ORIGIN_VENDOR) {
- unbundled_app_origin = APK_ORIGIN_VENDOR;
- apk_origin_msg = "unbundled vendor apk";
+ if (api_domain == API_DOMAIN_VENDOR) {
+ unbundled_app_domain = API_DOMAIN_VENDOR;
+ api_domain_msg = "unbundled vendor apk";
// For vendor apks, give access to the vendor libs even though they are
// treated as unbundled; the libs and apks are still bundled together in the
@@ -255,9 +259,9 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
// Different name is useful for debugging
namespace_name = kVendorClassloaderNamespaceName;
- } else if (apk_origin == APK_ORIGIN_PRODUCT && is_product_treblelized()) {
- unbundled_app_origin = APK_ORIGIN_PRODUCT;
- apk_origin_msg = "unbundled product apk";
+ } else if (api_domain == API_DOMAIN_PRODUCT) {
+ unbundled_app_domain = API_DOMAIN_PRODUCT;
+ api_domain_msg = "unbundled product apk";
// Like for vendor apks, give access to the product libs since they are
// bundled together in the same partition.
@@ -287,20 +291,20 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
"Configuring %s for %s %s. target_sdk_version=%u, uses_libraries=%s, library_path=%s, "
"permitted_path=%s",
namespace_name.c_str(),
- apk_origin_msg,
+ api_domain_msg,
dex_path.c_str(),
static_cast<unsigned>(target_sdk_version),
android::base::Join(uses_libraries, ':').c_str(),
library_path.c_str(),
permitted_path.c_str());
- if (unbundled_app_origin != APK_ORIGIN_VENDOR) {
+ if (unbundled_app_domain != API_DOMAIN_VENDOR) {
// Extended public libraries are NOT available to unbundled vendor apks, but
// they are to other apps, including those in system, system_ext, and
// product partitions. The reason is that when GSI is used, the system
// partition may get replaced, and then vendor apps may fail. It's fine for
// product apps, because that partition isn't mounted in GSI tests.
- auto libs =
+ const std::string libs =
filter_public_libraries(target_sdk_version, uses_libraries, extended_public_libraries());
if (!libs.empty()) {
ALOGD("Extending system_exposed_libraries: %s", libs.c_str());
@@ -320,27 +324,33 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
bool also_used_as_anonymous = is_main_classloader;
// Note: this function is executed with g_namespaces_mutex held, thus no
// racing here.
- auto app_ns = NativeLoaderNamespace::Create(
- namespace_name, library_path, permitted_path, parent_ns, is_shared,
- target_sdk_version < 24 /* is_exempt_list_enabled */, also_used_as_anonymous);
+ Result<NativeLoaderNamespace> app_ns =
+ NativeLoaderNamespace::Create(namespace_name,
+ library_path,
+ permitted_path,
+ parent_ns,
+ is_shared,
+ target_sdk_version < 24 /* is_exempt_list_enabled */,
+ also_used_as_anonymous);
if (!app_ns.ok()) {
return app_ns.error();
}
// ... and link to other namespaces to allow access to some public libraries
bool is_bridged = app_ns->IsBridged();
- auto system_ns = NativeLoaderNamespace::GetSystemNamespace(is_bridged);
+ Result<NativeLoaderNamespace> system_ns = NativeLoaderNamespace::GetSystemNamespace(is_bridged);
if (!system_ns.ok()) {
return system_ns.error();
}
- auto linked = app_ns->Link(&system_ns.value(), system_exposed_libraries);
+ Result<void> linked = app_ns->Link(&system_ns.value(), system_exposed_libraries);
if (!linked.ok()) {
return linked.error();
}
for (const auto&[apex_ns_name, public_libs] : apex_public_libraries()) {
- auto ns = NativeLoaderNamespace::GetExportedNamespace(apex_ns_name, is_bridged);
+ Result<NativeLoaderNamespace> ns =
+ NativeLoaderNamespace::GetExportedNamespace(apex_ns_name, is_bridged);
// Even if APEX namespace is visible, it may not be available to bridged.
if (ns.ok()) {
linked = app_ns->Link(&ns.value(), public_libs);
@@ -351,8 +361,9 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
}
// Give access to VNDK-SP libraries from the 'vndk' namespace for unbundled vendor apps.
- if (unbundled_app_origin == APK_ORIGIN_VENDOR && !vndksp_libraries_vendor().empty()) {
- auto vndk_ns = NativeLoaderNamespace::GetExportedNamespace(kVndkNamespaceName, is_bridged);
+ if (unbundled_app_domain == API_DOMAIN_VENDOR && !vndksp_libraries_vendor().empty()) {
+ Result<NativeLoaderNamespace> vndk_ns =
+ NativeLoaderNamespace::GetExportedNamespace(kVndkNamespaceName, is_bridged);
if (vndk_ns.ok()) {
linked = app_ns->Link(&vndk_ns.value(), vndksp_libraries_vendor());
if (!linked.ok()) {
@@ -362,8 +373,9 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
}
// Give access to VNDK-SP libraries from the 'vndk_product' namespace for unbundled product apps.
- if (unbundled_app_origin == APK_ORIGIN_PRODUCT && !vndksp_libraries_product().empty()) {
- auto vndk_ns = NativeLoaderNamespace::GetExportedNamespace(kVndkProductNamespaceName, is_bridged);
+ if (unbundled_app_domain == API_DOMAIN_PRODUCT && !vndksp_libraries_product().empty()) {
+ Result<NativeLoaderNamespace> vndk_ns =
+ NativeLoaderNamespace::GetExportedNamespace(kVndkProductNamespaceName, is_bridged);
if (vndk_ns.ok()) {
linked = app_ns->Link(&vndk_ns.value(), vndksp_libraries_product());
if (!linked.ok()) {
@@ -373,11 +385,12 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
}
for (const std::string& each_jar_path : android::base::Split(dex_path, ":")) {
- auto apex_ns_name = FindApexNamespaceName(each_jar_path);
- if (apex_ns_name.ok()) {
- const auto& jni_libs = apex_jni_libraries(*apex_ns_name);
+ std::optional<std::string> apex_ns_name = FindApexNamespaceName(each_jar_path);
+ if (apex_ns_name.has_value()) {
+ const std::string& jni_libs = apex_jni_libraries(apex_ns_name.value());
if (jni_libs != "") {
- auto apex_ns = NativeLoaderNamespace::GetExportedNamespace(*apex_ns_name, is_bridged);
+ Result<NativeLoaderNamespace> apex_ns =
+ NativeLoaderNamespace::GetExportedNamespace(apex_ns_name.value(), is_bridged);
if (apex_ns.ok()) {
linked = app_ns->Link(&apex_ns.value(), jni_libs);
if (!linked.ok()) {
@@ -388,12 +401,13 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
}
}
- auto vendor_libs = filter_public_libraries(target_sdk_version, uses_libraries,
- vendor_public_libraries());
+ const std::string vendor_libs =
+ filter_public_libraries(target_sdk_version, uses_libraries, vendor_public_libraries());
if (!vendor_libs.empty()) {
- auto vendor_ns = NativeLoaderNamespace::GetExportedNamespace(kVendorNamespaceName, is_bridged);
+ Result<NativeLoaderNamespace> vendor_ns =
+ NativeLoaderNamespace::GetExportedNamespace(kVendorNamespaceName, is_bridged);
// when vendor_ns is not configured, link to the system namespace
- auto target_ns = vendor_ns.ok() ? vendor_ns : system_ns;
+ Result<NativeLoaderNamespace> target_ns = vendor_ns.ok() ? vendor_ns : system_ns;
if (target_ns.ok()) {
linked = app_ns->Link(&target_ns.value(), vendor_libs);
if (!linked.ok()) {
@@ -402,10 +416,10 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
}
}
- auto product_libs = filter_public_libraries(target_sdk_version, uses_libraries,
- product_public_libraries());
+ const std::string product_libs =
+ filter_public_libraries(target_sdk_version, uses_libraries, product_public_libraries());
if (!product_libs.empty()) {
- auto target_ns = system_ns;
+ Result<NativeLoaderNamespace> target_ns = system_ns;
if (is_product_treblelized()) {
target_ns = NativeLoaderNamespace::GetExportedNamespace(kProductNamespaceName, is_bridged);
}
@@ -422,8 +436,8 @@ Result<NativeLoaderNamespace*> LibraryNamespaces::Create(JNIEnv* env, uint32_t t
}
}
- auto& emplaced = namespaces_.emplace_back(
- std::make_pair(env->NewWeakGlobalRef(class_loader), *app_ns));
+ std::pair<jweak, NativeLoaderNamespace>& emplaced =
+ namespaces_.emplace_back(std::make_pair(env->NewWeakGlobalRef(class_loader), *app_ns));
if (is_main_classloader) {
app_main_namespace_ = &emplaced.second;
}
@@ -459,7 +473,7 @@ NativeLoaderNamespace* LibraryNamespaces::FindParentNamespaceByClassLoader(JNIEn
return nullptr;
}
-base::Result<std::string> FindApexNamespaceName(const std::string& location) {
+std::optional<std::string> FindApexNamespaceName(const std::string& location) {
// Lots of implicit assumptions here: we expect `location` to be of the form:
// /apex/modulename/...
//
@@ -473,7 +487,7 @@ base::Result<std::string> FindApexNamespaceName(const std::string& location) {
std::replace(name.begin(), name.end(), '.', '_');
return name;
}
- return base::Error();
+ return std::nullopt;
}
} // namespace android::nativeloader
diff --git a/libnativeloader/library_namespaces.h b/libnativeloader/library_namespaces.h
index 4871528f47..ae1cd88f20 100644
--- a/libnativeloader/library_namespaces.h
+++ b/libnativeloader/library_namespaces.h
@@ -21,20 +21,43 @@
#error "Not available for host or linux target"
#endif
-#define LOG_TAG "nativeloader"
-
-#include "native_loader_namespace.h"
-
#include <list>
+#include <optional>
#include <string>
+#include <string_view>
-#include <android-base/result.h>
-#include <jni.h>
+#include "android-base/result.h"
+#include "jni.h"
+#include "native_loader_namespace.h"
namespace android::nativeloader {
using android::base::Result;
+// The device may be configured to have the vendor libraries loaded to a separate namespace.
+// For historical reasons this namespace was named sphal but effectively it is intended
+// to use to load vendor libraries to separate namespace with controlled interface between
+// vendor and system namespaces.
+constexpr const char* kVendorNamespaceName = "sphal";
+// Similar to sphal namespace, product namespace provides some product libraries.
+constexpr const char* kProductNamespaceName = "product";
+
+// vndk namespace for unbundled vendor apps
+constexpr const char* kVndkNamespaceName = "vndk";
+// vndk_product namespace for unbundled product apps
+constexpr const char* kVndkProductNamespaceName = "vndk_product";
+
+// API domains, roughly corresponding to partitions. Interdependencies between
+// these must follow API restrictions, while intradependencies do not.
+using ApiDomain = enum {
+ API_DOMAIN_DEFAULT = 0, // Locations other than those below, in particular for ordinary apps
+ API_DOMAIN_VENDOR = 1, // Vendor partition
+ API_DOMAIN_PRODUCT = 2, // Product partition
+};
+
+ApiDomain GetApiDomainFromPath(const std::string_view path);
+Result<ApiDomain> GetApiDomainFromPathList(const std::string& path_list);
+
// LibraryNamespaces is a singleton object that manages NativeLoaderNamespace
// objects for an app process. Its main job is to create (and configure) a new
// NativeLoaderNamespace object for a Java ClassLoader, and to find an existing
@@ -53,10 +76,15 @@ class LibraryNamespaces {
initialized_ = false;
app_main_namespace_ = nullptr;
}
- Result<NativeLoaderNamespace*> Create(JNIEnv* env, uint32_t target_sdk_version,
- jobject class_loader, bool is_shared, jstring dex_path,
- jstring java_library_path, jstring java_permitted_path,
- jstring uses_library_list);
+ Result<NativeLoaderNamespace*> Create(JNIEnv* env,
+ uint32_t target_sdk_version,
+ jobject class_loader,
+ ApiDomain api_domain,
+ bool is_shared,
+ const std::string& dex_path,
+ jstring library_path_j,
+ jstring permitted_path_j,
+ jstring uses_library_list_j);
NativeLoaderNamespace* FindNamespaceByClassLoader(JNIEnv* env, jobject class_loader);
private:
@@ -68,7 +96,7 @@ class LibraryNamespaces {
std::list<std::pair<jweak, NativeLoaderNamespace>> namespaces_;
};
-Result<std::string> FindApexNamespaceName(const std::string& location);
+std::optional<std::string> FindApexNamespaceName(const std::string& location);
} // namespace android::nativeloader
diff --git a/libnativeloader/library_namespaces_test.cpp b/libnativeloader/library_namespaces_test.cpp
new file mode 100644
index 0000000000..7780418846
--- /dev/null
+++ b/libnativeloader/library_namespaces_test.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(ART_TARGET_ANDROID)
+
+#include "library_namespaces.h"
+
+#include "android-base/result-gmock.h"
+#include "gtest/gtest.h"
+#include "public_libraries.h"
+
+namespace android {
+namespace nativeloader {
+namespace {
+
+using ::android::base::testing::HasError;
+using ::android::base::testing::HasValue;
+using ::android::base::testing::WithMessage;
+using ::testing::StartsWith;
+
+TEST(LibraryNamespacesTest, TestGetApiDomainFromPath) {
+ // GetApiDomainFromPath returns API_DOMAIN_PRODUCT only if the device is
+ // trebleized and has an unbundled product partition.
+ ApiDomain api_domain_product = is_product_treblelized() ? API_DOMAIN_PRODUCT : API_DOMAIN_DEFAULT;
+
+ EXPECT_EQ(GetApiDomainFromPath("/data/somewhere"), API_DOMAIN_DEFAULT);
+ EXPECT_EQ(GetApiDomainFromPath("/system/somewhere"), API_DOMAIN_DEFAULT);
+ EXPECT_EQ(GetApiDomainFromPath("/product/somewhere"), api_domain_product);
+ EXPECT_EQ(GetApiDomainFromPath("/vendor/somewhere"), API_DOMAIN_VENDOR);
+ EXPECT_EQ(GetApiDomainFromPath("/system/product/somewhere"), api_domain_product);
+ EXPECT_EQ(GetApiDomainFromPath("/system/vendor/somewhere"), API_DOMAIN_VENDOR);
+
+ EXPECT_EQ(GetApiDomainFromPath(""), API_DOMAIN_DEFAULT);
+ EXPECT_EQ(GetApiDomainFromPath("/"), API_DOMAIN_DEFAULT);
+ EXPECT_EQ(GetApiDomainFromPath("product/somewhere"), API_DOMAIN_DEFAULT);
+ EXPECT_EQ(GetApiDomainFromPath("/product"), API_DOMAIN_DEFAULT);
+ EXPECT_EQ(GetApiDomainFromPath("/product/"), api_domain_product);
+ EXPECT_EQ(GetApiDomainFromPath(":/product/"), API_DOMAIN_DEFAULT);
+
+ EXPECT_EQ(GetApiDomainFromPath("/data/somewhere:/product/somewhere"), API_DOMAIN_DEFAULT);
+ EXPECT_EQ(GetApiDomainFromPath("/vendor/somewhere:/product/somewhere"), API_DOMAIN_VENDOR);
+ EXPECT_EQ(GetApiDomainFromPath("/product/somewhere:/vendor/somewhere"), api_domain_product);
+}
+
+TEST(LibraryNamespacesTest, TestGetApiDomainFromPathList) {
+ // GetApiDomainFromPath returns API_DOMAIN_PRODUCT only if the device is
+ // trebleized and has an unbundled product partition.
+ ApiDomain api_domain_product = is_product_treblelized() ? API_DOMAIN_PRODUCT : API_DOMAIN_DEFAULT;
+
+ EXPECT_THAT(GetApiDomainFromPathList("/data/somewhere"), HasValue(API_DOMAIN_DEFAULT));
+ EXPECT_THAT(GetApiDomainFromPathList("/system/somewhere"), HasValue(API_DOMAIN_DEFAULT));
+ EXPECT_THAT(GetApiDomainFromPathList("/product/somewhere"), HasValue(api_domain_product));
+ EXPECT_THAT(GetApiDomainFromPathList("/vendor/somewhere"), HasValue(API_DOMAIN_VENDOR));
+ EXPECT_THAT(GetApiDomainFromPathList("/system/product/somewhere"), HasValue(api_domain_product));
+ EXPECT_THAT(GetApiDomainFromPathList("/system/vendor/somewhere"), HasValue(API_DOMAIN_VENDOR));
+
+ EXPECT_THAT(GetApiDomainFromPathList(""), HasValue(API_DOMAIN_DEFAULT));
+ EXPECT_THAT(GetApiDomainFromPathList(":"), HasValue(API_DOMAIN_DEFAULT));
+ EXPECT_THAT(GetApiDomainFromPathList(":/vendor/somewhere"), HasValue(API_DOMAIN_VENDOR));
+ EXPECT_THAT(GetApiDomainFromPathList("/vendor/somewhere:"), HasValue(API_DOMAIN_VENDOR));
+
+ EXPECT_THAT(GetApiDomainFromPathList("/data/somewhere:/product/somewhere"),
+ HasValue(api_domain_product));
+ if (api_domain_product == API_DOMAIN_PRODUCT) {
+ EXPECT_THAT(GetApiDomainFromPathList("/vendor/somewhere:/product/somewhere"),
+ HasError(WithMessage(StartsWith("Path list crosses partition boundaries"))));
+ EXPECT_THAT(GetApiDomainFromPathList("/product/somewhere:/vendor/somewhere"),
+ HasError(WithMessage(StartsWith("Path list crosses partition boundaries"))));
+ }
+}
+
+} // namespace
+} // namespace nativeloader
+} // namespace android
+
+#endif // ART_TARGET_ANDROID
diff --git a/libnativeloader/native_loader.cpp b/libnativeloader/native_loader.cpp
index 2deb5ef3d0..61925431ef 100644
--- a/libnativeloader/native_loader.cpp
+++ b/libnativeloader/native_loader.cpp
@@ -24,19 +24,21 @@
#include <algorithm>
#include <memory>
#include <mutex>
+#include <optional>
#include <string>
#include <vector>
-#include <android-base/file.h>
-#include <android-base/macros.h>
-#include <android-base/strings.h>
-#include <android-base/thread_annotations.h>
-#include <nativebridge/native_bridge.h>
-#include <nativehelper/scoped_utf_chars.h>
+#include "android-base/file.h"
+#include "android-base/macros.h"
+#include "android-base/strings.h"
+#include "android-base/thread_annotations.h"
+#include "nativebridge/native_bridge.h"
+#include "nativehelper/scoped_utf_chars.h"
+#include "public_libraries.h"
#ifdef ART_TARGET_ANDROID
-#include <log/log.h>
#include "library_namespaces.h"
+#include "log/log.h"
#include "nativeloader/dlext_namespaces.h"
#endif
@@ -46,6 +48,9 @@ namespace {
#if defined(ART_TARGET_ANDROID)
+using ::android::base::Result;
+using ::android::nativeloader::LibraryNamespaces;
+
// NATIVELOADER_DEFAULT_NAMESPACE_LIBS is an environment variable that can be
// used to list extra libraries (separated by ":") that libnativeloader will
// load from the default namespace. The libraries must be listed without paths,
@@ -62,21 +67,24 @@ namespace {
// test libraries that depend on ART internal libraries.
constexpr const char* kNativeloaderExtraLibs = "nativeloader-extra-libs";
-using android::nativeloader::LibraryNamespaces;
-
std::mutex g_namespaces_mutex;
-LibraryNamespaces* g_namespaces = new LibraryNamespaces;
-NativeLoaderNamespace* g_nativeloader_extra_libs_namespace = nullptr;
-
-android_namespace_t* FindExportedNamespace(const char* caller_location) {
- auto name = nativeloader::FindApexNamespaceName(caller_location);
- if (name.ok()) {
- android_namespace_t* boot_namespace = android_get_exported_namespace(name->c_str());
- LOG_ALWAYS_FATAL_IF((boot_namespace == nullptr),
- "Error finding namespace of apex: no namespace called %s", name->c_str());
- return boot_namespace;
+LibraryNamespaces* g_namespaces GUARDED_BY(g_namespaces_mutex) = new LibraryNamespaces;
+NativeLoaderNamespace* g_nativeloader_extra_libs_namespace GUARDED_BY(g_namespaces_mutex) = nullptr;
+
+std::optional<NativeLoaderNamespace> FindApexNamespace(const char* caller_location) {
+ std::optional<std::string> name = nativeloader::FindApexNamespaceName(caller_location);
+ if (name.has_value()) {
+ // Native Bridge is never used for APEXes.
+ Result<NativeLoaderNamespace> ns =
+ NativeLoaderNamespace::GetExportedNamespace(name.value(), /*is_bridged=*/false);
+ LOG_ALWAYS_FATAL_IF(!ns.ok(),
+ "Error finding ns %s for APEX location %s: %s",
+ name.value().c_str(),
+ caller_location,
+ ns.error().message().c_str());
+ return ns.value();
}
- return nullptr;
+ return std::nullopt;
}
Result<void> CreateNativeloaderDefaultNamespaceLibsLink(NativeLoaderNamespace& ns)
@@ -133,26 +141,34 @@ Result<void*> TryLoadNativeloaderExtraLib(const char* path) {
if (!ns.ok()) {
return ns.error();
}
- return ns.value()->Load(path);
+
+ Result<void*> res = ns.value()->Load(path);
+ ALOGD("Load %s using ns %s from NATIVELOADER_DEFAULT_NAMESPACE_LIBS match: %s",
+ path,
+ ns.value()->name().c_str(),
+ res.ok() ? "ok" : res.error().message().c_str());
+ return res;
}
Result<NativeLoaderNamespace*> CreateClassLoaderNamespaceLocked(JNIEnv* env,
int32_t target_sdk_version,
jobject class_loader,
+ nativeloader::ApiDomain api_domain,
bool is_shared,
- jstring dex_path,
- jstring library_path,
- jstring permitted_path,
- jstring uses_library_list)
+ const std::string& dex_path,
+ jstring library_path_j,
+ jstring permitted_path_j,
+ jstring uses_library_list_j)
REQUIRES(g_namespaces_mutex) {
Result<NativeLoaderNamespace*> ns = g_namespaces->Create(env,
target_sdk_version,
class_loader,
+ api_domain,
is_shared,
dex_path,
- library_path,
- permitted_path,
- uses_library_list);
+ library_path_j,
+ permitted_path_j,
+ uses_library_list_j);
if (!ns.ok()) {
return ns;
}
@@ -163,7 +179,7 @@ Result<NativeLoaderNamespace*> CreateClassLoaderNamespaceLocked(JNIEnv* env,
return ns;
}
-#endif // #if defined(ART_TARGET_ANDROID)
+#endif // ART_TARGET_ANDROID
} // namespace
@@ -183,47 +199,86 @@ void ResetNativeLoader() {
#endif
}
-jstring CreateClassLoaderNamespace(JNIEnv* env, int32_t target_sdk_version, jobject class_loader,
- bool is_shared, jstring dex_path, jstring library_path,
- jstring permitted_path, jstring uses_library_list) {
+// dex_path_j may be a ':'-separated list of paths, e.g. when creating a shared
+// library loader - cf. mCodePaths in android.content.pm.SharedLibraryInfo.
+jstring CreateClassLoaderNamespace(JNIEnv* env,
+ int32_t target_sdk_version,
+ jobject class_loader,
+ bool is_shared,
+ jstring dex_path_j,
+ jstring library_path_j,
+ jstring permitted_path_j,
+ jstring uses_library_list_j) {
#if defined(ART_TARGET_ANDROID)
+ std::string dex_path;
+ if (dex_path_j != nullptr) {
+ ScopedUtfChars dex_path_chars(env, dex_path_j);
+ dex_path = dex_path_chars.c_str();
+ }
+
+ Result<nativeloader::ApiDomain> api_domain = nativeloader::GetApiDomainFromPathList(dex_path);
+ if (!api_domain.ok()) {
+ return env->NewStringUTF(api_domain.error().message().c_str());
+ }
+
std::lock_guard<std::mutex> guard(g_namespaces_mutex);
Result<NativeLoaderNamespace*> ns = CreateClassLoaderNamespaceLocked(env,
target_sdk_version,
class_loader,
+ api_domain.value(),
is_shared,
dex_path,
- library_path,
- permitted_path,
- uses_library_list);
+ library_path_j,
+ permitted_path_j,
+ uses_library_list_j);
if (!ns.ok()) {
return env->NewStringUTF(ns.error().message().c_str());
}
+
#else
- UNUSED(env, target_sdk_version, class_loader, is_shared, dex_path, library_path, permitted_path,
- uses_library_list);
+ UNUSED(env,
+ target_sdk_version,
+ class_loader,
+ is_shared,
+ dex_path_j,
+ library_path_j,
+ permitted_path_j,
+ uses_library_list_j);
#endif
+
return nullptr;
}
-void* OpenNativeLibrary(JNIEnv* env, int32_t target_sdk_version, const char* path,
- jobject class_loader, const char* caller_location, jstring library_path,
- bool* needs_native_bridge, char** error_msg) {
+void* OpenNativeLibrary(JNIEnv* env,
+ int32_t target_sdk_version,
+ const char* path,
+ jobject class_loader,
+ const char* caller_location,
+ jstring library_path_j,
+ bool* needs_native_bridge,
+ char** error_msg) {
#if defined(ART_TARGET_ANDROID)
- UNUSED(target_sdk_version);
-
if (class_loader == nullptr) {
+ // class_loader is null only for the boot class loader (see
+ // IsBootClassLoader call in JavaVMExt::LoadNativeLibrary), i.e. the caller
+ // is in the boot classpath.
*needs_native_bridge = false;
if (caller_location != nullptr) {
- android_namespace_t* boot_namespace = FindExportedNamespace(caller_location);
- if (boot_namespace != nullptr) {
+ std::optional<NativeLoaderNamespace> ns = FindApexNamespace(caller_location);
+ if (ns.has_value()) {
const android_dlextinfo dlextinfo = {
.flags = ANDROID_DLEXT_USE_NAMESPACE,
- .library_namespace = boot_namespace,
+ .library_namespace = ns.value().ToRawAndroidNamespace(),
};
void* handle = android_dlopen_ext(path, RTLD_NOW, &dlextinfo);
- if (handle == nullptr) {
- *error_msg = strdup(dlerror());
+ char* dlerror_msg = handle == nullptr ? strdup(dlerror()) : nullptr;
+ ALOGD("Load %s using APEX ns %s for caller %s: %s",
+ path,
+ ns.value().name().c_str(),
+ caller_location,
+ dlerror_msg == nullptr ? "ok" : dlerror_msg);
+ if (dlerror_msg != nullptr) {
+ *error_msg = dlerror_msg;
}
return handle;
}
@@ -244,53 +299,86 @@ void* OpenNativeLibrary(JNIEnv* env, int32_t target_sdk_version, const char* pat
// Fall back to the system namespace. This happens for preloaded JNI
// libraries in the zygote.
- // TODO(b/185833744): Investigate if this should fall back to the app main
- // namespace (aka anonymous namespace) instead.
void* handle = OpenSystemLibrary(path, RTLD_NOW);
- if (handle == nullptr) {
- *error_msg = strdup(dlerror());
+ char* dlerror_msg = handle == nullptr ? strdup(dlerror()) : nullptr;
+ ALOGD("Load %s using system ns (caller=%s): %s",
+ path,
+ caller_location == nullptr ? "<unknown>" : caller_location,
+ dlerror_msg == nullptr ? "ok" : dlerror_msg);
+ if (dlerror_msg != nullptr) {
+ *error_msg = dlerror_msg;
}
return handle;
}
std::lock_guard<std::mutex> guard(g_namespaces_mutex);
- NativeLoaderNamespace* ns;
-
- if ((ns = g_namespaces->FindNamespaceByClassLoader(env, class_loader)) == nullptr) {
- // This is the case where the classloader was not created by ApplicationLoaders
- // In this case we create an isolated not-shared namespace for it.
- Result<NativeLoaderNamespace*> isolated_ns =
- CreateClassLoaderNamespaceLocked(env,
- target_sdk_version,
- class_loader,
- /*is_shared=*/false,
- /*dex_path=*/nullptr,
- library_path,
- /*permitted_path=*/nullptr,
- /*uses_library_list=*/nullptr);
- if (!isolated_ns.ok()) {
- *error_msg = strdup(isolated_ns.error().message().c_str());
- return nullptr;
- } else {
- ns = *isolated_ns;
+
+ {
+ NativeLoaderNamespace* ns = g_namespaces->FindNamespaceByClassLoader(env, class_loader);
+ if (ns != nullptr) {
+ *needs_native_bridge = ns->IsBridged();
+ Result<void*> handle = ns->Load(path);
+ ALOGD("Load %s using ns %s from class loader (caller=%s): %s",
+ path,
+ ns->name().c_str(),
+ caller_location == nullptr ? "<unknown>" : caller_location,
+ handle.ok() ? "ok" : handle.error().message().c_str());
+ if (!handle.ok()) {
+ *error_msg = strdup(handle.error().message().c_str());
+ return nullptr;
+ }
+ return handle.value();
}
}
- return OpenNativeLibraryInNamespace(ns, path, needs_native_bridge, error_msg);
-#else
+ // This is the case where the classloader was not created by ApplicationLoaders
+ // In this case we create an isolated not-shared namespace for it.
+ const std::string empty_dex_path;
+ Result<NativeLoaderNamespace*> isolated_ns =
+ CreateClassLoaderNamespaceLocked(env,
+ target_sdk_version,
+ class_loader,
+ nativeloader::API_DOMAIN_DEFAULT,
+ /*is_shared=*/false,
+ empty_dex_path,
+ library_path_j,
+ /*permitted_path=*/nullptr,
+ /*uses_library_list=*/nullptr);
+ if (!isolated_ns.ok()) {
+ ALOGD("Failed to create isolated ns for %s (caller=%s)",
+ path,
+ caller_location == nullptr ? "<unknown>" : caller_location);
+ *error_msg = strdup(isolated_ns.error().message().c_str());
+ return nullptr;
+ }
+
+ *needs_native_bridge = isolated_ns.value()->IsBridged();
+ Result<void*> handle = isolated_ns.value()->Load(path);
+ ALOGD("Load %s using isolated ns %s (caller=%s): %s",
+ path,
+ isolated_ns.value()->name().c_str(),
+ caller_location == nullptr ? "<unknown>" : caller_location,
+ handle.ok() ? "ok" : handle.error().message().c_str());
+ if (!handle.ok()) {
+ *error_msg = strdup(handle.error().message().c_str());
+ return nullptr;
+ }
+ return handle.value();
+
+#else // !ART_TARGET_ANDROID
UNUSED(env, target_sdk_version, class_loader, caller_location);
// Do some best effort to emulate library-path support. It will not
// work for dependencies.
//
// Note: null has a special meaning and must be preserved.
- std::string c_library_path; // Empty string by default.
- if (library_path != nullptr && path != nullptr && path[0] != '/') {
- ScopedUtfChars library_path_utf_chars(env, library_path);
- c_library_path = library_path_utf_chars.c_str();
+ std::string library_path; // Empty string by default.
+ if (library_path_j != nullptr && path != nullptr && path[0] != '/') {
+ ScopedUtfChars library_path_utf_chars(env, library_path_j);
+ library_path = library_path_utf_chars.c_str();
}
- std::vector<std::string> library_paths = base::Split(c_library_path, ":");
+ std::vector<std::string> library_paths = base::Split(library_path, ":");
for (const std::string& lib_path : library_paths) {
*needs_native_bridge = false;
@@ -323,7 +411,7 @@ void* OpenNativeLibrary(JNIEnv* env, int32_t target_sdk_version, const char* pat
}
}
return nullptr;
-#endif
+#endif // !ART_TARGET_ANDROID
}
bool CloseNativeLibrary(void* handle, const bool needs_native_bridge, char** error_msg) {
@@ -351,7 +439,7 @@ void NativeLoaderFreeErrorMessage(char* msg) {
#if defined(ART_TARGET_ANDROID)
void* OpenNativeLibraryInNamespace(NativeLoaderNamespace* ns, const char* path,
bool* needs_native_bridge, char** error_msg) {
- auto handle = ns->Load(path);
+ Result<void*> handle = ns->Load(path);
if (!handle.ok() && error_msg != nullptr) {
*error_msg = strdup(handle.error().message().c_str());
}
@@ -397,4 +485,4 @@ void LinkNativeLoaderNamespaceToExportedNamespaceLibrary(struct NativeLoaderName
#endif // ART_TARGET_ANDROID
-}; // namespace android
+} // namespace android
diff --git a/libnativeloader/native_loader_lazy.cpp b/libnativeloader/native_loader_lazy.cpp
index 5b82d00560..1c82dc428f 100644
--- a/libnativeloader/native_loader_lazy.cpp
+++ b/libnativeloader/native_loader_lazy.cpp
@@ -35,7 +35,7 @@ void* GetLibHandle() {
template <typename FuncPtr>
FuncPtr GetFuncPtr(const char* function_name) {
- auto f = reinterpret_cast<FuncPtr>(dlsym(GetLibHandle(), function_name));
+ FuncPtr f = reinterpret_cast<FuncPtr>(dlsym(GetLibHandle(), function_name));
LOG_FATAL_IF(f == nullptr, "Failed to get address of %s: %s", function_name, dlerror());
return f;
}
diff --git a/libnativeloader/native_loader_namespace.cpp b/libnativeloader/native_loader_namespace.cpp
index 669fa74dc2..cfb84b7d9e 100644
--- a/libnativeloader/native_loader_namespace.cpp
+++ b/libnativeloader/native_loader_namespace.cpp
@@ -52,12 +52,12 @@ std::string GetLinkerError(bool is_bridged) {
Result<NativeLoaderNamespace> NativeLoaderNamespace::GetExportedNamespace(const std::string& name,
bool is_bridged) {
if (!is_bridged) {
- auto raw = android_get_exported_namespace(name.c_str());
+ android_namespace_t* raw = android_get_exported_namespace(name.c_str());
if (raw != nullptr) {
return NativeLoaderNamespace(name, raw);
}
} else {
- auto raw = NativeBridgeGetExportedNamespace(name.c_str());
+ native_bridge_namespace_t* raw = NativeBridgeGetExportedNamespace(name.c_str());
if (raw != nullptr) {
return NativeLoaderNamespace(name, raw);
}
@@ -69,7 +69,7 @@ Result<NativeLoaderNamespace> NativeLoaderNamespace::GetExportedNamespace(const
// "system" for those in the Runtime APEX. Try "system" first since
// "default" always exists.
Result<NativeLoaderNamespace> NativeLoaderNamespace::GetSystemNamespace(bool is_bridged) {
- auto ns = GetExportedNamespace(kSystemNamespaceName, is_bridged);
+ Result<NativeLoaderNamespace> ns = GetExportedNamespace(kSystemNamespaceName, is_bridged);
if (ns.ok()) return ns;
ns = GetExportedNamespace(kDefaultNamespaceName, is_bridged);
if (ns.ok()) return ns;
@@ -96,7 +96,7 @@ Result<NativeLoaderNamespace> NativeLoaderNamespace::Create(
}
// Fall back to the system namespace if no parent is set.
- auto system_ns = GetSystemNamespace(is_bridged);
+ Result<NativeLoaderNamespace> system_ns = GetSystemNamespace(is_bridged);
if (!system_ns.ok()) {
return system_ns.error();
}
diff --git a/libnativeloader/native_loader_test.cpp b/libnativeloader/native_loader_test.cpp
index 72348ed364..3b05aae06b 100644
--- a/libnativeloader/native_loader_test.cpp
+++ b/libnativeloader/native_loader_test.cpp
@@ -564,7 +564,8 @@ jni com_android_bar libbar.so:libbar2.so
public com_android_bar libpublic.so
)";
- auto jni_libs = ParseApexLibrariesConfig(file_content, "jni");
+ Result<std::map<std::string, std::string>> jni_libs =
+ ParseApexLibrariesConfig(file_content, "jni");
ASSERT_RESULT_OK(jni_libs);
std::map<std::string, std::string> expected_jni_libs {
{"com_android_foo", "libfoo.so"},
@@ -572,7 +573,8 @@ jni com_android_bar libbar.so:libbar2.so
};
ASSERT_EQ(expected_jni_libs, *jni_libs);
- auto public_libs = ParseApexLibrariesConfig(file_content, "public");
+ Result<std::map<std::string, std::string>> public_libs =
+ ParseApexLibrariesConfig(file_content, "public");
ASSERT_RESULT_OK(public_libs);
std::map<std::string, std::string> expected_public_libs {
{"com_android_bar", "libpublic.so"},
@@ -586,7 +588,7 @@ jni com_android_foo libfoo
# missing <library list>
jni com_android_bar
)";
- auto result = ParseApexLibrariesConfig(file_content, "jni");
+ Result<std::map<std::string, std::string>> result = ParseApexLibrariesConfig(file_content, "jni");
ASSERT_FALSE(result.ok());
ASSERT_EQ("Malformed line \"jni com_android_bar\"", result.error().message());
}
@@ -598,7 +600,7 @@ public apex2 lib
# unknown tag
unknown com_android_foo libfoo
)";
- auto result = ParseApexLibrariesConfig(file_content, "jni");
+ Result<std::map<std::string, std::string>> result = ParseApexLibrariesConfig(file_content, "jni");
ASSERT_FALSE(result.ok());
ASSERT_EQ("Invalid tag \"unknown com_android_foo libfoo\"", result.error().message());
}
@@ -608,7 +610,7 @@ TEST(NativeLoaderApexLibrariesConfigParser, RejectInvalidApexNamespace) {
# apex linker namespace should be mangled ('.' -> '_')
jni com.android.foo lib
)";
- auto result = ParseApexLibrariesConfig(file_content, "jni");
+ Result<std::map<std::string, std::string>> result = ParseApexLibrariesConfig(file_content, "jni");
ASSERT_FALSE(result.ok());
ASSERT_EQ("Invalid apex_namespace \"jni com.android.foo lib\"", result.error().message());
}
@@ -618,7 +620,7 @@ TEST(NativeLoaderApexLibrariesConfigParser, RejectInvalidLibraryList) {
# library list is ":" separated list of filenames
jni com_android_foo lib64/libfoo.so
)";
- auto result = ParseApexLibrariesConfig(file_content, "jni");
+ Result<std::map<std::string, std::string>> result = ParseApexLibrariesConfig(file_content, "jni");
ASSERT_FALSE(result.ok());
ASSERT_EQ("Invalid library_list \"jni com_android_foo lib64/libfoo.so\"", result.error().message());
}
diff --git a/libnativeloader/public_libraries.cpp b/libnativeloader/public_libraries.cpp
index 87210c8f14..390c2987d6 100644
--- a/libnativeloader/public_libraries.cpp
+++ b/libnativeloader/public_libraries.cpp
@@ -78,7 +78,7 @@ std::string vndk_version_str(bool use_product_vndk) {
// insert vndk version in every {} placeholder
void InsertVndkVersionStr(std::string* file_name, bool use_product_vndk) {
CHECK(file_name != nullptr);
- auto version = vndk_version_str(use_product_vndk);
+ const std::string version = vndk_version_str(use_product_vndk);
size_t pos = file_name->find("{}");
while (pos != std::string::npos) {
file_name->replace(pos, 2, version);
@@ -94,7 +94,7 @@ Result<std::vector<std::string>> ReadConfig(
const std::function<Result<bool>(const ConfigEntry& /* entry */)>& filter_fn) {
std::string file_content;
if (!base::ReadFileToString(configFile, &file_content)) {
- return ErrnoError();
+ return ErrnoError() << "Failed to read " << configFile;
}
Result<std::vector<std::string>> result = ParseConfig(file_content, filter_fn);
if (!result.ok()) {
@@ -123,7 +123,7 @@ void ReadExtensionLibraries(const char* dirname, std::vector<std::string>* sonam
"Error extracting company name from public native library list file path \"%s\"",
config_file_path.c_str());
- auto ret = ReadConfig(
+ Result<std::vector<std::string>> ret = ReadConfig(
config_file_path, [&company_name](const struct ConfigEntry& entry) -> Result<bool> {
if (android::base::StartsWith(entry.soname, "lib") &&
android::base::EndsWith(entry.soname, "." + company_name + ".so")) {
@@ -139,8 +139,8 @@ void ReadExtensionLibraries(const char* dirname, std::vector<std::string>* sonam
if (ret.ok()) {
sonames->insert(sonames->end(), ret->begin(), ret->end());
} else {
- LOG_ALWAYS_FATAL("Error reading public native library list from \"%s\": %s",
- config_file_path.c_str(), ret.error().message().c_str());
+ LOG_ALWAYS_FATAL("Error reading extension library list: %s",
+ ret.error().message().c_str());
}
}
}
@@ -149,7 +149,7 @@ void ReadExtensionLibraries(const char* dirname, std::vector<std::string>* sonam
static std::string InitDefaultPublicLibraries(bool for_preload) {
std::string config_file = root_dir() + kDefaultPublicLibrariesFile;
- auto sonames =
+ Result<std::vector<std::string>> sonames =
ReadConfig(config_file, [&for_preload](const struct ConfigEntry& entry) -> Result<bool> {
if (for_preload) {
return !entry.nopreload;
@@ -158,8 +158,7 @@ static std::string InitDefaultPublicLibraries(bool for_preload) {
}
});
if (!sonames.ok()) {
- LOG_ALWAYS_FATAL("Error reading public native library list from \"%s\": %s",
- config_file.c_str(), sonames.error().message().c_str());
+ LOG_ALWAYS_FATAL("%s", sonames.error().message().c_str());
return "";
}
@@ -167,8 +166,8 @@ static std::string InitDefaultPublicLibraries(bool for_preload) {
if (!for_preload) {
// Remove the public libs provided by apexes because these libs are available
// from apex namespaces.
- for (const auto& p : apex_public_libraries()) {
- auto public_libs = base::Split(p.second, ":");
+ for (const std::pair<std::string, std::string>& p : apex_public_libraries()) {
+ std::vector<std::string> public_libs = base::Split(p.second, ":");
sonames->erase(std::remove_if(sonames->begin(),
sonames->end(),
[&public_libs](const std::string& v) {
@@ -186,7 +185,7 @@ static std::string InitDefaultPublicLibraries(bool for_preload) {
static std::string InitVendorPublicLibraries() {
// This file is optional, quietly ignore if the file does not exist.
- auto sonames = ReadConfig(kVendorPublicLibrariesFile, always_true);
+ Result<std::vector<std::string>> sonames = ReadConfig(kVendorPublicLibrariesFile, always_true);
if (!sonames.ok()) {
ALOGI("InitVendorPublicLibraries skipped: %s", sonames.error().message().c_str());
return "";
@@ -250,9 +249,9 @@ static std::string InitLlndkLibrariesVendor() {
} else {
config_file = kLlndkLibrariesNoVndkFile;
}
- auto sonames = ReadConfig(config_file, always_true);
+ Result<std::vector<std::string>> sonames = ReadConfig(config_file, always_true);
if (!sonames.ok()) {
- LOG_ALWAYS_FATAL("%s: %s", config_file.c_str(), sonames.error().message().c_str());
+ LOG_ALWAYS_FATAL("%s", sonames.error().message().c_str());
return "";
}
std::string libs = android::base::Join(*sonames, ':');
@@ -272,9 +271,9 @@ static std::string InitLlndkLibrariesProduct() {
} else {
config_file = kLlndkLibrariesNoVndkFile;
}
- auto sonames = ReadConfig(config_file, always_true);
+ Result<std::vector<std::string>> sonames = ReadConfig(config_file, always_true);
if (!sonames.ok()) {
- LOG_ALWAYS_FATAL("%s: %s", config_file.c_str(), sonames.error().message().c_str());
+ LOG_ALWAYS_FATAL("%s", sonames.error().message().c_str());
return "";
}
std::string libs = android::base::Join(*sonames, ':');
@@ -290,7 +289,7 @@ static std::string InitVndkspLibrariesVendor() {
std::string config_file = kVndkLibrariesFile;
InsertVndkVersionStr(&config_file, false);
- auto sonames = ReadConfig(config_file, always_true);
+ Result<std::vector<std::string>> sonames = ReadConfig(config_file, always_true);
if (!sonames.ok()) {
LOG_ALWAYS_FATAL("%s", sonames.error().message().c_str());
return "";
@@ -307,7 +306,7 @@ static std::string InitVndkspLibrariesProduct() {
}
std::string config_file = kVndkLibrariesFile;
InsertVndkVersionStr(&config_file, true);
- auto sonames = ReadConfig(config_file, always_true);
+ Result<std::vector<std::string>> sonames = ReadConfig(config_file, always_true);
if (!sonames.ok()) {
LOG_ALWAYS_FATAL("%s", sonames.error().message().c_str());
return "";
@@ -460,8 +459,8 @@ Result<std::vector<std::string>> ParseConfig(
std::vector<std::string> lines = base::Split(file_content, "\n");
std::vector<std::string> sonames;
- for (auto& line : lines) {
- auto trimmed_line = base::Trim(line);
+ for (std::string& line : lines) {
+ std::string trimmed_line = base::Trim(line);
if (trimmed_line[0] == '#' || trimmed_line.empty()) {
continue;
}
@@ -528,12 +527,12 @@ Result<std::vector<std::string>> ParseConfig(
Result<std::map<std::string, std::string>> ParseApexLibrariesConfig(const std::string& file_content, const std::string& tag) {
std::map<std::string, std::string> entries;
std::vector<std::string> lines = base::Split(file_content, "\n");
- for (auto& line : lines) {
- auto trimmed_line = base::Trim(line);
+ for (std::string& line : lines) {
+ std::string trimmed_line = base::Trim(line);
if (trimmed_line[0] == '#' || trimmed_line.empty()) {
continue;
}
- auto config_line = ParseApexLibrariesConfigLine(trimmed_line);
+ Result<ApexLibrariesConfigLine> config_line = ParseApexLibrariesConfigLine(trimmed_line);
if (!config_line.ok()) {
return config_line.error();
}
diff --git a/libnativeloader/test/Android.bp b/libnativeloader/test/Android.bp
index 68c9126407..6166725af7 100644
--- a/libnativeloader/test/Android.bp
+++ b/libnativeloader/test/Android.bp
@@ -66,16 +66,6 @@ android_test_helper_app {
],
}
-java_library {
- name: "loadlibrarytest_test_utils",
- sdk_version: "31",
- static_libs: [
- "androidx.test.ext.junit",
- "androidx.test.ext.truth",
- ],
- srcs: ["src/android/test/lib/TestUtils.java"],
-}
-
// Test fixture that represents a shared library in /system/framework.
java_library {
name: "libnativeloader_system_shared_lib",
@@ -110,6 +100,24 @@ java_library {
srcs: ["src/android/test/vendorsharedlib/VendorSharedLib.java"],
}
+java_library {
+ name: "loadlibrarytest_testlib",
+ sdk_version: "system_31",
+ static_libs: [
+ "androidx.test.ext.junit",
+ "androidx.test.ext.truth",
+ "androidx.test.rules",
+ "modules-utils-build_system",
+ ],
+ libs: [
+ "libnativeloader_system_shared_lib",
+ "libnativeloader_system_ext_shared_lib",
+ "libnativeloader_product_shared_lib",
+ "libnativeloader_vendor_shared_lib",
+ ],
+ srcs: ["src/android/test/lib/*.java"],
+}
+
java_defaults {
name: "loadlibrarytest_app_defaults",
defaults: ["art_module_source_build_java_defaults"],
@@ -120,9 +128,7 @@ java_defaults {
// measures getting enabled in these tests, so set some high number.
target_sdk_version: "9999",
static_libs: [
- "androidx.test.ext.junit",
- "androidx.test.rules",
- "loadlibrarytest_test_utils",
+ "loadlibrarytest_testlib",
],
libs: [
"libnativeloader_system_shared_lib",
diff --git a/libnativeloader/test/src/android/test/app/DataAppTest.java b/libnativeloader/test/src/android/test/app/DataAppTest.java
index 4ba8afcbd2..905b5bb0d1 100644
--- a/libnativeloader/test/src/android/test/app/DataAppTest.java
+++ b/libnativeloader/test/src/android/test/app/DataAppTest.java
@@ -16,25 +16,30 @@
package android.test.app;
+import android.test.lib.AppTestCommon;
import android.test.lib.TestUtils;
import android.test.productsharedlib.ProductSharedLib;
import android.test.systemextsharedlib.SystemExtSharedLib;
import android.test.systemsharedlib.SystemSharedLib;
import android.test.vendorsharedlib.VendorSharedLib;
+
import androidx.test.filters.MediumTest;
-import androidx.test.runner.AndroidJUnit4;
+
import org.junit.Test;
-import org.junit.runner.RunWith;
@MediumTest
-@RunWith(AndroidJUnit4.class)
-public class DataAppTest {
+public class DataAppTest extends AppTestCommon {
+ @Override
+ public AppLocation getAppLocation() {
+ return AppLocation.DATA;
+ }
+
@Test
public void testLoadExtendedPublicLibraries() {
System.loadLibrary("system_extpub.oem1");
System.loadLibrary("system_extpub.oem2");
System.loadLibrary("system_extpub1.oem1");
- TestUtils.assertLinkerNamespaceError( // Missing <uses-native-library>.
+ TestUtils.assertLibraryInaccessible( // Missing <uses-native-library>.
() -> System.loadLibrary("system_extpub_nouses.oem2"));
if (!TestUtils.skipPublicProductLibTests()) {
System.loadLibrary("product_extpub.product1");
@@ -44,12 +49,12 @@ public class DataAppTest {
@Test
public void testLoadPrivateLibraries() {
- TestUtils.assertLinkerNamespaceError(() -> System.loadLibrary("system_private1"));
- TestUtils.assertLinkerNamespaceError(() -> System.loadLibrary("systemext_private1"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("system_private1"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("systemext_private1"));
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLibraryNotFound(() -> System.loadLibrary("product_private1"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("product_private1"));
}
- TestUtils.assertLibraryNotFound(() -> System.loadLibrary("vendor_private1"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("vendor_private1"));
}
@Test
@@ -64,46 +69,60 @@ public class DataAppTest {
public void testLoadPrivateLibrariesViaSystemSharedLib() {
// TODO(b/237577392): Loading a private native system library via a shared system library
// ought to work.
- // SystemSharedLib.loadLibrary("system_private2");
- // SystemSharedLib.loadLibrary("systemext_private2");
+ TestUtils.assertLibraryInaccessible(() -> SystemSharedLib.loadLibrary("system_private2"));
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemSharedLib.loadLibrary("systemext_private2"));
+
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLibraryNotFound(() -> SystemSharedLib.loadLibrary("product_private2"));
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemSharedLib.loadLibrary("product_private2"));
}
- TestUtils.assertLibraryNotFound(() -> SystemSharedLib.loadLibrary("vendor_private2"));
+
+ TestUtils.assertLibraryInaccessible(() -> SystemSharedLib.loadLibrary("vendor_private2"));
}
@Test
public void testLoadPrivateLibrariesViaSystemExtSharedLib() {
// TODO(b/237577392): Loading a private native system library via a shared system library
// ought to work.
- // SystemExtSharedLib.loadLibrary("system_private3");
- // SystemExtSharedLib.loadLibrary("systemext_private3");
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemExtSharedLib.loadLibrary("system_private3"));
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemExtSharedLib.loadLibrary("systemext_private3"));
+
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLibraryNotFound(
+ TestUtils.assertLibraryInaccessible(
() -> SystemExtSharedLib.loadLibrary("product_private3"));
}
- TestUtils.assertLibraryNotFound(() -> SystemExtSharedLib.loadLibrary("vendor_private3"));
+
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemExtSharedLib.loadLibrary("vendor_private3"));
}
@Test
public void testLoadPrivateLibrariesViaProductSharedLib() {
- TestUtils.assertLinkerNamespaceError(() -> ProductSharedLib.loadLibrary("system_private4"));
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(() -> ProductSharedLib.loadLibrary("system_private4"));
+ TestUtils.assertLibraryInaccessible(
() -> ProductSharedLib.loadLibrary("systemext_private4"));
+
if (!TestUtils.skipPublicProductLibTests()) {
ProductSharedLib.loadLibrary("product_private4");
}
- TestUtils.assertLibraryNotFound(() -> ProductSharedLib.loadLibrary("vendor_private4"));
+
+ TestUtils.assertLibraryInaccessible(() -> ProductSharedLib.loadLibrary("vendor_private4"));
}
@Test
public void testLoadPrivateLibrariesViaVendorSharedLib() {
- TestUtils.assertLinkerNamespaceError(() -> VendorSharedLib.loadLibrary("system_private5"));
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(() -> VendorSharedLib.loadLibrary("system_private5"));
+ TestUtils.assertLibraryInaccessible(
() -> VendorSharedLib.loadLibrary("systemext_private5"));
+
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLibraryNotFound(() -> VendorSharedLib.loadLibrary("product_private5"));
+ TestUtils.assertLibraryInaccessible(
+ () -> VendorSharedLib.loadLibrary("product_private5"));
}
+
VendorSharedLib.loadLibrary("vendor_private5");
}
@@ -117,15 +136,15 @@ public class DataAppTest {
@Test
public void testLoadPrivateLibrariesWithAbsolutePaths() {
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> System.load(TestUtils.libPath("/system", "system_private6")));
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> System.load(TestUtils.libPath("/system_ext", "systemext_private6")));
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> System.load(TestUtils.libPath("/product", "product_private6")));
}
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> System.load(TestUtils.libPath("/vendor", "vendor_private6")));
}
}
diff --git a/libnativeloader/test/src/android/test/app/ProductAppTest.java b/libnativeloader/test/src/android/test/app/ProductAppTest.java
index 82d8b6e1f1..4cf379c9c6 100644
--- a/libnativeloader/test/src/android/test/app/ProductAppTest.java
+++ b/libnativeloader/test/src/android/test/app/ProductAppTest.java
@@ -16,8 +16,7 @@
package android.test.app;
-import android.os.Build;
-import android.os.SystemProperties;
+import android.test.lib.AppTestCommon;
import android.test.lib.TestUtils;
import android.test.productsharedlib.ProductSharedLib;
import android.test.systemextsharedlib.SystemExtSharedLib;
@@ -25,19 +24,19 @@ import android.test.systemsharedlib.SystemSharedLib;
import android.test.vendorsharedlib.VendorSharedLib;
import androidx.test.filters.MediumTest;
-import androidx.test.runner.AndroidJUnit4;
import org.junit.Test;
-import org.junit.runner.RunWith;
@MediumTest
-@RunWith(AndroidJUnit4.class)
-public class ProductAppTest {
- // True if apps in product partitions get shared library namespaces, so we
- // cannot test that libs in system and system_ext get blocked.
- private static boolean productAppsAreShared() {
- return Build.VERSION.SDK_INT <= 34 && // UPSIDE_DOWN_CAKE
- SystemProperties.get("ro.product.vndk.version").isEmpty();
+public class ProductAppTest extends AppTestCommon {
+ @Override
+ public AppLocation getAppLocation() {
+ return AppLocation.PRODUCT;
+ }
+
+ @Test
+ public void testPrivateLibsExist() {
+ TestUtils.testPrivateLibsExist("/product", "product_private");
}
@Test
@@ -45,8 +44,8 @@ public class ProductAppTest {
System.loadLibrary("system_extpub.oem1");
System.loadLibrary("system_extpub.oem2");
System.loadLibrary("system_extpub1.oem1");
- if (!productAppsAreShared()) {
- TestUtils.assertLinkerNamespaceError( // Missing <uses-native-library>.
+ if (!TestUtils.productAppsAreShared()) {
+ TestUtils.assertLibraryInaccessible( // Missing <uses-native-library>.
() -> System.loadLibrary("system_extpub_nouses.oem2"));
}
System.loadLibrary("product_extpub.product1");
@@ -55,12 +54,12 @@ public class ProductAppTest {
@Test
public void testLoadPrivateLibraries() {
- if (!productAppsAreShared()) {
- TestUtils.assertLinkerNamespaceError(() -> System.loadLibrary("system_private1"));
- TestUtils.assertLinkerNamespaceError(() -> System.loadLibrary("systemext_private1"));
+ if (!TestUtils.productAppsAreShared()) {
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("system_private1"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("systemext_private1"));
}
System.loadLibrary("product_private1");
- TestUtils.assertLibraryNotFound(() -> System.loadLibrary("vendor_private1"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("vendor_private1"));
}
@Test
@@ -73,49 +72,65 @@ public class ProductAppTest {
@Test
public void testLoadPrivateLibrariesViaSystemSharedLib() {
- // TODO(b/237577392): Loading a private native system library via a shared system library
- // ought to work.
- // SystemSharedLib.loadLibrary("system_private2");
- // SystemSharedLib.loadLibrary("systemext_private2");
- if (!productAppsAreShared()) {
- TestUtils.assertLibraryNotFound(() -> SystemSharedLib.loadLibrary("product_private2"));
+ if (!TestUtils.productAppsAreShared()) {
+ // TODO(b/237577392): Loading a private native system library via a shared system
+ // library ought to work.
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemSharedLib.loadLibrary("system_private2"));
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemSharedLib.loadLibrary("systemext_private2"));
+
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemSharedLib.loadLibrary("product_private2"));
}
- TestUtils.assertLibraryNotFound(() -> SystemSharedLib.loadLibrary("vendor_private2"));
+
+ TestUtils.assertLibraryInaccessible(() -> SystemSharedLib.loadLibrary("vendor_private2"));
}
@Test
public void testLoadPrivateLibrariesViaSystemExtSharedLib() {
- // TODO(b/237577392): Loading a private native system library via a shared system library
- // ought to work.
- // SystemExtSharedLib.loadLibrary("system_private3");
- // SystemExtSharedLib.loadLibrary("systemext_private3");
- if (!productAppsAreShared()) {
- TestUtils.assertLibraryNotFound(
+ if (!TestUtils.productAppsAreShared()) {
+ // TODO(b/237577392): Loading a private native system library via a shared system
+ // library ought to work.
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemExtSharedLib.loadLibrary("system_private3"));
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemExtSharedLib.loadLibrary("systemext_private3"));
+
+ TestUtils.assertLibraryInaccessible(
() -> SystemExtSharedLib.loadLibrary("product_private3"));
}
- TestUtils.assertLibraryNotFound(() -> SystemExtSharedLib.loadLibrary("vendor_private3"));
+
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemExtSharedLib.loadLibrary("vendor_private3"));
}
@Test
public void testLoadPrivateLibrariesViaProductSharedLib() {
- if (!productAppsAreShared()) {
- TestUtils.assertLinkerNamespaceError(
+ if (!TestUtils.productAppsAreShared()) {
+ TestUtils.assertLibraryInaccessible(
() -> ProductSharedLib.loadLibrary("system_private4"));
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> ProductSharedLib.loadLibrary("systemext_private4"));
}
+
+ // Can load product_private4 by name only through the app classloader namespace.
ProductSharedLib.loadLibrary("product_private4");
- TestUtils.assertLibraryNotFound(() -> ProductSharedLib.loadLibrary("vendor_private4"));
+
+ TestUtils.assertLibraryInaccessible(() -> ProductSharedLib.loadLibrary("vendor_private4"));
}
@Test
public void testLoadPrivateLibrariesViaVendorSharedLib() {
- if (!productAppsAreShared()) {
- TestUtils.assertLinkerNamespaceError(
+ if (!TestUtils.productAppsAreShared()) {
+ TestUtils.assertLibraryInaccessible(
() -> VendorSharedLib.loadLibrary("system_private5"));
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> VendorSharedLib.loadLibrary("systemext_private5"));
- TestUtils.assertLibraryNotFound(() -> VendorSharedLib.loadLibrary("product_private5"));
+
+ TestUtils.assertLibraryInaccessible(
+ () -> VendorSharedLib.loadLibrary("product_private5"));
+
// When the app has a shared namespace, its libraries get loaded
// with shared namespaces as well, inheriting the same paths. So
// since the app wouldn't have access to /vendor/${LIB},
@@ -132,14 +147,14 @@ public class ProductAppTest {
@Test
public void testLoadPrivateLibrariesWithAbsolutePaths() {
- if (!productAppsAreShared()) {
- TestUtils.assertLinkerNamespaceError(
+ if (!TestUtils.productAppsAreShared()) {
+ TestUtils.assertLibraryInaccessible(
() -> System.load(TestUtils.libPath("/system", "system_private6")));
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> System.load(TestUtils.libPath("/system_ext", "systemext_private6")));
}
System.load(TestUtils.libPath("/product", "product_private6"));
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> System.load(TestUtils.libPath("/vendor", "vendor_private6")));
}
}
diff --git a/libnativeloader/test/src/android/test/app/SystemAppTest.java b/libnativeloader/test/src/android/test/app/SystemAppTest.java
index a909a4c49d..8d14753b18 100644
--- a/libnativeloader/test/src/android/test/app/SystemAppTest.java
+++ b/libnativeloader/test/src/android/test/app/SystemAppTest.java
@@ -16,20 +16,31 @@
package android.test.app;
+import android.test.lib.AppTestCommon;
import android.test.lib.TestUtils;
import android.test.productsharedlib.ProductSharedLib;
import android.test.systemextsharedlib.SystemExtSharedLib;
import android.test.systemsharedlib.SystemSharedLib;
import android.test.vendorsharedlib.VendorSharedLib;
+
import androidx.test.filters.MediumTest;
-import androidx.test.runner.AndroidJUnit4;
+
import org.junit.Test;
-import org.junit.runner.RunWith;
// These tests are run from /system/app, /system/priv-app, and /system_ext/app.
@MediumTest
-@RunWith(AndroidJUnit4.class)
-public class SystemAppTest {
+public class SystemAppTest extends AppTestCommon {
+ @Override
+ public AppLocation getAppLocation() {
+ return AppLocation.SYSTEM;
+ }
+
+ @Test
+ public void testPrivateLibsExist() {
+ TestUtils.testPrivateLibsExist("/system", "system_private");
+ TestUtils.testPrivateLibsExist("/system_ext", "systemext_private");
+ }
+
@Test
public void testLoadExtendedPublicLibraries() {
System.loadLibrary("system_extpub.oem1");
@@ -49,9 +60,9 @@ public class SystemAppTest {
System.loadLibrary("system_private1");
System.loadLibrary("systemext_private1");
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLibraryNotFound(() -> System.loadLibrary("product_private1"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("product_private1"));
}
- TestUtils.assertLibraryNotFound(() -> System.loadLibrary("vendor_private1"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("vendor_private1"));
}
@Test
@@ -66,41 +77,57 @@ public class SystemAppTest {
public void testLoadPrivateLibrariesViaSystemSharedLib() {
SystemSharedLib.loadLibrary("system_private2");
SystemSharedLib.loadLibrary("systemext_private2");
+
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLibraryNotFound(() -> SystemSharedLib.loadLibrary("product_private2"));
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemSharedLib.loadLibrary("product_private2"));
}
- TestUtils.assertLibraryNotFound(() -> SystemSharedLib.loadLibrary("vendor_private2"));
+
+ TestUtils.assertLibraryInaccessible(() -> SystemSharedLib.loadLibrary("vendor_private2"));
}
@Test
public void testLoadPrivateLibrariesViaSystemExtSharedLib() {
SystemExtSharedLib.loadLibrary("system_private3");
SystemExtSharedLib.loadLibrary("systemext_private3");
+
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLibraryNotFound(
+ TestUtils.assertLibraryInaccessible(
() -> SystemExtSharedLib.loadLibrary("product_private3"));
}
- TestUtils.assertLibraryNotFound(() -> SystemExtSharedLib.loadLibrary("vendor_private3"));
+
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemExtSharedLib.loadLibrary("vendor_private3"));
}
@Test
public void testLoadPrivateLibrariesViaProductSharedLib() {
+ // See AppTestCommon.isSharedSystemApp() for an explanation of these
+ // behaviours.
ProductSharedLib.loadLibrary("system_private4");
ProductSharedLib.loadLibrary("systemext_private4");
+
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLibraryNotFound(() -> ProductSharedLib.loadLibrary("product_private4"));
+ TestUtils.assertLibraryInaccessible(
+ () -> ProductSharedLib.loadLibrary("product_private4"));
}
- TestUtils.assertLibraryNotFound(() -> ProductSharedLib.loadLibrary("vendor_private4"));
+
+ TestUtils.assertLibraryInaccessible(() -> ProductSharedLib.loadLibrary("vendor_private4"));
}
@Test
public void testLoadPrivateLibrariesViaVendorSharedLib() {
+ // See AppTestCommon.isSharedSystemApp() for an explanation of these
+ // behaviours.
VendorSharedLib.loadLibrary("system_private5");
VendorSharedLib.loadLibrary("systemext_private5");
+
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLibraryNotFound(() -> VendorSharedLib.loadLibrary("product_private5"));
+ TestUtils.assertLibraryInaccessible(
+ () -> VendorSharedLib.loadLibrary("product_private5"));
}
- TestUtils.assertLibraryNotFound(() -> VendorSharedLib.loadLibrary("vendor_private5"));
+
+ TestUtils.assertLibraryInaccessible(() -> VendorSharedLib.loadLibrary("vendor_private5"));
}
@Test
@@ -116,10 +143,10 @@ public class SystemAppTest {
System.load(TestUtils.libPath("/system", "system_private6"));
System.load(TestUtils.libPath("/system_ext", "systemext_private6"));
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> System.load(TestUtils.libPath("/product", "product_private6")));
}
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> System.load(TestUtils.libPath("/vendor", "vendor_private6")));
}
}
diff --git a/libnativeloader/test/src/android/test/app/VendorAppTest.java b/libnativeloader/test/src/android/test/app/VendorAppTest.java
index 52688bbf35..377f670c74 100644
--- a/libnativeloader/test/src/android/test/app/VendorAppTest.java
+++ b/libnativeloader/test/src/android/test/app/VendorAppTest.java
@@ -16,25 +16,35 @@
package android.test.app;
+import android.test.lib.AppTestCommon;
import android.test.lib.TestUtils;
import android.test.productsharedlib.ProductSharedLib;
import android.test.systemextsharedlib.SystemExtSharedLib;
import android.test.systemsharedlib.SystemSharedLib;
import android.test.vendorsharedlib.VendorSharedLib;
+
import androidx.test.filters.MediumTest;
-import androidx.test.runner.AndroidJUnit4;
+
import org.junit.Test;
-import org.junit.runner.RunWith;
@MediumTest
-@RunWith(AndroidJUnit4.class)
-public class VendorAppTest {
+public class VendorAppTest extends AppTestCommon {
+ @Override
+ public AppLocation getAppLocation() {
+ return AppLocation.VENDOR;
+ }
+
+ @Test
+ public void testPrivateLibsExist() {
+ TestUtils.testPrivateLibsExist("/vendor", "vendor_private");
+ }
+
@Test
public void testLoadExtendedPublicLibraries() {
- TestUtils.assertLinkerNamespaceError(() -> System.loadLibrary("system_extpub.oem1"));
- TestUtils.assertLinkerNamespaceError(() -> System.loadLibrary("system_extpub.oem2"));
- TestUtils.assertLinkerNamespaceError(() -> System.loadLibrary("system_extpub1.oem1"));
- TestUtils.assertLinkerNamespaceError(() -> System.loadLibrary("system_extpub_nouses.oem2"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("system_extpub.oem1"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("system_extpub.oem2"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("system_extpub1.oem1"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("system_extpub_nouses.oem2"));
if (!TestUtils.skipPublicProductLibTests()) {
System.loadLibrary("product_extpub.product1");
System.loadLibrary("product_extpub1.product1");
@@ -43,10 +53,10 @@ public class VendorAppTest {
@Test
public void testLoadPrivateLibraries() {
- TestUtils.assertLinkerNamespaceError(() -> System.loadLibrary("system_private1"));
- TestUtils.assertLinkerNamespaceError(() -> System.loadLibrary("systemext_private1"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("system_private1"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("systemext_private1"));
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLibraryNotFound(() -> System.loadLibrary("product_private1"));
+ TestUtils.assertLibraryInaccessible(() -> System.loadLibrary("product_private1"));
}
System.loadLibrary("vendor_private1");
}
@@ -63,52 +73,67 @@ public class VendorAppTest {
public void testLoadPrivateLibrariesViaSystemSharedLib() {
// TODO(b/237577392): Loading a private native system library via a shared system library
// ought to work.
- // SystemSharedLib.loadLibrary("system_private2");
- // SystemSharedLib.loadLibrary("systemext_private2");
+ TestUtils.assertLibraryInaccessible(() -> SystemSharedLib.loadLibrary("system_private2"));
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemSharedLib.loadLibrary("systemext_private2"));
+
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLibraryNotFound(() -> SystemSharedLib.loadLibrary("product_private2"));
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemSharedLib.loadLibrary("product_private2"));
}
- TestUtils.assertLibraryNotFound(() -> SystemSharedLib.loadLibrary("vendor_private2"));
+
+ TestUtils.assertLibraryInaccessible(() -> SystemSharedLib.loadLibrary("vendor_private2"));
}
@Test
public void testLoadPrivateLibrariesViaSystemExtSharedLib() {
// TODO(b/237577392): Loading a private native system library via a shared system library
// ought to work.
- // SystemExtSharedLib.loadLibrary("system_private3");
- // SystemExtSharedLib.loadLibrary("systemext_private3");
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemExtSharedLib.loadLibrary("system_private3"));
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemExtSharedLib.loadLibrary("systemext_private3"));
+
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLibraryNotFound(
+ TestUtils.assertLibraryInaccessible(
() -> SystemExtSharedLib.loadLibrary("product_private3"));
}
- TestUtils.assertLibraryNotFound(() -> SystemExtSharedLib.loadLibrary("vendor_private3"));
+
+ TestUtils.assertLibraryInaccessible(
+ () -> SystemExtSharedLib.loadLibrary("vendor_private3"));
}
@Test
public void testLoadPrivateLibrariesViaProductSharedLib() {
- TestUtils.assertLinkerNamespaceError(() -> ProductSharedLib.loadLibrary("system_private4"));
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(() -> ProductSharedLib.loadLibrary("system_private4"));
+ TestUtils.assertLibraryInaccessible(
() -> ProductSharedLib.loadLibrary("systemext_private4"));
+
if (!TestUtils.skipPublicProductLibTests()) {
ProductSharedLib.loadLibrary("product_private4");
}
- TestUtils.assertLibraryNotFound(() -> ProductSharedLib.loadLibrary("vendor_private4"));
+
+ TestUtils.assertLibraryInaccessible(() -> ProductSharedLib.loadLibrary("vendor_private4"));
}
@Test
public void testLoadPrivateLibrariesViaVendorSharedLib() {
- TestUtils.assertLinkerNamespaceError(() -> VendorSharedLib.loadLibrary("system_private5"));
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(() -> VendorSharedLib.loadLibrary("system_private5"));
+ TestUtils.assertLibraryInaccessible(
() -> VendorSharedLib.loadLibrary("systemext_private5"));
+
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLibraryNotFound(() -> VendorSharedLib.loadLibrary("product_private5"));
+ TestUtils.assertLibraryInaccessible(
+ () -> VendorSharedLib.loadLibrary("product_private5"));
}
+
+ // Can load vendor_private5 by name only through the app classloader namespace.
VendorSharedLib.loadLibrary("vendor_private5");
}
@Test
public void testLoadExtendedPublicLibrariesWithAbsolutePaths() {
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> System.load(TestUtils.libPath("/system", "system_extpub3.oem1")));
if (!TestUtils.skipPublicProductLibTests()) {
System.load(TestUtils.libPath("/product", "product_extpub3.product1"));
@@ -117,12 +142,12 @@ public class VendorAppTest {
@Test
public void testLoadPrivateLibrariesWithAbsolutePaths() {
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> System.load(TestUtils.libPath("/system", "system_private6")));
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> System.load(TestUtils.libPath("/system_ext", "systemext_private6")));
if (!TestUtils.skipPublicProductLibTests()) {
- TestUtils.assertLinkerNamespaceError(
+ TestUtils.assertLibraryInaccessible(
() -> System.load(TestUtils.libPath("/product", "product_private6")));
}
System.load(TestUtils.libPath("/vendor", "vendor_private6"));
diff --git a/libnativeloader/test/src/android/test/hostside/LibnativeloaderTest.java b/libnativeloader/test/src/android/test/hostside/LibnativeloaderTest.java
index 55a6dd27b3..bcb4528ffe 100644
--- a/libnativeloader/test/src/android/test/hostside/LibnativeloaderTest.java
+++ b/libnativeloader/test/src/android/test/hostside/LibnativeloaderTest.java
@@ -278,7 +278,10 @@ public class LibnativeloaderTest extends BaseHostJUnit4Test {
void pushPrivateLibs(ZipFile libApk) throws Exception {
// Push the libraries once for each test. Since we cannot unload them, we need a fresh
// never-before-loaded library in each loadLibrary call.
- for (int i = 1; i <= 6; ++i) {
+ //
+ // Remember to update testPrivateLibsExist in TestUtils.java when
+ // the number of libraries changes.
+ for (int i = 1; i <= 10; ++i) {
pushNativeTestLib(libApk, "libsystem_testlib.so",
"/system/${LIB}/libsystem_private" + i + ".so");
pushNativeTestLib(libApk, "libsystem_testlib.so",
diff --git a/libnativeloader/test/src/android/test/lib/AppTestCommon.java b/libnativeloader/test/src/android/test/lib/AppTestCommon.java
new file mode 100644
index 0000000000..51f4655839
--- /dev/null
+++ b/libnativeloader/test/src/android/test/lib/AppTestCommon.java
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.test.lib;
+
+import android.test.productsharedlib.ProductSharedLib;
+import android.test.systemextsharedlib.SystemExtSharedLib;
+import android.test.systemsharedlib.SystemSharedLib;
+import android.test.vendorsharedlib.VendorSharedLib;
+
+import androidx.test.runner.AndroidJUnit4;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+@RunWith(AndroidJUnit4.class)
+public abstract class AppTestCommon {
+ public enum AppLocation { DATA, SYSTEM, PRODUCT, VENDOR }
+
+ public abstract AppLocation getAppLocation();
+
+ // Loading private libs using absolute paths through shared libs should
+ // normally only depend on the location of the shared lib, so these tests
+ // are shared for all apps, regardless of location.
+
+ // Returns true when system private native libs are accessible directly from
+ // the app classloader namespace.
+ private boolean systemPrivateLibsAccessibleFromAppNamespace() {
+ // Currently it only works from system apps. It also works from product
+ // apps on old versions where they were treated like system apps.
+ // TODO(b/237577392): Fix this to work from system shared libs.
+ return getAppLocation() == AppLocation.SYSTEM
+ || (getAppLocation() == AppLocation.PRODUCT && TestUtils.productAppsAreShared());
+ }
+
+ // Detect exception when product private libs are accessible directly from
+ // the app classloader namespace even when they shouldn't be.
+ private boolean productPrivateLibsAccessibleFromAppNamespace() {
+ // In old versions where product apps were treated like system apps, the
+ // product private libs were included in the system namespace, so
+ // they're accessible both from system and product apps.
+ // TODO(b/237577392): Fix this to work from product shared libs.
+ return (getAppLocation() == AppLocation.SYSTEM || getAppLocation() == AppLocation.PRODUCT)
+ && TestUtils.productAppsAreShared();
+ }
+
+ // Detect exception where we don't switch from a shared system namespace to
+ // a product or vendor "unbundled" namespace when calling into
+ // ProductSharedLib and VendorSharedLib. That means they still can load
+ // private system libs but not private libs in their own partition.
+ // TODO(mast): Stop propagating the shared property (isBundledApp in
+ // LoadedApk.java) down to public and vendor shared java libs?
+ private boolean noSwitchToVendorOrProductNamespace() {
+ // System apps get shared namespaces, and also product apps on old
+ // versions where they were treated like system apps.
+ return getAppLocation() == AppLocation.SYSTEM
+ || (getAppLocation() == AppLocation.PRODUCT && TestUtils.productAppsAreShared());
+ }
+
+ @Test
+ public void testLoadPrivateLibrariesViaSystemSharedLibWithAbsolutePaths() {
+ if (systemPrivateLibsAccessibleFromAppNamespace()) {
+ SystemSharedLib.load(TestUtils.libPath("/system", "system_private7"));
+ SystemSharedLib.load(TestUtils.libPath("/system_ext", "systemext_private7"));
+ } else {
+ TestUtils.assertLibraryInaccessible(() -> {
+ SystemSharedLib.load(TestUtils.libPath("/system", "system_private7"));
+ });
+ TestUtils.assertLibraryInaccessible(() -> {
+ SystemSharedLib.load(TestUtils.libPath("/system_ext", "systemext_private7"));
+ });
+ }
+
+ if (productPrivateLibsAccessibleFromAppNamespace()) {
+ SystemSharedLib.load(TestUtils.libPath("/product", "product_private7"));
+ } else {
+ TestUtils.assertLibraryInaccessible(() -> {
+ SystemSharedLib.load(TestUtils.libPath("/product", "product_private7"));
+ });
+ }
+
+ TestUtils.assertLibraryInaccessible(
+ () -> { SystemSharedLib.load(TestUtils.libPath("/vendor", "vendor_private7")); });
+ }
+
+ @Test
+ public void testLoadPrivateLibrariesViaSystemExtSharedLibWithAbsolutePaths() {
+ if (systemPrivateLibsAccessibleFromAppNamespace()) {
+ SystemExtSharedLib.load(TestUtils.libPath("/system", "system_private8"));
+ SystemExtSharedLib.load(TestUtils.libPath("/system_ext", "systemext_private8"));
+ } else {
+ TestUtils.assertLibraryInaccessible(() -> {
+ SystemExtSharedLib.load(TestUtils.libPath("/system", "system_private8"));
+ });
+ TestUtils.assertLibraryInaccessible(() -> {
+ SystemExtSharedLib.load(TestUtils.libPath("/system_ext", "systemext_private8"));
+ });
+ }
+
+ if (productPrivateLibsAccessibleFromAppNamespace()) {
+ SystemExtSharedLib.load(TestUtils.libPath("/product", "product_private8"));
+ } else {
+ TestUtils.assertLibraryInaccessible(() -> {
+ SystemExtSharedLib.load(TestUtils.libPath("/product", "product_private8"));
+ });
+ }
+
+ TestUtils.assertLibraryInaccessible(() -> {
+ SystemExtSharedLib.load(TestUtils.libPath("/vendor", "vendor_private8"));
+ });
+ }
+
+ @Test
+ public void testLoadPrivateLibrariesViaProductSharedLibWithAbsolutePaths() {
+ if (systemPrivateLibsAccessibleFromAppNamespace() || noSwitchToVendorOrProductNamespace()) {
+ ProductSharedLib.load(TestUtils.libPath("/system", "system_private9"));
+ ProductSharedLib.load(TestUtils.libPath("/system_ext", "systemext_private9"));
+ } else {
+ TestUtils.assertLibraryInaccessible(() -> {
+ ProductSharedLib.load(TestUtils.libPath("/system", "system_private9"));
+ });
+ TestUtils.assertLibraryInaccessible(() -> {
+ ProductSharedLib.load(TestUtils.libPath("/system_ext", "systemext_private9"));
+ });
+ }
+
+ boolean loadPrivateProductLib;
+ if (TestUtils.productAppsAreShared()) {
+ // The library is accessible if the app is in either system or
+ // product, because both are loaded as system apps and private product
+ // libs are available for both.
+ loadPrivateProductLib = getAppLocation() == AppLocation.SYSTEM
+ || getAppLocation() == AppLocation.PRODUCT;
+ } else {
+ loadPrivateProductLib = !noSwitchToVendorOrProductNamespace();
+ }
+ if (loadPrivateProductLib) {
+ ProductSharedLib.load(TestUtils.libPath("/product", "product_private9"));
+ } else {
+ TestUtils.assertLibraryInaccessible(() -> {
+ ProductSharedLib.load(TestUtils.libPath("/product", "product_private9"));
+ });
+ }
+
+ TestUtils.assertLibraryInaccessible(
+ () -> { ProductSharedLib.load(TestUtils.libPath("/vendor", "vendor_private9")); });
+ }
+
+ @Test
+ public void testLoadPrivateLibrariesViaVendorSharedLibWithAbsolutePaths() {
+ if (systemPrivateLibsAccessibleFromAppNamespace() || noSwitchToVendorOrProductNamespace()) {
+ VendorSharedLib.load(TestUtils.libPath("/system", "system_private10"));
+ VendorSharedLib.load(TestUtils.libPath("/system_ext", "systemext_private10"));
+ } else {
+ TestUtils.assertLibraryInaccessible(() -> {
+ VendorSharedLib.load(TestUtils.libPath("/system", "system_private10"));
+ });
+ TestUtils.assertLibraryInaccessible(() -> {
+ VendorSharedLib.load(TestUtils.libPath("/system_ext", "systemext_private10"));
+ });
+ }
+
+ if (productPrivateLibsAccessibleFromAppNamespace()) {
+ VendorSharedLib.load(TestUtils.libPath("/product", "product_private10"));
+ } else {
+ TestUtils.assertLibraryInaccessible(() -> {
+ VendorSharedLib.load(TestUtils.libPath("/product", "product_private10"));
+ });
+ }
+
+ if (!noSwitchToVendorOrProductNamespace()) {
+ VendorSharedLib.load(TestUtils.libPath("/vendor", "vendor_private10"));
+ } else {
+ TestUtils.assertLibraryInaccessible(() -> {
+ VendorSharedLib.load(TestUtils.libPath("/vendor", "vendor_private10"));
+ });
+ }
+ }
+}
diff --git a/libnativeloader/test/src/android/test/lib/TestUtils.java b/libnativeloader/test/src/android/test/lib/TestUtils.java
index ca64bad791..5f5cd911e8 100644
--- a/libnativeloader/test/src/android/test/lib/TestUtils.java
+++ b/libnativeloader/test/src/android/test/lib/TestUtils.java
@@ -17,25 +17,25 @@
package android.test.lib;
import static com.google.common.truth.Truth.assertThat;
+import static com.google.common.truth.Truth.assertWithMessage;
import static org.junit.Assert.assertThrows;
-import android.os.Build;
+import android.os.SystemProperties;
import androidx.test.platform.app.InstrumentationRegistry;
+import com.android.modules.utils.build.SdkLevel;
+
import org.junit.function.ThrowingRunnable;
-public final class TestUtils {
- public static void assertLibraryNotFound(ThrowingRunnable loadLibrary) {
- Throwable t = assertThrows(UnsatisfiedLinkError.class, loadLibrary);
- assertThat(t.getMessage()).containsMatch("dlopen failed: library .* not found");
- }
+import java.io.File;
- public static void assertLinkerNamespaceError(ThrowingRunnable loadLibrary) {
+public final class TestUtils {
+ public static void assertLibraryInaccessible(ThrowingRunnable loadLibrary) {
Throwable t = assertThrows(UnsatisfiedLinkError.class, loadLibrary);
assertThat(t.getMessage())
- .containsMatch("dlopen failed: .* is not accessible for the namespace");
+ .containsMatch("dlopen failed: .* (not found|not accessible for the namespace)");
}
public static String libPath(String dir, String libName) {
@@ -46,6 +46,25 @@ public final class TestUtils {
// True if we have to skip testing public libraries in the product
// partition, which got supported in T.
public static boolean skipPublicProductLibTests() {
- return Build.VERSION.SDK_INT < 33; // TIRAMISU
+ return !SdkLevel.isAtLeastT();
+ }
+
+ // True if apps in product partitions get shared library namespaces, so we
+ // cannot test that libs in system and system_ext get blocked.
+ public static boolean productAppsAreShared() {
+ return !SdkLevel.isAtLeastU() && SystemProperties.get("ro.product.vndk.version").isEmpty();
+ }
+
+ // Test that private libs are present, as a safeguard so that the dlopen
+ // failures we expect in other tests aren't due to them not being there.
+ public static void testPrivateLibsExist(String libDir, String libStem) {
+ // Remember to update pushPrivateLibs in LibnativeloaderTest.java when
+ // the number of libraries changes.
+ for (int i = 1; i <= 10; ++i) {
+ String libPath = libPath(libDir, libStem + i);
+ assertWithMessage(libPath + " does not exist")
+ .that(new File(libPath).exists())
+ .isTrue();
+ }
}
}
diff --git a/libnativeloader/test/src/android/test/productsharedlib/ProductSharedLib.java b/libnativeloader/test/src/android/test/productsharedlib/ProductSharedLib.java
index a500d2a976..a8966d9825 100644
--- a/libnativeloader/test/src/android/test/productsharedlib/ProductSharedLib.java
+++ b/libnativeloader/test/src/android/test/productsharedlib/ProductSharedLib.java
@@ -17,5 +17,11 @@
package android.test.productsharedlib;
public final class ProductSharedLib {
- public static void loadLibrary(String name) { System.loadLibrary(name); }
+ public static void loadLibrary(String name) {
+ System.loadLibrary(name);
+ }
+
+ public static void load(String path) {
+ System.load(path);
+ }
}
diff --git a/libnativeloader/test/src/android/test/systemextsharedlib/SystemExtSharedLib.java b/libnativeloader/test/src/android/test/systemextsharedlib/SystemExtSharedLib.java
index 1240e12e55..ae9a6da053 100644
--- a/libnativeloader/test/src/android/test/systemextsharedlib/SystemExtSharedLib.java
+++ b/libnativeloader/test/src/android/test/systemextsharedlib/SystemExtSharedLib.java
@@ -17,5 +17,11 @@
package android.test.systemextsharedlib;
public final class SystemExtSharedLib {
- public static void loadLibrary(String name) { System.loadLibrary(name); }
+ public static void loadLibrary(String name) {
+ System.loadLibrary(name);
+ }
+
+ public static void load(String path) {
+ System.load(path);
+ }
}
diff --git a/libnativeloader/test/src/android/test/systemsharedlib/SystemSharedLib.java b/libnativeloader/test/src/android/test/systemsharedlib/SystemSharedLib.java
index 8e2af9f79c..ba8c9e2574 100644
--- a/libnativeloader/test/src/android/test/systemsharedlib/SystemSharedLib.java
+++ b/libnativeloader/test/src/android/test/systemsharedlib/SystemSharedLib.java
@@ -17,5 +17,11 @@
package android.test.systemsharedlib;
public final class SystemSharedLib {
- public static void loadLibrary(String name) { System.loadLibrary(name); }
+ public static void loadLibrary(String name) {
+ System.loadLibrary(name);
+ }
+
+ public static void load(String path) {
+ System.load(path);
+ }
}
diff --git a/libnativeloader/test/src/android/test/vendorsharedlib/VendorSharedLib.java b/libnativeloader/test/src/android/test/vendorsharedlib/VendorSharedLib.java
index 8859b63db4..14223d88f8 100644
--- a/libnativeloader/test/src/android/test/vendorsharedlib/VendorSharedLib.java
+++ b/libnativeloader/test/src/android/test/vendorsharedlib/VendorSharedLib.java
@@ -17,5 +17,11 @@
package android.test.vendorsharedlib;
public final class VendorSharedLib {
- public static void loadLibrary(String name) { System.loadLibrary(name); }
+ public static void loadLibrary(String name) {
+ System.loadLibrary(name);
+ }
+
+ public static void load(String path) {
+ System.load(path);
+ }
}
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 69dc23ae3d..a93ad37e61 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -2109,7 +2109,7 @@ class ImageDumper {
if (obj_class->IsArrayClass()) {
os << StringPrintf("%p: %s length:%d\n", obj, obj_class->PrettyDescriptor().c_str(),
obj->AsArray()->GetLength());
- } else if (obj->IsClass()) {
+ } else if (obj_class->IsClassClass()) {
ObjPtr<mirror::Class> klass = obj->AsClass();
os << StringPrintf("%p: java.lang.Class \"%s\" (",
obj,
@@ -2146,7 +2146,7 @@ class ImageDumper {
(value == nullptr) ? obj_class->GetComponentType() : value->GetClass();
PrettyObjectValue(os, value_class, value);
}
- } else if (obj->IsClass()) {
+ } else if (obj_class->IsClassClass()) {
ObjPtr<mirror::Class> klass = obj->AsClass();
if (kBitstringSubtypeCheckEnabled) {
@@ -2155,6 +2155,16 @@ class ImageDumper {
os << "\n";
}
+ if (klass->ShouldHaveEmbeddedVTable()) {
+ os << "EMBEDDED VTABLE:\n";
+ ScopedIndentation indent2(&vios_);
+ const PointerSize pointer_size = image_header_.GetPointerSize();
+ for (size_t i = 0, length = klass->GetEmbeddedVTableLength(); i != length; ++i) {
+ os << i << ": "
+ << ArtMethod::PrettyMethod(klass->GetEmbeddedVTableEntry(i, pointer_size)) << '\n';
+ }
+ }
+
if (klass->NumStaticFields() != 0) {
os << "STATICS:\n";
ScopedIndentation indent2(&vios_);
diff --git a/oatdump/oatdump_app_test.cc b/oatdump/oatdump_app_test.cc
index 03b43cef11..d4dee4f2f6 100644
--- a/oatdump/oatdump_app_test.cc
+++ b/oatdump/oatdump_app_test.cc
@@ -20,7 +20,6 @@ namespace art {
// Oat file compiled with a boot image. oatdump invoked with a boot image.
TEST_P(OatDumpTest, TestDumpOatWithRuntimeWithBootImage) {
- TEST_DISABLED_FOR_RISCV64();
ASSERT_TRUE(GenerateAppOdexFile(GetParam()));
ASSERT_TRUE(Exec(GetParam(),
kArgOatApp | kArgBootImage | kArgBcp | kArgIsa,
@@ -30,7 +29,6 @@ TEST_P(OatDumpTest, TestDumpOatWithRuntimeWithBootImage) {
// Oat file compiled without a boot image. oatdump invoked without a boot image.
TEST_P(OatDumpTest, TestDumpOatWithRuntimeWithNoBootImage) {
- TEST_DISABLED_FOR_RISCV64();
TEST_DISABLED_FOR_DEBUG_BUILD(); // DCHECK failed.
ASSERT_TRUE(GenerateAppOdexFile(GetParam(), {"--boot-image=/nonx/boot.art"}));
ASSERT_TRUE(Exec(GetParam(),
@@ -41,7 +39,6 @@ TEST_P(OatDumpTest, TestDumpOatWithRuntimeWithNoBootImage) {
// Dex code cannot be found in the vdex file, and no --dex-file is specified. Dump header only.
TEST_P(OatDumpTest, TestDumpOatTryWithRuntimeDexNotFound) {
- TEST_DISABLED_FOR_RISCV64();
ASSERT_TRUE(
GenerateAppOdexFile(GetParam(), {"--dex-location=/nonx/app.jar", "--copy-dex-files=false"}));
ASSERT_TRUE(Exec(GetParam(), kArgOatApp | kArgBootImage | kArgBcp | kArgIsa, {}, kExpectOat));
@@ -49,7 +46,6 @@ TEST_P(OatDumpTest, TestDumpOatTryWithRuntimeDexNotFound) {
// Dex code cannot be found in the vdex file, but can be found in the specified dex file.
TEST_P(OatDumpTest, TestDumpOatWithRuntimeDexSpecified) {
- TEST_DISABLED_FOR_RISCV64();
ASSERT_TRUE(
GenerateAppOdexFile(GetParam(), {"--dex-location=/nonx/app.jar", "--copy-dex-files=false"}));
ASSERT_TRUE(Exec(GetParam(),
@@ -60,7 +56,6 @@ TEST_P(OatDumpTest, TestDumpOatWithRuntimeDexSpecified) {
// Oat file compiled with a boot image. oatdump invoked without a boot image.
TEST_P(OatDumpTest, TestDumpOatWithoutRuntimeBcpMismatch) {
- TEST_DISABLED_FOR_RISCV64();
ASSERT_TRUE(GenerateAppOdexFile(GetParam()));
ASSERT_TRUE(Exec(GetParam(),
kArgOatApp | kArgBcp | kArgIsa,
@@ -70,14 +65,12 @@ TEST_P(OatDumpTest, TestDumpOatWithoutRuntimeBcpMismatch) {
// Bootclasspath not specified.
TEST_P(OatDumpTest, TestDumpOatWithoutRuntimeNoBcp) {
- TEST_DISABLED_FOR_RISCV64();
ASSERT_TRUE(GenerateAppOdexFile(GetParam()));
ASSERT_TRUE(Exec(GetParam(), kArgOatApp, {}, kExpectOat | kExpectCode | kExpectBssOffsetsForBcp));
}
// Dex code cannot be found in the vdex file, and no --dex-file is specified. Dump header only.
TEST_P(OatDumpTest, TestDumpOatWithoutRuntimeDexNotFound) {
- TEST_DISABLED_FOR_RISCV64();
ASSERT_TRUE(
GenerateAppOdexFile(GetParam(), {"--dex-location=/nonx/app.jar", "--copy-dex-files=false"}));
ASSERT_TRUE(Exec(GetParam(), kArgOatApp, {}, kExpectOat));
@@ -85,7 +78,6 @@ TEST_P(OatDumpTest, TestDumpOatWithoutRuntimeDexNotFound) {
// Dex code cannot be found in the vdex file, but can be found in the specified dex file.
TEST_P(OatDumpTest, TestDumpOatWithoutRuntimeDexSpecified) {
- TEST_DISABLED_FOR_RISCV64();
ASSERT_TRUE(
GenerateAppOdexFile(GetParam(), {"--dex-location=/nonx/app.jar", "--copy-dex-files=false"}));
ASSERT_TRUE(Exec(
@@ -93,7 +85,6 @@ TEST_P(OatDumpTest, TestDumpOatWithoutRuntimeDexSpecified) {
}
TEST_P(OatDumpTest, TestDumpAppImageWithBootImage) {
- TEST_DISABLED_FOR_RISCV64();
TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS(); // GC bug, b/126305867
const std::string app_image_arg = "--app-image-file=" + GetAppImageName();
ASSERT_TRUE(GenerateAppOdexFile(GetParam(), {app_image_arg}));
@@ -105,7 +96,6 @@ TEST_P(OatDumpTest, TestDumpAppImageWithBootImage) {
// Deprecated usage, but checked for compatibility.
TEST_P(OatDumpTest, TestDumpAppImageWithBootImageLegacy) {
- TEST_DISABLED_FOR_RISCV64();
TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS(); // GC bug, b/126305867
const std::string app_image_arg = "--app-image-file=" + GetAppImageName();
ASSERT_TRUE(GenerateAppOdexFile(GetParam(), {app_image_arg}));
@@ -116,7 +106,6 @@ TEST_P(OatDumpTest, TestDumpAppImageWithBootImageLegacy) {
}
TEST_P(OatDumpTest, TestDumpAppImageInvalidPath) {
- TEST_DISABLED_FOR_RISCV64();
TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS(); // GC bug, b/126305867
const std::string app_image_arg = "--app-image-file=" + GetAppImageName();
ASSERT_TRUE(GenerateAppOdexFile(GetParam(), {app_image_arg}));
@@ -129,7 +118,6 @@ TEST_P(OatDumpTest, TestDumpAppImageInvalidPath) {
// The runtime can start, but the boot image check should fail.
TEST_P(OatDumpTest, TestDumpAppImageWithWrongBootImage) {
- TEST_DISABLED_FOR_RISCV64();
TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS(); // GC bug, b/126305867
const std::string app_image_arg = "--app-image-file=" + GetAppImageName();
ASSERT_TRUE(GenerateAppOdexFile(GetParam(), {app_image_arg}));
@@ -142,7 +130,6 @@ TEST_P(OatDumpTest, TestDumpAppImageWithWrongBootImage) {
// Not possible.
TEST_P(OatDumpTest, TestDumpAppImageWithoutRuntime) {
- TEST_DISABLED_FOR_RISCV64();
TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS(); // GC bug, b/126305867
const std::string app_image_arg = "--app-image-file=" + GetAppImageName();
ASSERT_TRUE(GenerateAppOdexFile(GetParam(), {app_image_arg}));
@@ -155,7 +142,6 @@ TEST_P(OatDumpTest, TestDumpAppImageWithoutRuntime) {
// Dex code cannot be found in the vdex file, and no --dex-file is specified. Cannot dump app image.
TEST_P(OatDumpTest, TestDumpAppImageDexNotFound) {
- TEST_DISABLED_FOR_RISCV64();
TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS(); // GC bug, b/126305867
const std::string app_image_arg = "--app-image-file=" + GetAppImageName();
ASSERT_TRUE(GenerateAppOdexFile(
@@ -169,7 +155,6 @@ TEST_P(OatDumpTest, TestDumpAppImageDexNotFound) {
// Dex code cannot be found in the vdex file, but can be found in the specified dex file.
TEST_P(OatDumpTest, TestDumpAppImageDexSpecified) {
- TEST_DISABLED_FOR_RISCV64();
TEST_DISABLED_WITHOUT_BAKER_READ_BARRIERS(); // GC bug, b/126305867
const std::string app_image_arg = "--app-image-file=" + GetAppImageName();
ASSERT_TRUE(GenerateAppOdexFile(
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index 88cd044a48..6cd3a9742a 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -66,10 +66,10 @@ TEST_P(OatDumpTest, TestListMethods) {
}
TEST_P(OatDumpTest, TestSymbolize) {
- TEST_DISABLED_FOR_RISCV64();
if (GetParam() == Flavor::kDynamic) {
TEST_DISABLED_FOR_TARGET(); // Can not write files inside the apex directory.
} else {
+ TEST_DISABLED_FOR_RISCV64();
TEST_DISABLED_FOR_ARM_AND_ARM64();
}
std::string error_msg;
@@ -77,8 +77,8 @@ TEST_P(OatDumpTest, TestSymbolize) {
}
TEST_P(OatDumpTest, TestExportDex) {
- TEST_DISABLED_FOR_RISCV64();
if (GetParam() == Flavor::kStatic) {
+ TEST_DISABLED_FOR_RISCV64();
TEST_DISABLED_FOR_ARM_AND_ARM64();
}
std::string error_msg;
diff --git a/odrefresh/Android.bp b/odrefresh/Android.bp
index 2a020da634..7b9aad7913 100644
--- a/odrefresh/Android.bp
+++ b/odrefresh/Android.bp
@@ -41,10 +41,10 @@ cc_defaults {
"art-odrefresh-operator-srcs",
],
shared_libs: [
- "libartpalette",
"libarttools", // Contains "libc++fs".
"libbase",
"liblog",
+ "libselinux",
],
static_libs: [
"libmodules-utils-build",
diff --git a/odrefresh/odrefresh.cc b/odrefresh/odrefresh.cc
index ea624b21bb..f515aefa10 100644
--- a/odrefresh/odrefresh.cc
+++ b/odrefresh/odrefresh.cc
@@ -85,8 +85,7 @@
#include "odr_fs_utils.h"
#include "odr_metrics.h"
#include "odrefresh/odrefresh.h"
-#include "palette/palette.h"
-#include "palette/palette_types.h"
+#include "selinux/selinux.h"
#include "tools/cmdline_builder.h"
namespace art {
@@ -194,6 +193,28 @@ bool MoveOrEraseFiles(const std::vector<std::unique_ptr<File>>& files,
return true;
}
+Result<std::string> CreateStagingDirectory() {
+ std::string staging_dir = GetArtApexData() + "/staging";
+
+ std::error_code ec;
+ if (std::filesystem::exists(staging_dir, ec)) {
+ if (!std::filesystem::remove_all(staging_dir, ec)) {
+ return Errorf(
+ "Could not remove existing staging directory '{}': {}", staging_dir, ec.message());
+ }
+ }
+
+ if (mkdir(staging_dir.c_str(), S_IRWXU) != 0) {
+ return ErrnoErrorf("Could not create staging directory '{}'", staging_dir);
+ }
+
+ if (setfilecon(staging_dir.c_str(), "u:object_r:apex_art_staging_data_file:s0") != 0) {
+ return ErrnoErrorf("Could not set label on staging directory '{}'", staging_dir);
+ }
+
+ return staging_dir;
+}
+
// Gets the `ApexInfo` associated with the currently active ART APEX.
std::optional<apex::ApexInfo> GetArtApexInfo(const std::vector<apex::ApexInfo>& info_list) {
auto it = std::find_if(info_list.begin(), info_list.end(), [](const apex::ApexInfo& info) {
@@ -2045,7 +2066,7 @@ OnDeviceRefresh::CompileSystemServer(const std::string& staging_dir,
WARN_UNUSED ExitCode OnDeviceRefresh::Compile(OdrMetrics& metrics,
CompilationOptions compilation_options) const {
- const char* staging_dir = nullptr;
+ std::string staging_dir;
metrics.SetStage(OdrMetrics::Stage::kPreparation);
// If partial compilation is disabled, we should compile everything regardless of what's in
@@ -2083,13 +2104,16 @@ WARN_UNUSED ExitCode OnDeviceRefresh::Compile(OdrMetrics& metrics,
}
if (!config_.GetStagingDir().empty()) {
- staging_dir = config_.GetStagingDir().c_str();
+ staging_dir = config_.GetStagingDir();
} else {
// Create staging area and assign label for generating compilation artifacts.
- if (PaletteCreateOdrefreshStagingDirectory(&staging_dir) != PALETTE_STATUS_OK) {
+ Result<std::string> res = CreateStagingDirectory();
+ if (!res.ok()) {
+ LOG(ERROR) << res.error().message();
metrics.SetStatus(OdrMetrics::Status::kStagingFailed);
return ExitCode::kCleanupFailed;
}
+ staging_dir = res.value();
}
std::string error_msg;
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index a460b39556..a1f25c466b 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -249,6 +249,7 @@ jvmtiError MethodUtil::GetLocalVariableTable(jvmtiEnv* env,
return OK;
};
+ // To avoid defining visitor in the same line as the `if`. We define the lambda and use std::move.
auto visitor = [&](const art::DexFile::LocalInfo& entry) {
if (err != OK) {
return;
@@ -275,9 +276,8 @@ jvmtiError MethodUtil::GetLocalVariableTable(jvmtiEnv* env,
});
};
- if (!accessor.DecodeDebugLocalInfo(art_method->IsStatic(),
- art_method->GetDexMethodIndex(),
- visitor)) {
+ if (!accessor.DecodeDebugLocalInfo(
+ art_method->IsStatic(), art_method->GetDexMethodIndex(), std::move(visitor))) {
// Something went wrong with decoding the debug information. It might as well not be there.
return ERR(ABSENT_INFORMATION);
}
@@ -754,6 +754,7 @@ jvmtiError CommonLocalVariableClosure::GetSlotType(art::ArtMethod* method,
bool found = false;
*type = art::Primitive::kPrimVoid;
descriptor->clear();
+ // To avoid defining visitor in the same line as the `if`. We define the lambda and use std::move.
auto visitor = [&](const art::DexFile::LocalInfo& entry) {
if (!found && entry.start_address_ <= dex_pc && entry.end_address_ > dex_pc &&
entry.reg_ == slot_) {
@@ -762,7 +763,8 @@ jvmtiError CommonLocalVariableClosure::GetSlotType(art::ArtMethod* method,
*descriptor = entry.descriptor_;
}
};
- if (!accessor.DecodeDebugLocalInfo(method->IsStatic(), method->GetDexMethodIndex(), visitor) ||
+ if (!accessor.DecodeDebugLocalInfo(
+ method->IsStatic(), method->GetDexMethodIndex(), std::move(visitor)) ||
!found) {
// Something went wrong with decoding the debug information. It might as well not be there.
// Try to find the type with the verifier.
diff --git a/runtime/Android.bp b/runtime/Android.bp
index d481d300a2..17f09cf814 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -310,6 +310,7 @@ cc_defaults {
"jit/jit.cc",
"jit/jit_code_cache.cc",
"jit/jit_memory_region.cc",
+ "jit/jit_options.cc",
"jit/profile_saver.cc",
"jit/profiling_info.cc",
"jit/small_pattern_matcher.cc",
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index cca20879c2..297b540ab2 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -19,7 +19,6 @@
#include "art_method.h"
-#include "art_field.h"
#include "base/callee_save_type.h"
#include "class_linker-inl.h"
#include "common_throws.h"
@@ -32,9 +31,7 @@
#include "dex/signature.h"
#include "gc_root-inl.h"
#include "imtable-inl.h"
-#include "intrinsics_enum.h"
-#include "jit/jit.h"
-#include "jit/profiling_info.h"
+#include "jit/jit_options.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 0f164a7d07..8f9d7a80ff 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -283,6 +283,11 @@ class CheckJniAbortCatcher {
GTEST_SKIP() << "WARNING: TEST DISABLED FOR ARM64"; \
}
+#define TEST_DISABLED_FOR_RISCV64() \
+ if (kRuntimeISA == InstructionSet::kRiscv64) { \
+ GTEST_SKIP() << "WARNING: TEST DISABLED FOR RISCV64"; \
+ }
+
#define TEST_DISABLED_FOR_X86() \
if (kRuntimeISA == InstructionSet::kX86) { \
GTEST_SKIP() << "WARNING: TEST DISABLED FOR X86"; \
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index ccf6fbd288..635de2af69 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -42,12 +42,16 @@ namespace art HIDDEN {
// Static fault manger object accessed by signal handler.
FaultManager fault_manager;
-// This needs to be NO_INLINE since some debuggers do not read the inline-info to set a breakpoint
-// if it isn't.
+// These need to be NO_INLINE since some debuggers do not read the inline-info to set a breakpoint
+// if they aren't.
extern "C" NO_INLINE __attribute__((visibility("default"))) void art_sigsegv_fault() {
// Set a breakpoint here to be informed when a SIGSEGV is unhandled by ART.
VLOG(signals)<< "Caught unknown SIGSEGV in ART fault handler - chaining to next handler.";
}
+extern "C" NO_INLINE __attribute__((visibility("default"))) void art_sigbus_fault() {
+ // Set a breakpoint here to be informed when a SIGBUS is unhandled by ART.
+ VLOG(signals) << "Caught unknown SIGBUS in ART fault handler - chaining to next handler.";
+}
// Signal handler called on SIGSEGV.
static bool art_sigsegv_handler(int sig, siginfo_t* info, void* context) {
@@ -232,7 +236,13 @@ bool FaultManager::HandleSigbusFault(int sig, siginfo_t* info, [[maybe_unused]]
// Simulate a crash in a handler.
raise(SIGBUS);
#endif
- return Runtime::Current()->GetHeap()->MarkCompactCollector()->SigbusHandler(info);
+ if (Runtime::Current()->GetHeap()->MarkCompactCollector()->SigbusHandler(info)) {
+ return true;
+ }
+
+ // Set a breakpoint in this function to catch unhandled signals.
+ art_sigbus_fault();
+ return false;
}
inline void FaultManager::CheckForUnrecognizedImplicitSuspendCheckInBootImage(
diff --git a/runtime/gc/space/image_space_test.cc b/runtime/gc/space/image_space_test.cc
index 4f6c4e9633..2979544d90 100644
--- a/runtime/gc/space/image_space_test.cc
+++ b/runtime/gc/space/image_space_test.cc
@@ -432,7 +432,6 @@ class ImageSpaceLoadingSingleComponentWithProfilesTest
TEST_F(ImageSpaceLoadingSingleComponentWithProfilesTest, Test) {
// Compiling the primary boot image into a single image is not allowed on host.
TEST_DISABLED_FOR_HOST();
- TEST_DISABLED_FOR_RISCV64();
CheckImageSpaceAndOatFile(/*space_count=*/1);
}
@@ -487,7 +486,6 @@ class ImageSpaceLoadingMultipleComponentsWithProfilesTest
TEST_F(ImageSpaceLoadingMultipleComponentsWithProfilesTest, Test) {
// Compiling the primary boot image into a single image is not allowed on host.
TEST_DISABLED_FOR_HOST();
- TEST_DISABLED_FOR_RISCV64();
CheckImageSpaceAndOatFile(/*space_count=*/1);
}
diff --git a/runtime/gc/verification.cc b/runtime/gc/verification.cc
index 926156fd8f..858c62a6e3 100644
--- a/runtime/gc/verification.cc
+++ b/runtime/gc/verification.cc
@@ -19,6 +19,8 @@
#include <iomanip>
#include <sstream>
+#include <android-base/unique_fd.h>
+
#include "art_field-inl.h"
#include "base/file_utils.h"
#include "base/logging.h"
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index ea645a2688..54a56f2939 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -63,103 +63,9 @@ namespace jit {
static constexpr bool kEnableOnStackReplacement = true;
-// Maximum permitted threshold value.
-static constexpr uint32_t kJitMaxThreshold = std::numeric_limits<uint16_t>::max();
-
-static constexpr uint32_t kJitDefaultOptimizeThreshold = 0xffff;
-// Different optimization threshold constants. These default to the equivalent optimization
-// thresholds divided by 2, but can be overridden at the command-line.
-static constexpr uint32_t kJitStressDefaultOptimizeThreshold = kJitDefaultOptimizeThreshold / 2;
-static constexpr uint32_t kJitSlowStressDefaultOptimizeThreshold =
- kJitStressDefaultOptimizeThreshold / 2;
-
-static constexpr uint32_t kJitDefaultWarmupThreshold = 0x3fff;
-// Different warm-up threshold constants. These default to the equivalent warmup thresholds divided
-// by 2, but can be overridden at the command-line.
-static constexpr uint32_t kJitStressDefaultWarmupThreshold = kJitDefaultWarmupThreshold / 2;
-static constexpr uint32_t kJitSlowStressDefaultWarmupThreshold =
- kJitStressDefaultWarmupThreshold / 2;
-
-DEFINE_RUNTIME_DEBUG_FLAG(Jit, kSlowMode);
-
// JIT compiler
JitCompilerInterface* Jit::jit_compiler_ = nullptr;
-JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
- auto* jit_options = new JitOptions;
- jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);
- jit_options->use_profiled_jit_compilation_ =
- options.GetOrDefault(RuntimeArgumentMap::UseProfiledJitCompilation);
-
- jit_options->code_cache_initial_capacity_ =
- options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity);
- jit_options->code_cache_max_capacity_ =
- options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity);
- jit_options->dump_info_on_shutdown_ =
- options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
- jit_options->profile_saver_options_ =
- options.GetOrDefault(RuntimeArgumentMap::ProfileSaverOpts);
- jit_options->thread_pool_pthread_priority_ =
- options.GetOrDefault(RuntimeArgumentMap::JITPoolThreadPthreadPriority);
- jit_options->zygote_thread_pool_pthread_priority_ =
- options.GetOrDefault(RuntimeArgumentMap::JITZygotePoolThreadPthreadPriority);
-
- // Set default optimize threshold to aid with checking defaults.
- jit_options->optimize_threshold_ =
- kIsDebugBuild
- ? (Jit::kSlowMode
- ? kJitSlowStressDefaultOptimizeThreshold
- : kJitStressDefaultOptimizeThreshold)
- : kJitDefaultOptimizeThreshold;
-
- // Set default warm-up threshold to aid with checking defaults.
- jit_options->warmup_threshold_ =
- kIsDebugBuild ? (Jit::kSlowMode
- ? kJitSlowStressDefaultWarmupThreshold
- : kJitStressDefaultWarmupThreshold)
- : kJitDefaultWarmupThreshold;
-
- if (options.Exists(RuntimeArgumentMap::JITOptimizeThreshold)) {
- jit_options->optimize_threshold_ = *options.Get(RuntimeArgumentMap::JITOptimizeThreshold);
- }
- DCHECK_LE(jit_options->optimize_threshold_, kJitMaxThreshold);
-
- if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
- jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
- }
- DCHECK_LE(jit_options->warmup_threshold_, kJitMaxThreshold);
-
- if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
- jit_options->priority_thread_weight_ =
- *options.Get(RuntimeArgumentMap::JITPriorityThreadWeight);
- if (jit_options->priority_thread_weight_ > jit_options->warmup_threshold_) {
- LOG(FATAL) << "Priority thread weight is above the warmup threshold.";
- } else if (jit_options->priority_thread_weight_ == 0) {
- LOG(FATAL) << "Priority thread weight cannot be 0.";
- }
- } else {
- jit_options->priority_thread_weight_ = std::max(
- jit_options->warmup_threshold_ / Jit::kDefaultPriorityThreadWeightRatio,
- static_cast<size_t>(1));
- }
-
- if (options.Exists(RuntimeArgumentMap::JITInvokeTransitionWeight)) {
- jit_options->invoke_transition_weight_ =
- *options.Get(RuntimeArgumentMap::JITInvokeTransitionWeight);
- if (jit_options->invoke_transition_weight_ > jit_options->warmup_threshold_) {
- LOG(FATAL) << "Invoke transition weight is above the warmup threshold.";
- } else if (jit_options->invoke_transition_weight_ == 0) {
- LOG(FATAL) << "Invoke transition weight cannot be 0.";
- }
- } else {
- jit_options->invoke_transition_weight_ = std::max(
- jit_options->warmup_threshold_ / Jit::kDefaultInvokeTransitionWeightRatio,
- static_cast<size_t>(1));
- }
-
- return jit_options;
-}
-
void Jit::DumpInfo(std::ostream& os) {
code_cache_->Dump(os);
cumulative_timings_.Dump(os);
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index e41675f1b4..64b522251d 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -24,14 +24,13 @@
#include "base/histogram-inl.h"
#include "base/macros.h"
#include "base/mutex.h"
-#include "base/runtime_debug.h"
#include "base/timing_logger.h"
#include "compilation_kind.h"
#include "handle.h"
#include "offsets.h"
#include "interpreter/mterp/nterp.h"
#include "jit/debugger_interface.h"
-#include "jit/profile_saver_options.h"
+#include "jit_options.h"
#include "obj_ptr.h"
#include "thread_pool.h"
@@ -62,130 +61,6 @@ class JitOptions;
static constexpr int16_t kJitCheckForOSR = -1;
static constexpr int16_t kJitHotnessDisabled = -2;
-// At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
-// See android/os/Process.java.
-static constexpr int kJitPoolThreadPthreadDefaultPriority = 9;
-// At what priority to schedule jit zygote threads compiling profiles in the background.
-// 19 is the lowest background priority on device.
-// See android/os/Process.java.
-static constexpr int kJitZygotePoolThreadPthreadDefaultPriority = 19;
-
-class JitOptions {
- public:
- static JitOptions* CreateFromRuntimeArguments(const RuntimeArgumentMap& options);
-
- uint16_t GetOptimizeThreshold() const {
- return optimize_threshold_;
- }
-
- uint16_t GetWarmupThreshold() const {
- return warmup_threshold_;
- }
-
- uint16_t GetPriorityThreadWeight() const {
- return priority_thread_weight_;
- }
-
- uint16_t GetInvokeTransitionWeight() const {
- return invoke_transition_weight_;
- }
-
- size_t GetCodeCacheInitialCapacity() const {
- return code_cache_initial_capacity_;
- }
-
- size_t GetCodeCacheMaxCapacity() const {
- return code_cache_max_capacity_;
- }
-
- bool DumpJitInfoOnShutdown() const {
- return dump_info_on_shutdown_;
- }
-
- const ProfileSaverOptions& GetProfileSaverOptions() const {
- return profile_saver_options_;
- }
-
- bool GetSaveProfilingInfo() const {
- return profile_saver_options_.IsEnabled();
- }
-
- int GetThreadPoolPthreadPriority() const {
- return thread_pool_pthread_priority_;
- }
-
- int GetZygoteThreadPoolPthreadPriority() const {
- return zygote_thread_pool_pthread_priority_;
- }
-
- bool UseJitCompilation() const {
- return use_jit_compilation_;
- }
-
- bool UseProfiledJitCompilation() const {
- return use_profiled_jit_compilation_;
- }
-
- void SetUseJitCompilation(bool b) {
- use_jit_compilation_ = b;
- }
-
- void SetSaveProfilingInfo(bool save_profiling_info) {
- profile_saver_options_.SetEnabled(save_profiling_info);
- }
-
- void SetWaitForJitNotificationsToSaveProfile(bool value) {
- profile_saver_options_.SetWaitForJitNotificationsToSave(value);
- }
-
- void SetJitAtFirstUse() {
- use_jit_compilation_ = true;
- optimize_threshold_ = 0;
- }
-
- void SetUseBaselineCompiler() {
- use_baseline_compiler_ = true;
- }
-
- bool UseBaselineCompiler() const {
- return use_baseline_compiler_;
- }
-
- private:
- // We add the sample in batches of size kJitSamplesBatchSize.
- // This method rounds the threshold so that it is multiple of the batch size.
- static uint32_t RoundUpThreshold(uint32_t threshold);
-
- bool use_jit_compilation_;
- bool use_profiled_jit_compilation_;
- bool use_baseline_compiler_;
- size_t code_cache_initial_capacity_;
- size_t code_cache_max_capacity_;
- uint32_t optimize_threshold_;
- uint32_t warmup_threshold_;
- uint16_t priority_thread_weight_;
- uint16_t invoke_transition_weight_;
- bool dump_info_on_shutdown_;
- int thread_pool_pthread_priority_;
- int zygote_thread_pool_pthread_priority_;
- ProfileSaverOptions profile_saver_options_;
-
- JitOptions()
- : use_jit_compilation_(false),
- use_profiled_jit_compilation_(false),
- use_baseline_compiler_(false),
- code_cache_initial_capacity_(0),
- code_cache_max_capacity_(0),
- optimize_threshold_(0),
- warmup_threshold_(0),
- priority_thread_weight_(0),
- invoke_transition_weight_(0),
- dump_info_on_shutdown_(false),
- thread_pool_pthread_priority_(kJitPoolThreadPthreadDefaultPriority),
- zygote_thread_pool_pthread_priority_(kJitZygotePoolThreadPthreadDefaultPriority) {}
-
- DISALLOW_COPY_AND_ASSIGN(JitOptions);
-};
// Implemented and provided by the compiler library.
class JitCompilerInterface {
@@ -305,13 +180,9 @@ class JitThreadPool : public AbstractThreadPool {
class Jit {
public:
- static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000;
- static constexpr size_t kDefaultInvokeTransitionWeightRatio = 500;
// How frequently should the interpreter check to see if OSR compilation is ready.
static constexpr int16_t kJitRecheckOSRThreshold = 101; // Prime number to avoid patterns.
- DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
-
virtual ~Jit();
// Create JIT itself.
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 7bfbe15059..3560ac17ff 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -1429,18 +1429,20 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
ScopedTrace trace(__FUNCTION__);
Thread* self = Thread::Current();
WaitUntilInlineCacheAccessible(self);
- std::vector<ProfilingInfo*> copies;
+ SafeMap<ArtMethod*, ProfilingInfo*> profiling_infos;
+ std::vector<ArtMethod*> copies;
// TODO: Avoid read barriers for potentially dead methods.
// ScopedDebugDisallowReadBarriers sddrb(self);
{
MutexLock mu(self, *Locks::jit_lock_);
- copies.reserve(profiling_infos_.size());
- for (const auto& entry : profiling_infos_) {
+ profiling_infos = profiling_infos_;
+ for (const auto& entry : method_code_map_) {
copies.push_back(entry.second);
}
}
- for (ProfilingInfo* info : copies) {
- ArtMethod* method = info->GetMethod();
+ for (ArtMethod* method : copies) {
+ auto it = profiling_infos.find(method);
+ ProfilingInfo* info = (it == profiling_infos.end()) ? nullptr : it->second;
const DexFile* dex_file = method->GetDexFile();
const std::string base_location = DexFileLoader::GetBaseLocation(dex_file->GetLocation());
if (!ContainsElement(dex_base_locations, base_location)) {
@@ -1449,74 +1451,76 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca
}
std::vector<ProfileMethodInfo::ProfileInlineCache> inline_caches;
- // If the method is still baseline compiled and doesn't meet the inline cache threshold, don't
- // save the inline caches because they might be incomplete.
- // Although we don't deoptimize for incomplete inline caches in AOT-compiled code, inlining
- // leads to larger generated code.
- // If the inline cache is empty the compiler will generate a regular invoke virtual/interface.
- const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
- if (ContainsPc(entry_point) &&
- CodeInfo::IsBaseline(
- OatQuickMethodHeader::FromEntryPoint(entry_point)->GetOptimizedCodeInfoPtr()) &&
- (ProfilingInfo::GetOptimizeThreshold() - info->GetBaselineHotnessCount()) <
- inline_cache_threshold) {
- methods.emplace_back(/*ProfileMethodInfo*/
- MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
- continue;
- }
-
- for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
- std::vector<TypeReference> profile_classes;
- const InlineCache& cache = info->GetInlineCaches()[i];
- ArtMethod* caller = info->GetMethod();
- bool is_missing_types = false;
- for (size_t k = 0; k < InlineCache::kIndividualCacheSize; k++) {
- mirror::Class* cls = cache.classes_[k].Read();
- if (cls == nullptr) {
- break;
- }
+ if (info != nullptr) {
+ // If the method is still baseline compiled and doesn't meet the inline cache threshold, don't
+ // save the inline caches because they might be incomplete.
+ // Although we don't deoptimize for incomplete inline caches in AOT-compiled code, inlining
+ // leads to larger generated code.
+ // If the inline cache is empty the compiler will generate a regular invoke virtual/interface.
+ const void* entry_point = method->GetEntryPointFromQuickCompiledCode();
+ if (ContainsPc(entry_point) &&
+ CodeInfo::IsBaseline(
+ OatQuickMethodHeader::FromEntryPoint(entry_point)->GetOptimizedCodeInfoPtr()) &&
+ (ProfilingInfo::GetOptimizeThreshold() - info->GetBaselineHotnessCount()) <
+ inline_cache_threshold) {
+ methods.emplace_back(/*ProfileMethodInfo*/
+ MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
+ continue;
+ }
- // Check if the receiver is in the boot class path or if it's in the
- // same class loader as the caller. If not, skip it, as there is not
- // much we can do during AOT.
- if (!cls->IsBootStrapClassLoaded() &&
- caller->GetClassLoader() != cls->GetClassLoader()) {
- is_missing_types = true;
- continue;
- }
+ for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
+ std::vector<TypeReference> profile_classes;
+ const InlineCache& cache = info->GetInlineCaches()[i];
+ ArtMethod* caller = info->GetMethod();
+ bool is_missing_types = false;
+ for (size_t k = 0; k < InlineCache::kIndividualCacheSize; k++) {
+ mirror::Class* cls = cache.classes_[k].Read();
+ if (cls == nullptr) {
+ break;
+ }
- const DexFile* class_dex_file = nullptr;
- dex::TypeIndex type_index;
+ // Check if the receiver is in the boot class path or if it's in the
+ // same class loader as the caller. If not, skip it, as there is not
+ // much we can do during AOT.
+ if (!cls->IsBootStrapClassLoaded() &&
+ caller->GetClassLoader() != cls->GetClassLoader()) {
+ is_missing_types = true;
+ continue;
+ }
- if (cls->GetDexCache() == nullptr) {
- DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
- // Make a best effort to find the type index in the method's dex file.
- // We could search all open dex files but that might turn expensive
- // and probably not worth it.
- class_dex_file = dex_file;
- type_index = cls->FindTypeIndexInOtherDexFile(*dex_file);
- } else {
- class_dex_file = &(cls->GetDexFile());
- type_index = cls->GetDexTypeIndex();
- }
- if (!type_index.IsValid()) {
- // Could be a proxy class or an array for which we couldn't find the type index.
- is_missing_types = true;
- continue;
+ const DexFile* class_dex_file = nullptr;
+ dex::TypeIndex type_index;
+
+ if (cls->GetDexCache() == nullptr) {
+ DCHECK(cls->IsArrayClass()) << cls->PrettyClass();
+ // Make a best effort to find the type index in the method's dex file.
+ // We could search all open dex files but that might turn expensive
+ // and probably not worth it.
+ class_dex_file = dex_file;
+ type_index = cls->FindTypeIndexInOtherDexFile(*dex_file);
+ } else {
+ class_dex_file = &(cls->GetDexFile());
+ type_index = cls->GetDexTypeIndex();
+ }
+ if (!type_index.IsValid()) {
+ // Could be a proxy class or an array for which we couldn't find the type index.
+ is_missing_types = true;
+ continue;
+ }
+ if (ContainsElement(dex_base_locations,
+ DexFileLoader::GetBaseLocation(class_dex_file->GetLocation()))) {
+ // Only consider classes from the same apk (including multidex).
+ profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/
+ class_dex_file, type_index);
+ } else {
+ is_missing_types = true;
+ }
}
- if (ContainsElement(dex_base_locations,
- DexFileLoader::GetBaseLocation(class_dex_file->GetLocation()))) {
- // Only consider classes from the same apk (including multidex).
- profile_classes.emplace_back(/*ProfileMethodInfo::ProfileClassReference*/
- class_dex_file, type_index);
- } else {
- is_missing_types = true;
+ if (!profile_classes.empty()) {
+ inline_caches.emplace_back(/*ProfileMethodInfo::ProfileInlineCache*/
+ cache.dex_pc_, is_missing_types, profile_classes);
}
}
- if (!profile_classes.empty()) {
- inline_caches.emplace_back(/*ProfileMethodInfo::ProfileInlineCache*/
- cache.dex_pc_, is_missing_types, profile_classes);
- }
}
methods.emplace_back(/*ProfileMethodInfo*/
MethodReference(dex_file, method->GetDexMethodIndex()), inline_caches);
@@ -1535,17 +1539,13 @@ bool JitCodeCache::NotifyCompilationOf(ArtMethod* method,
CompilationKind compilation_kind,
bool prejit) {
const void* existing_entry_point = method->GetEntryPointFromQuickCompiledCode();
- if (compilation_kind != CompilationKind::kOsr && ContainsPc(existing_entry_point)) {
- OatQuickMethodHeader* method_header =
- OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
- bool is_baseline = (compilation_kind == CompilationKind::kBaseline);
- if (CodeInfo::IsBaseline(method_header->GetOptimizedCodeInfoPtr()) == is_baseline) {
- VLOG(jit) << "Not compiling "
- << method->PrettyMethod()
- << " because it has already been compiled"
- << " kind=" << compilation_kind;
- return false;
- }
+ if (compilation_kind == CompilationKind::kBaseline && ContainsPc(existing_entry_point)) {
+ // The existing entry point is either already baseline, or optimized. No
+ // need to compile.
+ VLOG(jit) << "Not compiling "
+ << method->PrettyMethod()
+ << " baseline, because it has already been compiled";
+ return false;
}
if (method->NeedsClinitCheckBeforeCall() && !prejit) {
diff --git a/runtime/jit/jit_options.cc b/runtime/jit/jit_options.cc
new file mode 100644
index 0000000000..3c31d1dfcd
--- /dev/null
+++ b/runtime/jit/jit_options.cc
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jit_options.h"
+
+#include "runtime_options.h"
+
+namespace art HIDDEN {
+namespace jit {
+
+// Maximum permitted threshold value.
+static constexpr uint32_t kJitMaxThreshold = std::numeric_limits<uint16_t>::max();
+
+static constexpr uint32_t kJitDefaultOptimizeThreshold = 0xffff;
+// Different optimization threshold constants. These default to the equivalent optimization
+// thresholds divided by 2, but can be overridden at the command-line.
+static constexpr uint32_t kJitStressDefaultOptimizeThreshold = kJitDefaultOptimizeThreshold / 2;
+static constexpr uint32_t kJitSlowStressDefaultOptimizeThreshold =
+ kJitStressDefaultOptimizeThreshold / 2;
+
+static constexpr uint32_t kJitDefaultWarmupThreshold = 0x3fff;
+// Different warm-up threshold constants. These default to the equivalent warmup thresholds divided
+// by 2, but can be overridden at the command-line.
+static constexpr uint32_t kJitStressDefaultWarmupThreshold = kJitDefaultWarmupThreshold / 2;
+static constexpr uint32_t kJitSlowStressDefaultWarmupThreshold =
+ kJitStressDefaultWarmupThreshold / 2;
+
+static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000;
+static constexpr size_t kDefaultInvokeTransitionWeightRatio = 500;
+
+DEFINE_RUNTIME_DEBUG_FLAG(JitOptions, kSlowMode);
+
+JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) {
+ auto* jit_options = new JitOptions;
+ jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation);
+ jit_options->use_profiled_jit_compilation_ =
+ options.GetOrDefault(RuntimeArgumentMap::UseProfiledJitCompilation);
+
+ jit_options->code_cache_initial_capacity_ =
+ options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity);
+ jit_options->code_cache_max_capacity_ =
+ options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheMaxCapacity);
+ jit_options->dump_info_on_shutdown_ =
+ options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
+ jit_options->profile_saver_options_ =
+ options.GetOrDefault(RuntimeArgumentMap::ProfileSaverOpts);
+ jit_options->thread_pool_pthread_priority_ =
+ options.GetOrDefault(RuntimeArgumentMap::JITPoolThreadPthreadPriority);
+ jit_options->zygote_thread_pool_pthread_priority_ =
+ options.GetOrDefault(RuntimeArgumentMap::JITZygotePoolThreadPthreadPriority);
+
+ // Set default optimize threshold to aid with checking defaults.
+ jit_options->optimize_threshold_ = kIsDebugBuild
+ ? (kSlowMode ? kJitSlowStressDefaultOptimizeThreshold : kJitStressDefaultOptimizeThreshold)
+ : kJitDefaultOptimizeThreshold;
+
+ // Set default warm-up threshold to aid with checking defaults.
+ jit_options->warmup_threshold_ = kIsDebugBuild
+ ? (kSlowMode ? kJitSlowStressDefaultWarmupThreshold : kJitStressDefaultWarmupThreshold)
+ : kJitDefaultWarmupThreshold;
+
+ if (options.Exists(RuntimeArgumentMap::JITOptimizeThreshold)) {
+ jit_options->optimize_threshold_ = *options.Get(RuntimeArgumentMap::JITOptimizeThreshold);
+ }
+ DCHECK_LE(jit_options->optimize_threshold_, kJitMaxThreshold);
+
+ if (options.Exists(RuntimeArgumentMap::JITWarmupThreshold)) {
+ jit_options->warmup_threshold_ = *options.Get(RuntimeArgumentMap::JITWarmupThreshold);
+ }
+ DCHECK_LE(jit_options->warmup_threshold_, kJitMaxThreshold);
+
+ if (options.Exists(RuntimeArgumentMap::JITPriorityThreadWeight)) {
+ jit_options->priority_thread_weight_ =
+ *options.Get(RuntimeArgumentMap::JITPriorityThreadWeight);
+ if (jit_options->priority_thread_weight_ > jit_options->warmup_threshold_) {
+ LOG(FATAL) << "Priority thread weight is above the warmup threshold.";
+ } else if (jit_options->priority_thread_weight_ == 0) {
+ LOG(FATAL) << "Priority thread weight cannot be 0.";
+ }
+ } else {
+ jit_options->priority_thread_weight_ = std::max(
+ jit_options->warmup_threshold_ / kDefaultPriorityThreadWeightRatio,
+ static_cast<size_t>(1));
+ }
+
+ if (options.Exists(RuntimeArgumentMap::JITInvokeTransitionWeight)) {
+ jit_options->invoke_transition_weight_ =
+ *options.Get(RuntimeArgumentMap::JITInvokeTransitionWeight);
+ if (jit_options->invoke_transition_weight_ > jit_options->warmup_threshold_) {
+ LOG(FATAL) << "Invoke transition weight is above the warmup threshold.";
+ } else if (jit_options->invoke_transition_weight_ == 0) {
+ LOG(FATAL) << "Invoke transition weight cannot be 0.";
+ }
+ } else {
+ jit_options->invoke_transition_weight_ = std::max(
+ jit_options->warmup_threshold_ / kDefaultInvokeTransitionWeightRatio,
+ static_cast<size_t>(1));
+ }
+
+ return jit_options;
+}
+
+} // namespace jit
+} // namespace art
diff --git a/runtime/jit/jit_options.h b/runtime/jit/jit_options.h
new file mode 100644
index 0000000000..be305b4d63
--- /dev/null
+++ b/runtime/jit/jit_options.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JIT_JIT_OPTIONS_H_
+#define ART_RUNTIME_JIT_JIT_OPTIONS_H_
+
+#include "base/macros.h"
+#include "base/runtime_debug.h"
+#include "profile_saver_options.h"
+
+namespace art HIDDEN {
+
+struct RuntimeArgumentMap;
+
+namespace jit {
+
+// At what priority to schedule jit threads. 9 is the lowest foreground priority on device.
+// See android/os/Process.java.
+static constexpr int kJitPoolThreadPthreadDefaultPriority = 9;
+// At what priority to schedule jit zygote threads compiling profiles in the background.
+// 19 is the lowest background priority on device.
+// See android/os/Process.java.
+static constexpr int kJitZygotePoolThreadPthreadDefaultPriority = 19;
+
+class JitOptions {
+ public:
+ DECLARE_RUNTIME_DEBUG_FLAG(kSlowMode);
+
+ static JitOptions* CreateFromRuntimeArguments(const RuntimeArgumentMap& options);
+
+ uint16_t GetOptimizeThreshold() const {
+ return optimize_threshold_;
+ }
+
+ uint16_t GetWarmupThreshold() const {
+ return warmup_threshold_;
+ }
+
+ uint16_t GetPriorityThreadWeight() const {
+ return priority_thread_weight_;
+ }
+
+ uint16_t GetInvokeTransitionWeight() const {
+ return invoke_transition_weight_;
+ }
+
+ size_t GetCodeCacheInitialCapacity() const {
+ return code_cache_initial_capacity_;
+ }
+
+ size_t GetCodeCacheMaxCapacity() const {
+ return code_cache_max_capacity_;
+ }
+
+ bool DumpJitInfoOnShutdown() const {
+ return dump_info_on_shutdown_;
+ }
+
+ const ProfileSaverOptions& GetProfileSaverOptions() const {
+ return profile_saver_options_;
+ }
+
+ bool GetSaveProfilingInfo() const {
+ return profile_saver_options_.IsEnabled();
+ }
+
+ int GetThreadPoolPthreadPriority() const {
+ return thread_pool_pthread_priority_;
+ }
+
+ int GetZygoteThreadPoolPthreadPriority() const {
+ return zygote_thread_pool_pthread_priority_;
+ }
+
+ bool UseJitCompilation() const {
+ return use_jit_compilation_;
+ }
+
+ bool UseProfiledJitCompilation() const {
+ return use_profiled_jit_compilation_;
+ }
+
+ void SetUseJitCompilation(bool b) {
+ use_jit_compilation_ = b;
+ }
+
+ void SetSaveProfilingInfo(bool save_profiling_info) {
+ profile_saver_options_.SetEnabled(save_profiling_info);
+ }
+
+ void SetWaitForJitNotificationsToSaveProfile(bool value) {
+ profile_saver_options_.SetWaitForJitNotificationsToSave(value);
+ }
+
+ void SetJitAtFirstUse() {
+ use_jit_compilation_ = true;
+ optimize_threshold_ = 0;
+ }
+
+ void SetUseBaselineCompiler() {
+ use_baseline_compiler_ = true;
+ }
+
+ bool UseBaselineCompiler() const {
+ return use_baseline_compiler_;
+ }
+
+ private:
+ // We add the sample in batches of size kJitSamplesBatchSize.
+ // This method rounds the threshold so that it is multiple of the batch size.
+ static uint32_t RoundUpThreshold(uint32_t threshold);
+
+ bool use_jit_compilation_;
+ bool use_profiled_jit_compilation_;
+ bool use_baseline_compiler_;
+ size_t code_cache_initial_capacity_;
+ size_t code_cache_max_capacity_;
+ uint32_t optimize_threshold_;
+ uint32_t warmup_threshold_;
+ uint16_t priority_thread_weight_;
+ uint16_t invoke_transition_weight_;
+ bool dump_info_on_shutdown_;
+ int thread_pool_pthread_priority_;
+ int zygote_thread_pool_pthread_priority_;
+ ProfileSaverOptions profile_saver_options_;
+
+ JitOptions()
+ : use_jit_compilation_(false),
+ use_profiled_jit_compilation_(false),
+ use_baseline_compiler_(false),
+ code_cache_initial_capacity_(0),
+ code_cache_max_capacity_(0),
+ optimize_threshold_(0),
+ warmup_threshold_(0),
+ priority_thread_weight_(0),
+ invoke_transition_weight_(0),
+ dump_info_on_shutdown_(false),
+ thread_pool_pthread_priority_(kJitPoolThreadPthreadDefaultPriority),
+ zygote_thread_pool_pthread_priority_(kJitZygotePoolThreadPthreadDefaultPriority) {}
+
+ DISALLOW_COPY_AND_ASSIGN(JitOptions);
+};
+
+} // namespace jit
+} // namespace art
+
+#endif // ART_RUNTIME_JIT_JIT_OPTIONS_H_
diff --git a/runtime/jit/profiling_info_test.cc b/runtime/jit/profiling_info_test.cc
index a8a95774d8..674cb73dcd 100644
--- a/runtime/jit/profiling_info_test.cc
+++ b/runtime/jit/profiling_info_test.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "profiling_info.h"
+
#include <gtest/gtest.h>
#include <stdio.h>
diff --git a/runtime/jni/jni_env_ext.cc b/runtime/jni/jni_env_ext.cc
index 26b154c89a..3b10ce29c9 100644
--- a/runtime/jni/jni_env_ext.cc
+++ b/runtime/jni/jni_env_ext.cc
@@ -142,27 +142,27 @@ void JNIEnvExt::PopFrame() {
// are tests in jni_internal_test to match the results against the actual values.
// This is encoding the knowledge of the structure and layout of JNIEnv fields.
-static size_t JNIEnvSize(size_t pointer_size) {
+static size_t JNIEnvSize(PointerSize pointer_size) {
// A single pointer.
- return pointer_size;
+ return static_cast<size_t>(pointer_size);
}
-inline MemberOffset JNIEnvExt::LocalReferenceTableOffset(size_t pointer_size) {
+inline MemberOffset JNIEnvExt::LocalReferenceTableOffset(PointerSize pointer_size) {
return MemberOffset(JNIEnvSize(pointer_size) +
- 2 * pointer_size); // Thread* self + JavaVMExt* vm
+ 2 * static_cast<size_t>(pointer_size)); // Thread* self + JavaVMExt* vm
}
-MemberOffset JNIEnvExt::SegmentStateOffset(size_t pointer_size) {
+MemberOffset JNIEnvExt::LrtSegmentStateOffset(PointerSize pointer_size) {
return MemberOffset(LocalReferenceTableOffset(pointer_size).SizeValue() +
jni::LocalReferenceTable::SegmentStateOffset().SizeValue());
}
-MemberOffset JNIEnvExt::LocalRefCookieOffset(size_t pointer_size) {
+MemberOffset JNIEnvExt::LrtPreviousStateOffset(PointerSize pointer_size) {
return MemberOffset(LocalReferenceTableOffset(pointer_size).SizeValue() +
jni::LocalReferenceTable::PreviousStateOffset().SizeValue());
}
-MemberOffset JNIEnvExt::SelfOffset(size_t pointer_size) {
+MemberOffset JNIEnvExt::SelfOffset(PointerSize pointer_size) {
return MemberOffset(JNIEnvSize(pointer_size));
}
diff --git a/runtime/jni/jni_env_ext.h b/runtime/jni/jni_env_ext.h
index 2e8b52e4cc..ca5cc0a7ba 100644
--- a/runtime/jni/jni_env_ext.h
+++ b/runtime/jni/jni_env_ext.h
@@ -42,9 +42,9 @@ class JNIEnvExt : public JNIEnv {
// Creates a new JNIEnvExt. Returns null on error, in which case error_msg
// will contain a description of the error.
static JNIEnvExt* Create(Thread* self, JavaVMExt* vm, std::string* error_msg);
- static MemberOffset SegmentStateOffset(size_t pointer_size);
- static MemberOffset LocalRefCookieOffset(size_t pointer_size);
- static MemberOffset SelfOffset(size_t pointer_size);
+ static MemberOffset LrtSegmentStateOffset(PointerSize pointer_size);
+ static MemberOffset LrtPreviousStateOffset(PointerSize pointer_size);
+ static MemberOffset SelfOffset(PointerSize pointer_size);
static jint GetEnvHandler(JavaVMExt* vm, /*out*/void** out, jint version);
~JNIEnvExt();
@@ -147,7 +147,7 @@ class JNIEnvExt : public JNIEnv {
REQUIRES(!Locks::thread_list_lock_, !Locks::jni_function_table_lock_);
private:
- static MemberOffset LocalReferenceTableOffset(size_t pointer_size);
+ static MemberOffset LocalReferenceTableOffset(PointerSize pointer_size);
// Override of function tables. This applies to both default as well as instrumented (CheckJNI)
// function tables.
diff --git a/runtime/jni/jni_internal_test.cc b/runtime/jni/jni_internal_test.cc
index 155048a30d..ed97e4d4c8 100644
--- a/runtime/jni/jni_internal_test.cc
+++ b/runtime/jni/jni_internal_test.cc
@@ -2634,19 +2634,22 @@ TEST_F(JniInternalTest, IndirectReferenceTableOffsets) {
// Test the offset computation of JNIEnvExt offsets. b/26071368.
TEST_F(JniInternalTest, JNIEnvExtOffsets) {
- EXPECT_EQ(OFFSETOF_MEMBER(JNIEnvExt, self_), JNIEnvExt::SelfOffset(sizeof(void*)).Uint32Value());
+ EXPECT_EQ(OFFSETOF_MEMBER(JNIEnvExt, self_),
+ JNIEnvExt::SelfOffset(kRuntimePointerSize).Uint32Value());
// `previous_state_` amd `segment_state_` are private in the IndirectReferenceTable.
// So this test isn't as good as we'd hope it to be.
uint32_t previous_state_now =
OFFSETOF_MEMBER(JNIEnvExt, locals_) +
jni::LocalReferenceTable::PreviousStateOffset().Uint32Value();
- uint32_t previous_state_computed = JNIEnvExt::LocalRefCookieOffset(sizeof(void*)).Uint32Value();
+ uint32_t previous_state_computed =
+ JNIEnvExt::LrtPreviousStateOffset(kRuntimePointerSize).Uint32Value();
EXPECT_EQ(previous_state_now, previous_state_computed);
uint32_t segment_state_now =
OFFSETOF_MEMBER(JNIEnvExt, locals_) +
jni::LocalReferenceTable::SegmentStateOffset().Uint32Value();
- uint32_t segment_state_computed = JNIEnvExt::SegmentStateOffset(sizeof(void*)).Uint32Value();
+ uint32_t segment_state_computed =
+ JNIEnvExt::LrtSegmentStateOffset(kRuntimePointerSize).Uint32Value();
EXPECT_EQ(segment_state_now, segment_state_computed);
}
diff --git a/runtime/metrics/statsd.cc b/runtime/metrics/statsd.cc
index e6d26d9746..67d677be9c 100644
--- a/runtime/metrics/statsd.cc
+++ b/runtime/metrics/statsd.cc
@@ -416,30 +416,18 @@ class StatsdBackend : public MetricsBackend {
std::unique_ptr<MetricsBackend> CreateStatsdBackend() { return std::make_unique<StatsdBackend>(); }
-AStatsManager_PullAtomCallbackReturn DeviceStatusCallback(int32_t atom_tag,
- AStatsEventList* data,
- [[maybe_unused]] void* cookie) {
- if (atom_tag == statsd::ART_DEVICE_STATUS) {
- Runtime* runtime = Runtime::Current();
- int32_t boot_image_status;
- if (runtime->GetHeap()->HasBootImageSpace() && !runtime->HasImageWithProfile()) {
- boot_image_status = statsd::ART_DEVICE_DATUM_REPORTED__BOOT_IMAGE_STATUS__STATUS_FULL;
- } else if (runtime->GetHeap()->HasBootImageSpace() &&
- runtime->GetHeap()->GetBootImageSpaces()[0]->GetProfileFiles().empty()) {
- boot_image_status = statsd::ART_DEVICE_DATUM_REPORTED__BOOT_IMAGE_STATUS__STATUS_MINIMAL;
- } else {
- boot_image_status = statsd::ART_DEVICE_DATUM_REPORTED__BOOT_IMAGE_STATUS__STATUS_NONE;
- }
- statsd::addAStatsEvent(data, atom_tag, boot_image_status);
- return AStatsManager_PULL_SUCCESS;
+void ReportDeviceMetrics() {
+ Runtime* runtime = Runtime::Current();
+ int32_t boot_image_status;
+ if (runtime->GetHeap()->HasBootImageSpace() && !runtime->HasImageWithProfile()) {
+ boot_image_status = statsd::ART_DEVICE_DATUM_REPORTED__BOOT_IMAGE_STATUS__STATUS_FULL;
+ } else if (runtime->GetHeap()->HasBootImageSpace() &&
+ runtime->GetHeap()->GetBootImageSpaces()[0]->GetProfileFiles().empty()) {
+ boot_image_status = statsd::ART_DEVICE_DATUM_REPORTED__BOOT_IMAGE_STATUS__STATUS_MINIMAL;
+ } else {
+ boot_image_status = statsd::ART_DEVICE_DATUM_REPORTED__BOOT_IMAGE_STATUS__STATUS_NONE;
}
-
- return AStatsManager_PULL_SKIP;
-}
-
-void SetupCallbackForDeviceStatus() {
- AStatsManager_setPullAtomCallback(
- statsd::ART_DEVICE_STATUS, /*metadata=*/nullptr, DeviceStatusCallback, /*cookie=*/nullptr);
+ statsd::stats_write(statsd::ART_DEVICE_DATUM_REPORTED, boot_image_status);
}
} // namespace metrics
diff --git a/runtime/metrics/statsd.h b/runtime/metrics/statsd.h
index 00bd595ca1..ae53a22e85 100644
--- a/runtime/metrics/statsd.h
+++ b/runtime/metrics/statsd.h
@@ -29,10 +29,10 @@ class MetricsBackend;
// Statsd is only supported on Android
#ifdef __ANDROID__
std::unique_ptr<MetricsBackend> CreateStatsdBackend();
-void SetupCallbackForDeviceStatus();
+void ReportDeviceMetrics();
#else
inline std::unique_ptr<MetricsBackend> CreateStatsdBackend() { return nullptr; }
-inline void SetupCallbackForDeviceStatus() {}
+inline void ReportDeviceMetrics() {}
#endif
} // namespace metrics
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 15f0fd306f..b69086c971 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -76,10 +76,7 @@ class DexCache;
class Field;
class IfTable;
class Method;
-template <typename T> struct PACKED(8) DexCachePair;
-
-using StringDexCachePair = DexCachePair<String>;
-using StringDexCacheType = std::atomic<StringDexCachePair>;
+template <typename T> struct alignas(8) DexCachePair;
// C++ mirror of java.lang.Class
class EXPORT MANAGED Class final : public Object {
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 921d8028e7..d93464759f 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -53,7 +53,7 @@ class DexCache;
class MethodType;
class String;
-template <typename T> struct PACKED(8) DexCachePair {
+template <typename T> struct alignas(8) DexCachePair {
GcRoot<T> object;
uint32_t index;
// The array is initially [ {0,0}, {0,0}, {0,0} ... ]
@@ -90,7 +90,7 @@ template <typename T> struct PACKED(8) DexCachePair {
T* GetObjectForIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_);
};
-template <typename T> struct PACKED(2 * __SIZEOF_POINTER__) NativeDexCachePair {
+template <typename T> struct alignas(2 * __SIZEOF_POINTER__) NativeDexCachePair {
T* object;
size_t index;
// This is similar to DexCachePair except that we're storing a native pointer
diff --git a/runtime/mirror/object-refvisitor-inl.h b/runtime/mirror/object-refvisitor-inl.h
index fa2fbaec27..d4f474974c 100644
--- a/runtime/mirror/object-refvisitor-inl.h
+++ b/runtime/mirror/object-refvisitor-inl.h
@@ -145,7 +145,6 @@ inline size_t Object::VisitRefsForCompaction(const Visitor& visitor,
VisitInstanceFieldsReferences<kVerifyFlags, kReadBarrierOption>(klass, visitor);
size = kFetchObjSize ? klass->GetObjectSize<kSizeOfFlags>() : 0;
} else if ((class_flags & kClassFlagNoReferenceFields) != 0) {
- CheckNoReferenceField<kVerifyFlags, kReadBarrierOption>(klass);
if ((class_flags & kClassFlagString) != 0) {
size = kFetchObjSize ? static_cast<String*>(this)->SizeOf<kSizeOfFlags>() : 0;
} else if (klass->IsArrayClass<kVerifyFlags>()) {
@@ -168,7 +167,6 @@ inline size_t Object::VisitRefsForCompaction(const Visitor& visitor,
visitor);
size = kFetchObjSize ? as_klass->SizeOf<kSizeOfFlags>() : 0;
} else if (class_flags == kClassFlagObjectArray) {
- DCHECK((klass->IsObjectArrayClass<kVerifyFlags, kReadBarrierOption>()));
ObjPtr<ObjectArray<Object>> obj_arr = ObjPtr<ObjectArray<Object>>::DownCast(this);
obj_arr->VisitReferences(visitor, begin, end);
size = kFetchObjSize ?
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index cbd329f4f3..3458508306 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -1054,8 +1054,11 @@ void Monitor::Inflate(Thread* self, Thread* owner, ObjPtr<mirror::Object> obj, i
}
}
-void Monitor::InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
- uint32_t hash_code) {
+void Monitor::InflateThinLocked(Thread* self,
+ Handle<mirror::Object> obj,
+ LockWord lock_word,
+ uint32_t hash_code,
+ int attempt_of_4) {
DCHECK_EQ(lock_word.GetState(), LockWord::kThinLocked);
uint32_t owner_thread_id = lock_word.ThinLockOwner();
if (owner_thread_id == self->GetThreadId()) {
@@ -1068,7 +1071,8 @@ void Monitor::InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWo
Thread* owner;
{
ScopedThreadSuspension sts(self, ThreadState::kWaitingForLockInflation);
- owner = thread_list->SuspendThreadByThreadId(owner_thread_id, SuspendReason::kInternal);
+ owner = thread_list->SuspendThreadByThreadId(
+ owner_thread_id, SuspendReason::kInternal, attempt_of_4);
}
if (owner != nullptr) {
// We succeeded in suspending the thread, check the lock's status didn't change.
@@ -1107,6 +1111,7 @@ ObjPtr<mirror::Object> Monitor::MonitorEnter(Thread* self,
uint32_t thread_id = self->GetThreadId();
size_t contention_count = 0;
constexpr size_t kExtraSpinIters = 100;
+ int inflation_attempt = 1;
StackHandleScope<1> hs(self);
Handle<mirror::Object> h_obj(hs.NewHandle(obj));
while (true) {
@@ -1153,7 +1158,7 @@ ObjPtr<mirror::Object> Monitor::MonitorEnter(Thread* self,
continue; // Go again.
} else {
// We'd overflow the recursion count, so inflate the monitor.
- InflateThinLocked(self, h_obj, lock_word, 0);
+ InflateThinLocked(self, h_obj, lock_word, 0, inflation_attempt++);
}
} else {
if (trylock) {
@@ -1174,7 +1179,7 @@ ObjPtr<mirror::Object> Monitor::MonitorEnter(Thread* self,
} else {
contention_count = 0;
// No ordering required for initial lockword read. Install rereads it anyway.
- InflateThinLocked(self, h_obj, lock_word, 0);
+ InflateThinLocked(self, h_obj, lock_word, 0, inflation_attempt++);
}
}
continue; // Start from the beginning.
diff --git a/runtime/monitor.h b/runtime/monitor.h
index d142212e18..0ee2dbb765 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -156,8 +156,14 @@ class Monitor {
}
// Inflate the lock on obj. May fail to inflate for spurious reasons, always re-check.
- static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
- uint32_t hash_code) REQUIRES_SHARED(Locks::mutator_lock_);
+ // attempt_of_4 is in 1..4 inclusive or 0. A non-zero value indicates that we are retrying
+ // up to 4 times, and should only abort on 4. Zero means we are only trying once, with the
+ // full suspend timeout instead of a quarter.
+ static void InflateThinLocked(Thread* self,
+ Handle<mirror::Object> obj,
+ LockWord lock_word,
+ uint32_t hash_code,
+ int attempt_of_4 = 0) REQUIRES_SHARED(Locks::mutator_lock_);
// Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that
// does not allow a thread suspension in the middle. TODO: maybe make this exclusive.
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index e774f07dc9..593b98dc80 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -66,9 +66,7 @@ extern "C" void android_set_application_target_sdk_version(uint32_t version);
#include "thread-inl.h"
#include "thread_list.h"
-// TODO(260881207): should be HIDDEN, but some apps fail to launch
-// (e.g. b/319255249)
-namespace art {
+namespace art HIDDEN {
using android::base::StringPrintf;
@@ -592,6 +590,15 @@ static JNINativeMethod gMethods[] = {
};
void register_dalvik_system_VMRuntime(JNIEnv* env) {
+ if (Runtime::Current()->GetTargetSdkVersion() <= static_cast<uint32_t>(SdkVersion::kU)) {
+ real_register_dalvik_system_VMRuntime(env);
+ } else {
+ Runtime::Current()->Abort(
+ "Call to internal function 'register_dalvik_system_VMRuntime' is not allowed");
+ }
+}
+
+void real_register_dalvik_system_VMRuntime(JNIEnv* env) {
REGISTER_NATIVE_METHODS("dalvik/system/VMRuntime");
}
diff --git a/runtime/native/dalvik_system_VMRuntime.h b/runtime/native/dalvik_system_VMRuntime.h
index d66c107a30..3876a60753 100644
--- a/runtime/native/dalvik_system_VMRuntime.h
+++ b/runtime/native/dalvik_system_VMRuntime.h
@@ -21,11 +21,15 @@
#include "base/macros.h"
-// TODO(260881207): should be HIDDEN, but some apps fail to launch
-// (e.g. b/319255249)
-namespace art {
+namespace art HIDDEN {
-void register_dalvik_system_VMRuntime(JNIEnv* env);
+// TODO(260881207): register_dalvik_system_VMRuntime should be HIDDEN,
+// but some apps fail to launch (e.g. b/319255249).
+// The function is still exported for now, but it does a targetSdk check
+// and aborts for SdkVersion after U. Libart code should use
+// `real_register...` until exported function is removed.
+EXPORT void register_dalvik_system_VMRuntime(JNIEnv* env);
+void real_register_dalvik_system_VMRuntime(JNIEnv* env);
} // namespace art
diff --git a/runtime/native/jdk_internal_misc_Unsafe.cc b/runtime/native/jdk_internal_misc_Unsafe.cc
index 10c6b2da4f..ba64c818c6 100644
--- a/runtime/native/jdk_internal_misc_Unsafe.cc
+++ b/runtime/native/jdk_internal_misc_Unsafe.cc
@@ -491,8 +491,9 @@ static void Unsafe_unpark(JNIEnv* env, jobject, jobject jthread) {
ThrowIllegalArgumentException("Argument to unpark() was not a Thread");
return;
}
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
- art::Thread* thread = art::Thread::FromManagedThread(soa, mirror_thread);
+ Thread* self = soa.Self();
+ art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Thread* thread = art::Thread::FromManagedThread(self, mirror_thread);
if (thread != nullptr) {
thread->Unpark();
} else {
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 573b5a9db2..38fe72555c 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -531,8 +531,9 @@ static void Unsafe_unpark(JNIEnv* env, jobject, jobject jthread) {
ThrowIllegalArgumentException("Argument to unpark() was not a Thread");
return;
}
- art::MutexLock mu(soa.Self(), *art::Locks::thread_list_lock_);
- art::Thread* thread = art::Thread::FromManagedThread(soa, mirror_thread);
+ Thread* self = soa.Self();
+ art::MutexLock mu(self, *art::Locks::thread_list_lock_);
+ art::Thread* thread = art::Thread::FromManagedThread(self, mirror_thread);
if (thread != nullptr) {
thread->Unpark();
} else {
diff --git a/runtime/nterp_helpers.cc b/runtime/nterp_helpers.cc
index cfe5610239..ba93df693d 100644
--- a/runtime/nterp_helpers.cc
+++ b/runtime/nterp_helpers.cc
@@ -234,244 +234,8 @@ bool CanMethodUseNterp(ArtMethod* method, InstructionSet isa) {
method->IsProxyMethod()) {
return false;
}
- if (isa == InstructionSet::kRiscv64) {
- if (method->GetDexFile()->IsCompactDexFile()) {
- return false; // Riscv64 nterp does not support compact dex yet.
- }
- for (DexInstructionPcPair pair : method->DexInstructions()) {
- // TODO(riscv64): Add support for more instructions.
- // Remove the check when all instructions are supported.
- // Cases are listed in opcode order (DEX_INSTRUCTION_LIST).
- switch (pair->Opcode()) {
- case Instruction::NOP:
- case Instruction::MOVE:
- case Instruction::MOVE_FROM16:
- case Instruction::MOVE_16:
- case Instruction::MOVE_WIDE:
- case Instruction::MOVE_WIDE_FROM16:
- case Instruction::MOVE_WIDE_16:
- case Instruction::MOVE_OBJECT:
- case Instruction::MOVE_OBJECT_FROM16:
- case Instruction::MOVE_OBJECT_16:
- case Instruction::MOVE_RESULT:
- case Instruction::MOVE_RESULT_WIDE:
- case Instruction::MOVE_RESULT_OBJECT:
- case Instruction::MOVE_EXCEPTION:
- case Instruction::RETURN_VOID:
- case Instruction::RETURN:
- case Instruction::RETURN_WIDE:
- case Instruction::RETURN_OBJECT:
- case Instruction::CONST_4:
- case Instruction::CONST_16:
- case Instruction::CONST:
- case Instruction::CONST_HIGH16:
- case Instruction::CONST_WIDE_16:
- case Instruction::CONST_WIDE_32:
- case Instruction::CONST_WIDE:
- case Instruction::CONST_WIDE_HIGH16:
- case Instruction::CONST_STRING:
- case Instruction::CONST_STRING_JUMBO:
- case Instruction::CONST_CLASS:
- case Instruction::MONITOR_ENTER:
- case Instruction::MONITOR_EXIT:
- case Instruction::CHECK_CAST:
- case Instruction::INSTANCE_OF:
- case Instruction::ARRAY_LENGTH:
- case Instruction::NEW_INSTANCE:
- case Instruction::NEW_ARRAY:
- case Instruction::FILLED_NEW_ARRAY:
- case Instruction::FILLED_NEW_ARRAY_RANGE:
- case Instruction::FILL_ARRAY_DATA:
- case Instruction::THROW:
- case Instruction::GOTO:
- case Instruction::GOTO_16:
- case Instruction::GOTO_32:
- case Instruction::PACKED_SWITCH:
- case Instruction::SPARSE_SWITCH:
- case Instruction::CMPL_FLOAT:
- case Instruction::CMPG_FLOAT:
- case Instruction::CMPL_DOUBLE:
- case Instruction::CMPG_DOUBLE:
- case Instruction::CMP_LONG:
- case Instruction::IF_EQ:
- case Instruction::IF_NE:
- case Instruction::IF_LT:
- case Instruction::IF_GE:
- case Instruction::IF_GT:
- case Instruction::IF_LE:
- case Instruction::IF_EQZ:
- case Instruction::IF_NEZ:
- case Instruction::IF_LTZ:
- case Instruction::IF_GEZ:
- case Instruction::IF_GTZ:
- case Instruction::IF_LEZ:
- case Instruction::AGET:
- case Instruction::AGET_WIDE:
- case Instruction::AGET_OBJECT:
- case Instruction::AGET_BOOLEAN:
- case Instruction::AGET_BYTE:
- case Instruction::AGET_CHAR:
- case Instruction::AGET_SHORT:
- case Instruction::APUT:
- case Instruction::APUT_WIDE:
- case Instruction::APUT_OBJECT:
- case Instruction::APUT_BOOLEAN:
- case Instruction::APUT_BYTE:
- case Instruction::APUT_CHAR:
- case Instruction::APUT_SHORT:
- case Instruction::IGET:
- case Instruction::IGET_WIDE:
- case Instruction::IGET_OBJECT:
- case Instruction::IGET_BOOLEAN:
- case Instruction::IGET_BYTE:
- case Instruction::IGET_CHAR:
- case Instruction::IGET_SHORT:
- case Instruction::IPUT:
- case Instruction::IPUT_WIDE:
- case Instruction::IPUT_OBJECT:
- case Instruction::IPUT_BOOLEAN:
- case Instruction::IPUT_BYTE:
- case Instruction::IPUT_CHAR:
- case Instruction::IPUT_SHORT:
- case Instruction::SGET:
- case Instruction::SGET_WIDE:
- case Instruction::SGET_OBJECT:
- case Instruction::SGET_BOOLEAN:
- case Instruction::SGET_BYTE:
- case Instruction::SGET_CHAR:
- case Instruction::SGET_SHORT:
- case Instruction::SPUT:
- case Instruction::SPUT_WIDE:
- case Instruction::SPUT_OBJECT:
- case Instruction::SPUT_BOOLEAN:
- case Instruction::SPUT_BYTE:
- case Instruction::SPUT_CHAR:
- case Instruction::SPUT_SHORT:
- case Instruction::INVOKE_VIRTUAL:
- case Instruction::INVOKE_SUPER:
- case Instruction::INVOKE_DIRECT:
- case Instruction::INVOKE_STATIC:
- case Instruction::INVOKE_INTERFACE:
- case Instruction::INVOKE_VIRTUAL_RANGE:
- case Instruction::INVOKE_SUPER_RANGE:
- case Instruction::INVOKE_DIRECT_RANGE:
- case Instruction::INVOKE_STATIC_RANGE:
- case Instruction::INVOKE_INTERFACE_RANGE:
- case Instruction::NEG_INT:
- case Instruction::NOT_INT:
- case Instruction::NEG_LONG:
- case Instruction::NOT_LONG:
- case Instruction::NEG_FLOAT:
- case Instruction::NEG_DOUBLE:
- case Instruction::INT_TO_LONG:
- case Instruction::INT_TO_FLOAT:
- case Instruction::INT_TO_DOUBLE:
- case Instruction::LONG_TO_INT:
- case Instruction::LONG_TO_FLOAT:
- case Instruction::LONG_TO_DOUBLE:
- case Instruction::FLOAT_TO_INT:
- case Instruction::FLOAT_TO_LONG:
- case Instruction::FLOAT_TO_DOUBLE:
- case Instruction::DOUBLE_TO_INT:
- case Instruction::DOUBLE_TO_LONG:
- case Instruction::DOUBLE_TO_FLOAT:
- case Instruction::INT_TO_BYTE:
- case Instruction::INT_TO_CHAR:
- case Instruction::INT_TO_SHORT:
- case Instruction::ADD_INT:
- case Instruction::SUB_INT:
- case Instruction::MUL_INT:
- case Instruction::DIV_INT:
- case Instruction::REM_INT:
- case Instruction::AND_INT:
- case Instruction::OR_INT:
- case Instruction::XOR_INT:
- case Instruction::SHL_INT:
- case Instruction::SHR_INT:
- case Instruction::USHR_INT:
- case Instruction::ADD_LONG:
- case Instruction::SUB_LONG:
- case Instruction::MUL_LONG:
- case Instruction::DIV_LONG:
- case Instruction::REM_LONG:
- case Instruction::AND_LONG:
- case Instruction::OR_LONG:
- case Instruction::XOR_LONG:
- case Instruction::SHL_LONG:
- case Instruction::SHR_LONG:
- case Instruction::USHR_LONG:
- case Instruction::ADD_FLOAT:
- case Instruction::SUB_FLOAT:
- case Instruction::MUL_FLOAT:
- case Instruction::DIV_FLOAT:
- case Instruction::REM_FLOAT:
- case Instruction::ADD_DOUBLE:
- case Instruction::SUB_DOUBLE:
- case Instruction::MUL_DOUBLE:
- case Instruction::DIV_DOUBLE:
- case Instruction::REM_DOUBLE:
- case Instruction::ADD_INT_2ADDR:
- case Instruction::SUB_INT_2ADDR:
- case Instruction::MUL_INT_2ADDR:
- case Instruction::DIV_INT_2ADDR:
- case Instruction::REM_INT_2ADDR:
- case Instruction::AND_INT_2ADDR:
- case Instruction::OR_INT_2ADDR:
- case Instruction::XOR_INT_2ADDR:
- case Instruction::SHL_INT_2ADDR:
- case Instruction::SHR_INT_2ADDR:
- case Instruction::USHR_INT_2ADDR:
- case Instruction::ADD_LONG_2ADDR:
- case Instruction::SUB_LONG_2ADDR:
- case Instruction::MUL_LONG_2ADDR:
- case Instruction::DIV_LONG_2ADDR:
- case Instruction::REM_LONG_2ADDR:
- case Instruction::AND_LONG_2ADDR:
- case Instruction::OR_LONG_2ADDR:
- case Instruction::XOR_LONG_2ADDR:
- case Instruction::SHL_LONG_2ADDR:
- case Instruction::SHR_LONG_2ADDR:
- case Instruction::USHR_LONG_2ADDR:
- case Instruction::ADD_FLOAT_2ADDR:
- case Instruction::SUB_FLOAT_2ADDR:
- case Instruction::MUL_FLOAT_2ADDR:
- case Instruction::DIV_FLOAT_2ADDR:
- case Instruction::REM_FLOAT_2ADDR:
- case Instruction::ADD_DOUBLE_2ADDR:
- case Instruction::SUB_DOUBLE_2ADDR:
- case Instruction::MUL_DOUBLE_2ADDR:
- case Instruction::DIV_DOUBLE_2ADDR:
- case Instruction::REM_DOUBLE_2ADDR:
- case Instruction::ADD_INT_LIT16:
- case Instruction::RSUB_INT:
- case Instruction::MUL_INT_LIT16:
- case Instruction::DIV_INT_LIT16:
- case Instruction::REM_INT_LIT16:
- case Instruction::AND_INT_LIT16:
- case Instruction::OR_INT_LIT16:
- case Instruction::XOR_INT_LIT16:
- case Instruction::ADD_INT_LIT8:
- case Instruction::RSUB_INT_LIT8:
- case Instruction::MUL_INT_LIT8:
- case Instruction::DIV_INT_LIT8:
- case Instruction::REM_INT_LIT8:
- case Instruction::AND_INT_LIT8:
- case Instruction::OR_INT_LIT8:
- case Instruction::XOR_INT_LIT8:
- case Instruction::SHL_INT_LIT8:
- case Instruction::SHR_INT_LIT8:
- case Instruction::USHR_INT_LIT8:
- case Instruction::INVOKE_POLYMORPHIC:
- case Instruction::INVOKE_POLYMORPHIC_RANGE:
- case Instruction::INVOKE_CUSTOM:
- case Instruction::INVOKE_CUSTOM_RANGE:
- case Instruction::CONST_METHOD_HANDLE:
- case Instruction::CONST_METHOD_TYPE:
- continue;
- default:
- return false;
- }
- }
+ if (isa == InstructionSet::kRiscv64 && method->GetDexFile()->IsCompactDexFile()) {
+ return false; // Riscv64 nterp does not support compact dex yet.
}
// There is no need to add the alignment padding size for comparison with aligned limit.
size_t frame_size_without_padding = NterpGetFrameSizeWithoutPadding(method, isa);
diff --git a/runtime/oat/elf_file.cc b/runtime/oat/elf_file.cc
index 96d7d0f4ac..30e0197470 100644
--- a/runtime/oat/elf_file.cc
+++ b/runtime/oat/elf_file.cc
@@ -1082,6 +1082,8 @@ static InstructionSet GetInstructionSetFromELF(uint16_t e_machine,
return InstructionSet::kArm;
case EM_AARCH64:
return InstructionSet::kArm64;
+ case EM_RISCV:
+ return InstructionSet::kRiscv64;
case EM_386:
return InstructionSet::kX86;
case EM_X86_64:
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index f368b5478a..c292f2a7f8 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -34,7 +34,7 @@
#include "scoped_thread_state_change-inl.h"
#include "stack_reference.h"
#include "thread-inl.h"
-#include "well_known_classes.h"
+#include "well_known_classes-inl.h"
namespace art HIDDEN {
namespace {
@@ -259,15 +259,15 @@ class ArgArray {
}
}
-#define DO_FIRST_ARG(match_descriptor, get_fn, append) { \
+#define DO_FIRST_ARG(boxed, get_fn, append) { \
if (LIKELY(arg != nullptr && \
- arg->GetClass()->DescriptorEquals(match_descriptor))) { \
+ arg->GetClass() == WellKnownClasses::java_lang_##boxed)) { \
ArtField* primitive_field = arg->GetClass()->GetInstanceField(0); \
append(primitive_field-> get_fn(arg.Get()));
-#define DO_ARG(match_descriptor, get_fn, append) \
+#define DO_ARG(boxed, get_fn, append) \
} else if (LIKELY(arg != nullptr && \
- arg->GetClass<>()->DescriptorEquals(match_descriptor))) { \
+ arg->GetClass() == WellKnownClasses::java_lang_##boxed)) { \
ArtField* primitive_field = arg->GetClass()->GetInstanceField(0); \
append(primitive_field-> get_fn(arg.Get()));
@@ -293,54 +293,54 @@ class ArgArray {
Append(arg.Get());
break;
case 'Z':
- DO_FIRST_ARG("Ljava/lang/Boolean;", GetBoolean, Append)
+ DO_FIRST_ARG(Boolean, GetBoolean, Append)
DO_FAIL("boolean")
break;
case 'B':
- DO_FIRST_ARG("Ljava/lang/Byte;", GetByte, Append)
+ DO_FIRST_ARG(Byte, GetByte, Append)
DO_FAIL("byte")
break;
case 'C':
- DO_FIRST_ARG("Ljava/lang/Character;", GetChar, Append)
+ DO_FIRST_ARG(Character, GetChar, Append)
DO_FAIL("char")
break;
case 'S':
- DO_FIRST_ARG("Ljava/lang/Short;", GetShort, Append)
- DO_ARG("Ljava/lang/Byte;", GetByte, Append)
+ DO_FIRST_ARG(Short, GetShort, Append)
+ DO_ARG(Byte, GetByte, Append)
DO_FAIL("short")
break;
case 'I':
- DO_FIRST_ARG("Ljava/lang/Integer;", GetInt, Append)
- DO_ARG("Ljava/lang/Character;", GetChar, Append)
- DO_ARG("Ljava/lang/Short;", GetShort, Append)
- DO_ARG("Ljava/lang/Byte;", GetByte, Append)
+ DO_FIRST_ARG(Integer, GetInt, Append)
+ DO_ARG(Character, GetChar, Append)
+ DO_ARG(Short, GetShort, Append)
+ DO_ARG(Byte, GetByte, Append)
DO_FAIL("int")
break;
case 'J':
- DO_FIRST_ARG("Ljava/lang/Long;", GetLong, AppendWide)
- DO_ARG("Ljava/lang/Integer;", GetInt, AppendWide)
- DO_ARG("Ljava/lang/Character;", GetChar, AppendWide)
- DO_ARG("Ljava/lang/Short;", GetShort, AppendWide)
- DO_ARG("Ljava/lang/Byte;", GetByte, AppendWide)
+ DO_FIRST_ARG(Long, GetLong, AppendWide)
+ DO_ARG(Integer, GetInt, AppendWide)
+ DO_ARG(Character, GetChar, AppendWide)
+ DO_ARG(Short, GetShort, AppendWide)
+ DO_ARG(Byte, GetByte, AppendWide)
DO_FAIL("long")
break;
case 'F':
- DO_FIRST_ARG("Ljava/lang/Float;", GetFloat, AppendFloat)
- DO_ARG("Ljava/lang/Long;", GetLong, AppendFloat)
- DO_ARG("Ljava/lang/Integer;", GetInt, AppendFloat)
- DO_ARG("Ljava/lang/Character;", GetChar, AppendFloat)
- DO_ARG("Ljava/lang/Short;", GetShort, AppendFloat)
- DO_ARG("Ljava/lang/Byte;", GetByte, AppendFloat)
+ DO_FIRST_ARG(Float, GetFloat, AppendFloat)
+ DO_ARG(Long, GetLong, AppendFloat)
+ DO_ARG(Integer, GetInt, AppendFloat)
+ DO_ARG(Character, GetChar, AppendFloat)
+ DO_ARG(Short, GetShort, AppendFloat)
+ DO_ARG(Byte, GetByte, AppendFloat)
DO_FAIL("float")
break;
case 'D':
- DO_FIRST_ARG("Ljava/lang/Double;", GetDouble, AppendDouble)
- DO_ARG("Ljava/lang/Float;", GetFloat, AppendDouble)
- DO_ARG("Ljava/lang/Long;", GetLong, AppendDouble)
- DO_ARG("Ljava/lang/Integer;", GetInt, AppendDouble)
- DO_ARG("Ljava/lang/Character;", GetChar, AppendDouble)
- DO_ARG("Ljava/lang/Short;", GetShort, AppendDouble)
- DO_ARG("Ljava/lang/Byte;", GetByte, AppendDouble)
+ DO_FIRST_ARG(Double, GetDouble, AppendDouble)
+ DO_ARG(Float, GetFloat, AppendDouble)
+ DO_ARG(Long, GetLong, AppendDouble)
+ DO_ARG(Integer, GetInt, AppendDouble)
+ DO_ARG(Character, GetChar, AppendDouble)
+ DO_ARG(Short, GetShort, AppendDouble)
+ DO_ARG(Byte, GetByte, AppendDouble)
DO_FAIL("double")
break;
#ifndef NDEBUG
@@ -952,28 +952,28 @@ static bool UnboxPrimitive(ObjPtr<mirror::Object> o,
ObjPtr<mirror::Class> klass = o->GetClass();
Primitive::Type primitive_type;
ArtField* primitive_field = &klass->GetIFieldsPtr()->At(0);
- if (klass->DescriptorEquals("Ljava/lang/Boolean;")) {
+ if (klass == WellKnownClasses::java_lang_Boolean) {
primitive_type = Primitive::kPrimBoolean;
boxed_value.SetZ(primitive_field->GetBoolean(o));
- } else if (klass->DescriptorEquals("Ljava/lang/Byte;")) {
+ } else if (klass == WellKnownClasses::java_lang_Byte) {
primitive_type = Primitive::kPrimByte;
boxed_value.SetB(primitive_field->GetByte(o));
- } else if (klass->DescriptorEquals("Ljava/lang/Character;")) {
+ } else if (klass == WellKnownClasses::java_lang_Character) {
primitive_type = Primitive::kPrimChar;
boxed_value.SetC(primitive_field->GetChar(o));
- } else if (klass->DescriptorEquals("Ljava/lang/Float;")) {
+ } else if (klass == WellKnownClasses::java_lang_Float) {
primitive_type = Primitive::kPrimFloat;
boxed_value.SetF(primitive_field->GetFloat(o));
- } else if (klass->DescriptorEquals("Ljava/lang/Double;")) {
+ } else if (klass == WellKnownClasses::java_lang_Double) {
primitive_type = Primitive::kPrimDouble;
boxed_value.SetD(primitive_field->GetDouble(o));
- } else if (klass->DescriptorEquals("Ljava/lang/Integer;")) {
+ } else if (klass == WellKnownClasses::java_lang_Integer) {
primitive_type = Primitive::kPrimInt;
boxed_value.SetI(primitive_field->GetInt(o));
- } else if (klass->DescriptorEquals("Ljava/lang/Long;")) {
+ } else if (klass == WellKnownClasses::java_lang_Long) {
primitive_type = Primitive::kPrimLong;
boxed_value.SetJ(primitive_field->GetLong(o));
- } else if (klass->DescriptorEquals("Ljava/lang/Short;")) {
+ } else if (klass == WellKnownClasses::java_lang_Short) {
primitive_type = Primitive::kPrimShort;
boxed_value.SetS(primitive_field->GetShort(o));
} else {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index e6f8c576dc..b7587c0a91 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1317,7 +1317,7 @@ void Runtime::InitNonZygoteOrPostFork(
if (!odrefresh::UploadStatsIfAvailable(&err)) {
LOG(WARNING) << "Failed to upload odrefresh metrics: " << err;
}
- metrics::SetupCallbackForDeviceStatus();
+ metrics::ReportDeviceMetrics();
}
if (LIKELY(automatically_set_jni_ids_indirection_) && CanSetJniIdType()) {
@@ -2381,7 +2381,7 @@ void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
register_dalvik_system_DexFile(env);
register_dalvik_system_BaseDexClassLoader(env);
register_dalvik_system_VMDebug(env);
- register_dalvik_system_VMRuntime(env);
+ real_register_dalvik_system_VMRuntime(env);
register_dalvik_system_VMStack(env);
register_dalvik_system_ZygoteHooks(env);
register_java_lang_Class(env);
diff --git a/runtime/runtime_image.cc b/runtime/runtime_image.cc
index 0c85261829..5d304698bf 100644
--- a/runtime/runtime_image.cc
+++ b/runtime/runtime_image.cc
@@ -23,6 +23,7 @@
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
#include "arch/instruction_set.h"
+#include "arch/instruction_set_features.h"
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/bit_utils.h"
@@ -603,8 +604,11 @@ class RuntimeImageHelper {
}
for (Handle<mirror::Class> cls : classes_to_write) {
- ScopedAssertNoThreadSuspension sants("Writing class");
- CopyClass(cls.Get());
+ {
+ ScopedAssertNoThreadSuspension sants("Writing class");
+ CopyClass(cls.Get());
+ }
+ self->AllowThreadSuspension();
}
// Relocate the type array entries. We do this now before creating image
diff --git a/runtime/runtime_options.h b/runtime/runtime_options.h
index 6ece459dad..3cadd09bf8 100644
--- a/runtime/runtime_options.h
+++ b/runtime/runtime_options.h
@@ -29,8 +29,8 @@
#include "gc/collector_type.h"
#include "gc/space/large_object_space.h"
#include "hidden_api.h"
-#include "jit/jit.h"
#include "jit/jit_code_cache.h"
+#include "jit/jit_options.h"
#include "jit/profile_saver_options.h"
#include "verifier/verifier_enums.h"
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h
index 83ab469c68..2fcc4b065b 100644
--- a/runtime/thread-inl.h
+++ b/runtime/thread-inl.h
@@ -284,10 +284,20 @@ inline void Thread::AddSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
tlsPtr_.active_suspend1_barriers = suspend1_barrier;
}
-inline void Thread::RemoveFirstSuspend1Barrier() {
+inline void Thread::RemoveFirstSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier) {
+ DCHECK_EQ(tlsPtr_.active_suspend1_barriers, suspend1_barrier);
tlsPtr_.active_suspend1_barriers = tlsPtr_.active_suspend1_barriers->next_;
}
+inline void Thread::RemoveSuspend1Barrier(WrappedSuspend1Barrier* barrier) {
+ // 'barrier' should be in the list. If not, we will get a SIGSEGV with fault address of 4 or 8.
+ WrappedSuspend1Barrier** last = &tlsPtr_.active_suspend1_barriers;
+ while (*last != barrier) {
+ last = &((*last)->next_);
+ }
+ *last = (*last)->next_;
+}
+
inline bool Thread::HasActiveSuspendBarrier() {
return tlsPtr_.active_suspend1_barriers != nullptr ||
tlsPtr_.active_suspendall_barrier != nullptr;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index e1ee9007bf..03632887d0 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -675,16 +675,15 @@ void* Thread::CreateCallback(void* arg) {
return nullptr;
}
-Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
- ObjPtr<mirror::Object> thread_peer) {
+Thread* Thread::FromManagedThread(Thread* self, ObjPtr<mirror::Object> thread_peer) {
ArtField* f = WellKnownClasses::java_lang_Thread_nativePeer;
Thread* result = reinterpret_cast64<Thread*>(f->GetLong(thread_peer));
// Check that if we have a result it is either suspended or we hold the thread_list_lock_
// to stop it from going away.
if (kIsDebugBuild) {
- MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
+ MutexLock mu(self, *Locks::thread_suspend_count_lock_);
if (result != nullptr && !result->IsSuspended()) {
- Locks::thread_list_lock_->AssertHeld(soa.Self());
+ Locks::thread_list_lock_->AssertHeld(self);
}
}
return result;
@@ -692,7 +691,7 @@ Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
jobject java_thread) {
- return FromManagedThread(soa, soa.Decode<mirror::Object>(java_thread));
+ return FromManagedThread(soa.Self(), soa.Decode<mirror::Object>(java_thread));
}
static size_t FixStackSize(size_t stack_size) {
@@ -1518,20 +1517,27 @@ bool Thread::PassActiveSuspendBarriers() {
}
tlsPtr_.active_suspend1_barriers = nullptr;
AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
+ CHECK_GT(pass_barriers.size(), 0U); // Since kActiveSuspendBarrier was set.
+ // Decrement suspend barrier(s) while we still hold the lock, since SuspendThread may
+ // remove and deallocate suspend barriers while holding suspend_count_lock_ .
+ // There will typically only be a single barrier to pass here.
+ for (AtomicInteger*& barrier : pass_barriers) {
+ int32_t old_val = barrier->fetch_sub(1, std::memory_order_release);
+ CHECK_GT(old_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << old_val;
+ if (old_val != 1) {
+ // We're done with it.
+ barrier = nullptr;
+ }
+ }
}
-
- uint32_t barrier_count = 0;
+ // Finally do futex_wakes after releasing the lock.
for (AtomicInteger* barrier : pass_barriers) {
- ++barrier_count;
- int32_t old_val = barrier->fetch_sub(1, std::memory_order_release);
- CHECK_GT(old_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << old_val;
#if ART_USE_FUTEXES
- if (old_val == 1) {
+ if (barrier != nullptr) {
futex(barrier->Address(), FUTEX_WAKE_PRIVATE, INT_MAX, nullptr, nullptr, 0);
}
#endif
}
- CHECK_GT(barrier_count, 0U);
return true;
}
@@ -1721,7 +1727,7 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function, ThreadState wait_st
Locks::thread_list_lock_->ExclusiveUnlock(self);
if (IsSuspended()) {
// See the discussion in mutator_gc_coord.md and SuspendAllInternal for the race here.
- RemoveFirstSuspend1Barrier();
+ RemoveFirstSuspend1Barrier(&wrapped_barrier);
if (!HasActiveSuspendBarrier()) {
AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
}
@@ -2613,10 +2619,6 @@ void Thread::Destroy(bool should_run_callbacks) {
runtime->GetRuntimeCallbacks()->ThreadDeath(self);
}
- if (UNLIKELY(self->GetMethodTraceBuffer() != nullptr)) {
- Trace::FlushThreadBuffer(self);
- }
-
// this.nativePeer = 0;
SetNativePeer</*kSupportTransaction=*/ true>(tlsPtr_.opeer, nullptr);
@@ -2631,12 +2633,17 @@ void Thread::Destroy(bool should_run_callbacks) {
ObjectLock<mirror::Object> locker(self, h_obj);
locker.NotifyAll();
}
+
tlsPtr_.opeer = nullptr;
}
{
ScopedObjectAccess soa(self);
Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
+
+ if (UNLIKELY(self->GetMethodTraceBuffer() != nullptr)) {
+ Trace::FlushThreadBuffer(self);
+ }
}
// Mark-stack revocation must be performed at the very end. No
// checkpoint/flip-function or read-barrier should be called after this.
@@ -2685,9 +2692,7 @@ Thread::~Thread() {
SetCachedThreadName(nullptr); // Deallocate name.
delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
- if (tlsPtr_.method_trace_buffer != nullptr) {
- delete[] tlsPtr_.method_trace_buffer;
- }
+ CHECK_EQ(tlsPtr_.method_trace_buffer, nullptr);
Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
diff --git a/runtime/thread.h b/runtime/thread.h
index 5a9c55aa14..a59b10ae13 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -200,7 +200,7 @@ enum class WeakRefAccessState : int32_t {
// See Thread.tlsPtr_.active_suspend1_barriers below for explanation.
struct WrappedSuspend1Barrier {
WrappedSuspend1Barrier() : barrier_(1), next_(nullptr) {}
- AtomicInteger barrier_;
+ AtomicInteger barrier_; // Only updated while holding thread_suspend_count_lock_ .
struct WrappedSuspend1Barrier* next_ GUARDED_BY(Locks::thread_suspend_count_lock_);
};
@@ -300,8 +300,7 @@ class EXPORT Thread {
void CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex);
void CheckEmptyCheckpointFromMutex();
- static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
- ObjPtr<mirror::Object> thread_peer)
+ static Thread* FromManagedThread(Thread* self, ObjPtr<mirror::Object> thread_peer)
REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
@@ -1762,7 +1761,14 @@ class EXPORT Thread {
// Remove last-added entry from active_suspend1_barriers.
// Only makes sense if we're still holding thread_suspend_count_lock_ since insertion.
- ALWAYS_INLINE void RemoveFirstSuspend1Barrier() REQUIRES(Locks::thread_suspend_count_lock_);
+ // We redundantly pass in the barrier to be removed in order to enable a DCHECK.
+ ALWAYS_INLINE void RemoveFirstSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
+ REQUIRES(Locks::thread_suspend_count_lock_);
+
+ // Remove the "barrier" from the list no matter where it appears. Called only under exceptional
+ // circumstances. The barrier must be in the list.
+ ALWAYS_INLINE void RemoveSuspend1Barrier(WrappedSuspend1Barrier* suspend1_barrier)
+ REQUIRES(Locks::thread_suspend_count_lock_);
ALWAYS_INLINE bool HasActiveSuspendBarrier() REQUIRES(Locks::thread_suspend_count_lock_);
@@ -1946,7 +1952,7 @@ class EXPORT Thread {
// first if possible.
/***********************************************************************************************/
- struct PACKED(4) tls_32bit_sized_values {
+ struct alignas(4) tls_32bit_sized_values {
// We have no control over the size of 'bool', but want our boolean fields
// to be 4-byte quantities.
using bool32_t = uint32_t;
@@ -2074,7 +2080,7 @@ class EXPORT Thread {
uint32_t shared_method_hotness;
} tls32_;
- struct PACKED(8) tls_64bit_sized_values {
+ struct alignas(8) tls_64bit_sized_values {
tls_64bit_sized_values() : trace_clock_base(0) {
}
@@ -2084,7 +2090,7 @@ class EXPORT Thread {
RuntimeStats stats;
} tls64_;
- struct PACKED(sizeof(void*)) tls_ptr_sized_values {
+ struct alignas(sizeof(void*)) tls_ptr_sized_values {
tls_ptr_sized_values() : card_table(nullptr),
exception(nullptr),
stack_end(nullptr),
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index d5face195d..5e63b27b20 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -659,8 +659,9 @@ void ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
// failures are expected.
static constexpr bool kShortSuspendTimeouts = false;
+static constexpr unsigned kSuspendBarrierIters = kShortSuspendTimeouts ? 5 : 20;
+
#if ART_USE_FUTEXES
-static constexpr int kSuspendBarrierIters = 5;
// Returns true if it timed out.
static bool WaitOnceForSuspendBarrier(AtomicInteger* barrier,
@@ -669,8 +670,9 @@ static bool WaitOnceForSuspendBarrier(AtomicInteger* barrier,
timespec wait_timeout;
if (kShortSuspendTimeouts) {
timeout_ns = MsToNs(kSuspendBarrierIters);
+ CHECK_GE(NsToMs(timeout_ns / kSuspendBarrierIters), 1ul);
} else {
- DCHECK_GE(NsToMs(timeout_ns / kSuspendBarrierIters), 100ul);
+ DCHECK_GE(NsToMs(timeout_ns / kSuspendBarrierIters), 10ul);
}
InitTimeSpec(false, CLOCK_MONOTONIC, NsToMs(timeout_ns / kSuspendBarrierIters), 0, &wait_timeout);
if (futex(barrier->Address(), FUTEX_WAIT_PRIVATE, cur_val, &wait_timeout, nullptr, 0) != 0) {
@@ -684,16 +686,15 @@ static bool WaitOnceForSuspendBarrier(AtomicInteger* barrier,
}
#else
-static constexpr int kSuspendBarrierIters = 10;
static bool WaitOnceForSuspendBarrier(AtomicInteger* barrier,
int32_t cur_val,
uint64_t timeout_ns) {
- static constexpr int kIters = kShortSuspendTimeouts ? 10'000 : 1'000'000;
- if (!kShortSuspendTimeouts) {
- DCHECK_GE(NsToMs(timeout_ns / kSuspendBarrierIters), 100ul);
- }
- for (int i = 0; i < kIters; ++i) {
+ // In the normal case, aim for a couple of hundred milliseconds.
+ static constexpr unsigned kInnerIters =
+ kShortSuspendTimeouts ? 1'000 : (timeout_ns / 1000) / kSuspendBarrierIters;
+ DCHECK_GE(kInnerIters, 1'000u);
+ for (int i = 0; i < kInnerIters; ++i) {
sched_yield();
if (barrier->load(std::memory_order_acquire) == 0) {
return false;
@@ -701,7 +702,8 @@ static bool WaitOnceForSuspendBarrier(AtomicInteger* barrier,
}
return true;
}
-#endif
+
+#endif // ART_USE_FUTEXES
// Return a short string describing the scheduling state of the thread with the given tid.
static std::string GetThreadState(pid_t t) {
@@ -726,18 +728,23 @@ static std::string GetThreadState(pid_t t) {
#endif
}
-std::optional<std::string> ThreadList::WaitForSuspendBarrier(AtomicInteger* barrier, pid_t t) {
+std::optional<std::string> ThreadList::WaitForSuspendBarrier(AtomicInteger* barrier,
+ pid_t t,
+ int attempt_of_4) {
// Only fail after kIter timeouts, to make us robust against app freezing.
#if ART_USE_FUTEXES
const uint64_t start_time = NanoTime();
#endif
+ uint64_t timeout_ns =
+ attempt_of_4 == 0 ? thread_suspend_timeout_ns_ : thread_suspend_timeout_ns_ / 4;
+ bool collect_state = (t != 0 && (attempt_of_4 == 0 || attempt_of_4 == 4));
int32_t cur_val = barrier->load(std::memory_order_acquire);
if (cur_val <= 0) {
DCHECK_EQ(cur_val, 0);
return std::nullopt;
}
- int i = 0;
- if (WaitOnceForSuspendBarrier(barrier, cur_val, thread_suspend_timeout_ns_)) {
+ unsigned i = 0;
+ if (WaitOnceForSuspendBarrier(barrier, cur_val, timeout_ns)) {
i = 1;
}
cur_val = barrier->load(std::memory_order_acquire);
@@ -747,14 +754,13 @@ std::optional<std::string> ThreadList::WaitForSuspendBarrier(AtomicInteger* barr
}
// Long wait; gather information in case of timeout.
- std::string sampled_state = t == 0 ? "" : GetThreadState(t);
+ std::string sampled_state = collect_state ? GetThreadState(t) : "";
while (i < kSuspendBarrierIters) {
- if (WaitOnceForSuspendBarrier(barrier, cur_val, thread_suspend_timeout_ns_)) {
+ if (WaitOnceForSuspendBarrier(barrier, cur_val, timeout_ns)) {
++i;
#if ART_USE_FUTEXES
if (!kShortSuspendTimeouts) {
- CHECK_GE(NanoTime() - start_time,
- i * thread_suspend_timeout_ns_ / kSuspendBarrierIters - 1'000'000);
+ CHECK_GE(NanoTime() - start_time, i * timeout_ns / kSuspendBarrierIters - 1'000'000);
}
#endif
}
@@ -764,11 +770,10 @@ std::optional<std::string> ThreadList::WaitForSuspendBarrier(AtomicInteger* barr
return std::nullopt;
}
}
- std::string result = t == 0 ? "" :
- "Target states: [" + sampled_state + ", " + GetThreadState(t) +
- "]" + std::to_string(cur_val) + "@" +
- std::to_string((uintptr_t)barrier) + "->";
- return result + std::to_string(barrier->load(std::memory_order_acquire));
+ return collect_state ? "Target states: [" + sampled_state + ", " + GetThreadState(t) + "]" +
+ std::to_string(cur_val) + "@" + std::to_string((uintptr_t)barrier) +
+ " Final wait time: " + PrettyDuration(NanoTime() - start_time) :
+ "";
}
void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
@@ -834,7 +839,6 @@ void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
// Ensures all threads running Java suspend and that those not running Java don't start.
void ThreadList::SuspendAllInternal(Thread* self, SuspendReason reason) {
// self can be nullptr if this is an unregistered thread.
- const uint64_t start_time = NanoTime();
Locks::mutator_lock_->AssertNotExclusiveHeld(self);
Locks::thread_list_lock_->AssertNotHeld(self);
Locks::thread_suspend_count_lock_->AssertNotHeld(self);
@@ -914,24 +918,47 @@ void ThreadList::SuspendAllInternal(Thread* self, SuspendReason reason) {
// We're already not runnable, so an attempt to suspend us should succeed.
}
- if (WaitForSuspendBarrier(&pending_threads).has_value()) {
- const uint64_t wait_time = NanoTime() - start_time;
- MutexLock mu(self, *Locks::thread_list_lock_);
- MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
- std::ostringstream oss;
- oss << "Unsuspended threads: ";
- Thread* culprit = nullptr;
- for (const auto& thread : list_) {
- if (thread != self && !thread->IsSuspended()) {
- culprit = thread;
- oss << *thread << ", ";
- }
+ Thread* culprit = nullptr;
+ pid_t tid = 0;
+ std::ostringstream oss;
+ for (int attempt_of_4 = 1; attempt_of_4 <= 4; ++attempt_of_4) {
+ auto result = WaitForSuspendBarrier(&pending_threads, tid, attempt_of_4);
+ if (!result.has_value()) {
+ // Wait succeeded.
+ break;
}
- oss << "waited for " << PrettyDuration(wait_time);
- if (culprit == nullptr) {
- LOG(FATAL) << "SuspendAll timeout. " << oss.str();
- } else {
- culprit->AbortInThis("SuspendAll timeout. " + oss.str());
+ if (attempt_of_4 == 3) {
+ // Second to the last attempt; Try to gather more information in case we time out.
+ MutexLock mu(self, *Locks::thread_list_lock_);
+ MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+ oss << "Unsuspended threads: ";
+ for (const auto& thread : list_) {
+ if (thread != self && !thread->IsSuspended()) {
+ culprit = thread;
+ oss << *thread << ", ";
+ }
+ }
+ if (culprit != nullptr) {
+ tid = culprit->GetTid();
+ }
+ } else if (attempt_of_4 == 4) {
+ // Final attempt still timed out.
+ if (culprit == nullptr) {
+ LOG(FATAL) << "SuspendAll timeout. Couldn't find holdouts.";
+ } else {
+ std::string name;
+ culprit->GetThreadName(name);
+ oss << "Info for " << *culprit << ":";
+ std::string thr_descr =
+ StringPrintf("%s tid: %d, state&flags: 0x%x, priority: %d, barrier value: %d, ",
+ name.c_str(),
+ tid,
+ culprit->GetStateAndFlags(std::memory_order_relaxed).GetValue(),
+ culprit->GetNativePriority(),
+ pending_threads.load());
+ oss << thr_descr << result.value();
+ culprit->AbortInThis("SuspendAll timeout: " + oss.str());
+ }
}
}
}
@@ -1007,15 +1034,15 @@ bool ThreadList::Resume(Thread* thread, SuspendReason reason) {
// To check IsSuspended.
MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
if (UNLIKELY(!thread->IsSuspended())) {
- LOG(ERROR) << "Resume(" << reinterpret_cast<void*>(thread)
- << ") thread not suspended";
+ LOG(reason == SuspendReason::kForUserCode ? ERROR : FATAL)
+ << "Resume(" << reinterpret_cast<void*>(thread) << ") thread not suspended";
return false;
}
if (!Contains(thread)) {
// We only expect threads within the thread-list to have been suspended otherwise we can't
// stop such threads from delete-ing themselves.
- LOG(ERROR) << "Resume(" << reinterpret_cast<void*>(thread)
- << ") thread not within thread list";
+ LOG(reason == SuspendReason::kForUserCode ? ERROR : FATAL)
+ << "Resume(" << reinterpret_cast<void*>(thread) << ") thread not within thread list";
return false;
}
thread->DecrementSuspendCount(self, /*for_user_code=*/(reason == SuspendReason::kForUserCode));
@@ -1026,135 +1053,42 @@ bool ThreadList::Resume(Thread* thread, SuspendReason reason) {
return true;
}
-static void ThreadSuspendByPeerWarning(ScopedObjectAccess& soa,
- LogSeverity severity,
- const char* message,
- jobject peer) REQUIRES_SHARED(Locks::mutator_lock_) {
- ObjPtr<mirror::Object> name =
- WellKnownClasses::java_lang_Thread_name->GetObject(soa.Decode<mirror::Object>(peer));
- if (name == nullptr) {
- LOG(severity) << message << ": " << peer;
- } else {
- LOG(severity) << message << ": " << peer << ":" << name->AsString()->ToModifiedUtf8();
- }
-}
-
-Thread* ThreadList::SuspendThreadByPeer(jobject peer, SuspendReason reason) {
- bool is_suspended = false;
- Thread* const self = Thread::Current();
- VLOG(threads) << "SuspendThreadByPeer starting";
- Thread* thread;
- WrappedSuspend1Barrier wrapped_barrier{};
- for (int iter_count = 1;; ++iter_count) {
- {
- // Note: this will transition to runnable and potentially suspend.
- ScopedObjectAccess soa(self);
- MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
- thread = Thread::FromManagedThread(soa, peer);
- if (thread == nullptr) {
- ThreadSuspendByPeerWarning(soa,
- ::android::base::WARNING,
- "No such thread for suspend",
- peer);
- return nullptr;
- }
- if (!Contains(thread)) {
- VLOG(threads) << "SuspendThreadByPeer failed for unattached thread: "
- << reinterpret_cast<void*>(thread);
- return nullptr;
- }
- // IsSuspended on the current thread will fail as the current thread is changed into
- // Runnable above. As the suspend count is now raised if this is the current thread
- // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
- // to just explicitly handle the current thread in the callers to this code.
- CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
- VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
- {
- MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
- if (LIKELY(self->GetSuspendCount() == 0)) {
- thread->IncrementSuspendCount(self, nullptr, &wrapped_barrier, reason);
- if (thread->IsSuspended()) {
- // See the discussion in mutator_gc_coord.md and SuspendAllInternal for the race here.
- thread->RemoveFirstSuspend1Barrier();
- if (!thread->HasActiveSuspendBarrier()) {
- thread->AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
- }
- is_suspended = true;
- }
- DCHECK_GT(thread->GetSuspendCount(), 0);
- break;
- }
- // Else we hold the suspend count lock but another thread is trying to suspend us,
- // making it unsafe to try to suspend another thread in case we get a cycle.
- // We start the loop again, which will allow this thread to be suspended.
- }
- }
- // All locks are released, and we should quickly exit the suspend-unfriendly state. Retry.
- if (iter_count >= kMaxSuspendRetries) {
- LOG(FATAL) << "Too many suspend retries";
- }
- usleep(kThreadSuspendSleepUs);
- }
- // Now wait for target to decrement suspend barrier.
- if (is_suspended || !WaitForSuspendBarrier(&wrapped_barrier.barrier_).has_value()) {
- // wrapped_barrier.barrier_ has been decremented and will no longer be accessed.
- VLOG(threads) << "SuspendThreadByPeer thread suspended: " << *thread;
- if (ATraceEnabled()) {
- std::string name;
- thread->GetThreadName(name);
- ATraceBegin(
- StringPrintf("SuspendThreadByPeer suspended %s for peer=%p", name.c_str(), peer).c_str());
- }
- DCHECK(thread->IsSuspended());
- return thread;
- } else {
- LOG(WARNING) << "Suspended thread state_and_flags: " << thread->StateAndFlagsAsHexString();
- // thread still has a pointer to wrapped_barrier. Returning and continuing would be unsafe
- // without additional cleanup.
- {
- ScopedObjectAccess soa(self);
- ThreadSuspendByPeerWarning(
- soa, ::android::base::FATAL, "SuspendThreadByPeer timed out", peer);
- }
- UNREACHABLE();
- }
-}
-
-Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, SuspendReason reason) {
+bool ThreadList::SuspendThread(Thread* self,
+ Thread* thread,
+ SuspendReason reason,
+ ThreadState self_state,
+ const char* func_name,
+ int attempt_of_4) {
bool is_suspended = false;
- Thread* const self = Thread::Current();
- CHECK_NE(thread_id, kInvalidThreadId);
- VLOG(threads) << "SuspendThreadByThreadId starting";
- Thread* thread;
- pid_t tid;
+ VLOG(threads) << func_name << "starting";
+ pid_t tid = thread->GetTid();
uint8_t suspended_count;
uint8_t checkpoint_count;
WrappedSuspend1Barrier wrapped_barrier{};
static_assert(sizeof wrapped_barrier.barrier_ == sizeof(uint32_t));
- for (int iter_count = 1;; ++iter_count) {
+ ThreadExitFlag tef;
+ bool exited = false;
+ thread->NotifyOnThreadExit(&tef);
+ int iter_count = 1;
+ do {
{
+ Locks::mutator_lock_->AssertSharedHeld(self);
+ Locks::thread_list_lock_->AssertHeld(self);
// Note: this will transition to runnable and potentially suspend.
- ScopedObjectAccess soa(self);
- MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
- thread = FindThreadByThreadId(thread_id);
- if (thread == nullptr) {
- // There's a race in inflating a lock and the owner giving up ownership and then dying.
- LOG(WARNING) << StringPrintf("No such thread id %d for suspend", thread_id);
- return nullptr;
- }
DCHECK(Contains(thread));
- CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
- VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
+ // This implementation fails if thread == self. Let the clients handle that case
+ // appropriately.
+ CHECK_NE(thread, self) << func_name << "(self)";
+ VLOG(threads) << func_name << " suspending: " << *thread;
{
MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
if (LIKELY(self->GetSuspendCount() == 0)) {
- tid = thread->GetTid();
suspended_count = thread->suspended_count_;
checkpoint_count = thread->checkpoint_count_;
thread->IncrementSuspendCount(self, nullptr, &wrapped_barrier, reason);
if (thread->IsSuspended()) {
// See the discussion in mutator_gc_coord.md and SuspendAllInternal for the race here.
- thread->RemoveFirstSuspend1Barrier();
+ thread->RemoveFirstSuspend1Barrier(&wrapped_barrier);
if (!thread->HasActiveSuspendBarrier()) {
thread->AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
}
@@ -1172,7 +1106,23 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, SuspendReason re
if (iter_count >= kMaxSuspendRetries) {
LOG(FATAL) << "Too many suspend retries";
}
- usleep(kThreadSuspendSleepUs);
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
+ {
+ ScopedThreadSuspension sts(self, ThreadState::kSuspended);
+ usleep(kThreadSuspendSleepUs);
+ ++iter_count;
+ }
+ Locks::thread_list_lock_->ExclusiveLock(self);
+ exited = tef.HasExited();
+ } while (!exited);
+ thread->UnregisterThreadExitFlag(&tef);
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
+ self->TransitionFromRunnableToSuspended(self_state);
+ if (exited) {
+ // This is OK: There's a race in inflating a lock and the owner giving up ownership and then
+ // dying.
+ LOG(WARNING) << StringPrintf("Thread with tid %d exited before suspending", tid);
+ return false;
}
// Now wait for target to decrement suspend barrier.
std::optional<std::string> failure_info;
@@ -1180,26 +1130,31 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, SuspendReason re
// As an experiment, redundantly trigger suspension. TODO: Remove this.
std::atomic_thread_fence(std::memory_order_seq_cst);
thread->TriggerSuspend();
- failure_info = WaitForSuspendBarrier(&wrapped_barrier.barrier_, tid);
+ failure_info = WaitForSuspendBarrier(&wrapped_barrier.barrier_, tid, attempt_of_4);
if (!failure_info.has_value()) {
is_suspended = true;
}
}
- if (is_suspended) {
- // wrapped_barrier.barrier_ has been decremented and will no longer be accessed.
- VLOG(threads) << "SuspendThreadByThreadId thread suspended: " << *thread;
- if (ATraceEnabled()) {
- std::string name;
- thread->GetThreadName(name);
- ATraceBegin(
- StringPrintf("SuspendThreadByPeer suspended %s for id=%d", name.c_str(), thread_id)
- .c_str());
+ while (!is_suspended) {
+ if (attempt_of_4 > 0 && attempt_of_4 < 4) {
+ // Caller will try again. Give up and resume the thread for now. We need to make sure
+ // that wrapped_barrier is removed from the list before we deallocate it.
+ MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
+ if (wrapped_barrier.barrier_.load() == 0) {
+ // Succeeded in the meantime.
+ is_suspended = true;
+ continue;
+ }
+ thread->RemoveSuspend1Barrier(&wrapped_barrier);
+ if (!thread->HasActiveSuspendBarrier()) {
+ thread->AtomicClearFlag(ThreadFlag::kActiveSuspendBarrier);
+ }
+ // Do not call Resume(), since we are probably not fully suspended.
+ thread->DecrementSuspendCount(self,
+ /*for_user_code=*/(reason == SuspendReason::kForUserCode));
+ Thread::resume_cond_->Broadcast(self);
+ return false;
}
- DCHECK(thread->IsSuspended());
- return thread;
- } else {
- // thread still has a pointer to wrapped_barrier. Returning and continuing would be unsafe
- // without additional cleanup.
std::string name;
thread->GetThreadName(name);
WrappedSuspend1Barrier* first_barrier;
@@ -1207,12 +1162,13 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, SuspendReason re
MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
first_barrier = thread->tlsPtr_.active_suspend1_barriers;
}
- // 'thread' should still be suspended, and hence stick around. Try to abort there, since its
- // stack trace is much more interesting than ours.
- thread->AbortInThis(StringPrintf(
- "Caused SuspendThreadByThreadId to time out: %d (%s), state&flags: 0x%x, priority: %d,"
+ // 'thread' should still have a suspend request pending, and hence stick around. Try to abort
+ // there, since its stack trace is much more interesting than ours.
+ std::string message = StringPrintf(
+ "%s timed out: %d (%s), state&flags: 0x%x, priority: %d,"
" barriers: %p, ours: %p, barrier value: %d, nsusps: %d, ncheckpts: %d, thread_info: %s",
- thread_id,
+ func_name,
+ thread->GetTid(),
name.c_str(),
thread->GetStateAndFlags(std::memory_order_relaxed).GetValue(),
thread->GetNativePriority(),
@@ -1221,9 +1177,81 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, SuspendReason re
wrapped_barrier.barrier_.load(),
thread->suspended_count_ - suspended_count,
thread->checkpoint_count_ - checkpoint_count,
- failure_info.value().c_str()));
- UNREACHABLE();
+ failure_info.value().c_str());
+ // Check one last time whether thread passed the suspend barrier. Empirically this seems to
+ // happen maybe between 1 and 5% of the time.
+ if (wrapped_barrier.barrier_.load() != 0) {
+ // thread still has a pointer to wrapped_barrier. Returning and continuing would be unsafe
+ // without additional cleanup.
+ thread->AbortInThis(message);
+ UNREACHABLE();
+ }
+ is_suspended = true;
}
+ // wrapped_barrier.barrier_ has been decremented and will no longer be accessed.
+ VLOG(threads) << func_name << " suspended: " << *thread;
+ if (ATraceEnabled()) {
+ std::string name;
+ thread->GetThreadName(name);
+ ATraceBegin(
+ StringPrintf("%s suspended %s for tid=%d", func_name, name.c_str(), thread->GetTid())
+ .c_str());
+ }
+ DCHECK(thread->IsSuspended());
+ return true;
+}
+
+Thread* ThreadList::SuspendThreadByPeer(jobject peer, SuspendReason reason) {
+ Thread* const self = Thread::Current();
+ ThreadState old_self_state = self->GetState();
+ self->TransitionFromSuspendedToRunnable();
+ Locks::thread_list_lock_->ExclusiveLock(self);
+ ObjPtr<mirror::Object> thread_ptr = self->DecodeJObject(peer);
+ Thread* thread = Thread::FromManagedThread(self, thread_ptr);
+ if (thread == nullptr || !Contains(thread)) {
+ if (thread == nullptr) {
+ ObjPtr<mirror::Object> name = WellKnownClasses::java_lang_Thread_name->GetObject(thread_ptr);
+ std::string thr_name = (name == nullptr ? "<unknown>" : name->AsString()->ToModifiedUtf8());
+ LOG(WARNING) << "No such thread for suspend"
+ << ": " << peer << ":" << thr_name;
+ } else {
+ LOG(WARNING) << "SuspendThreadByPeer failed for unattached thread: "
+ << reinterpret_cast<void*>(thread);
+ }
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
+ self->TransitionFromRunnableToSuspended(old_self_state);
+ return nullptr;
+ }
+ VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
+ // Releases thread_list_lock_ and mutator lock.
+ bool success = SuspendThread(self, thread, reason, old_self_state, __func__, 0);
+ Locks::thread_list_lock_->AssertNotHeld(self);
+ return success ? thread : nullptr;
+}
+
+Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
+ SuspendReason reason,
+ int attempt_of_4) {
+ Thread* const self = Thread::Current();
+ ThreadState old_self_state = self->GetState();
+ CHECK_NE(thread_id, kInvalidThreadId);
+ VLOG(threads) << "SuspendThreadByThreadId starting";
+ self->TransitionFromSuspendedToRunnable();
+ Locks::thread_list_lock_->ExclusiveLock(self);
+ Thread* thread = FindThreadByThreadId(thread_id);
+ if (thread == nullptr) {
+ // There's a race in inflating a lock and the owner giving up ownership and then dying.
+ LOG(WARNING) << StringPrintf("No such thread id %d for suspend", thread_id);
+ Locks::thread_list_lock_->ExclusiveUnlock(self);
+ self->TransitionFromRunnableToSuspended(old_self_state);
+ return nullptr;
+ }
+ DCHECK(Contains(thread));
+ VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
+ // Releases thread_list_lock_ and mutator lock.
+ bool success = SuspendThread(self, thread, reason, old_self_state, __func__, attempt_of_4);
+ Locks::thread_list_lock_->AssertNotHeld(self);
+ return success ? thread : nullptr;
}
Thread* ThreadList::FindThreadByThreadId(uint32_t thread_id) {
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index cb7744056f..8be2d4e02f 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -17,6 +17,10 @@
#ifndef ART_RUNTIME_THREAD_LIST_H_
#define ART_RUNTIME_THREAD_LIST_H_
+#include <bitset>
+#include <list>
+#include <vector>
+
#include "barrier.h"
#include "base/histogram.h"
#include "base/mutex.h"
@@ -25,10 +29,7 @@
#include "jni.h"
#include "reflective_handle_scope.h"
#include "suspend_reason.h"
-
-#include <bitset>
-#include <list>
-#include <vector>
+#include "thread_state.h"
namespace art HIDDEN {
namespace gc {
@@ -94,8 +95,10 @@ class ThreadList {
// Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
// thread on success else null. The thread id is used to identify the thread to avoid races with
// the thread terminating. Note that as thread ids are recycled this may not suspend the expected
- // thread, that may be terminating.
- Thread* SuspendThreadByThreadId(uint32_t thread_id, SuspendReason reason)
+ // thread, that may be terminating. 'attempt_of_4' is zero if this is the only
+ // attempt, or 1..4 to try 4 times with fractional timeouts.
+ // TODO: Reconsider the use of thread_id, now that we have ThreadExitFlag.
+ Thread* SuspendThreadByThreadId(uint32_t thread_id, SuspendReason reason, int attempt_of_4 = 0)
REQUIRES(!Locks::mutator_lock_,
!Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_);
@@ -217,7 +220,9 @@ class ThreadList {
// the diagnostic information. If 0 is passed, we return an empty string on timeout. Normally
// the caller does not hold the mutator lock. See the comment at the call in
// RequestSynchronousCheckpoint for the only exception.
- std::optional<std::string> WaitForSuspendBarrier(AtomicInteger* barrier, pid_t t = 0)
+ std::optional<std::string> WaitForSuspendBarrier(AtomicInteger* barrier,
+ pid_t t = 0,
+ int attempt_of_4 = 0)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
private:
@@ -234,6 +239,19 @@ class ThreadList {
REQUIRES(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_)
UNLOCK_FUNCTION(Locks::mutator_lock_);
+ // Helper to actually suspend a single thread. This is called with thread_list_lock_ held and
+ // the caller guarantees that *thread is valid until that is released. We "release the mutator
+ // lock", by switching to self_state. 'attempt_of_4' is 0 if we only attempt once, and 1..4 if
+ // we are going to try 4 times with a quarter of the full timeout. 'func_name' is used only to
+ // identify ourselves for logging.
+ bool SuspendThread(Thread* self,
+ Thread* thread,
+ SuspendReason reason,
+ ThreadState self_state,
+ const char* func_name,
+ int attempt_of_4) RELEASE(Locks::thread_list_lock_)
+ RELEASE_SHARED(Locks::mutator_lock_);
+
void SuspendAllInternal(Thread* self, SuspendReason reason = SuspendReason::kInternal)
REQUIRES(!Locks::thread_list_lock_,
!Locks::thread_suspend_count_lock_,
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 3a410ef9d4..04f4de6c89 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -21,10 +21,10 @@
#include "android-base/macros.h"
#include "android-base/stringprintf.h"
-
#include "art_method-inl.h"
#include "base/casts.h"
#include "base/enums.h"
+#include "base/leb128.h"
#include "base/os.h"
#include "base/stl_util.h"
#include "base/systrace.h"
@@ -53,6 +53,13 @@
namespace art HIDDEN {
+struct MethodTraceRecord {
+ ArtMethod* method;
+ TraceAction action;
+ uint32_t wall_clock_time;
+ uint32_t thread_cpu_time;
+};
+
using android::base::StringPrintf;
static constexpr size_t TraceActionBits = MinimumBitsToStore(
@@ -70,6 +77,22 @@ static const uint16_t kTraceRecordSizeSingleClock = 10; // using v2
static const uint16_t kTraceRecordSizeDualClock = 14; // using v3 with two timestamps
static const size_t kNumTracePoolBuffers = 32;
+// Packet type encoding for the new method tracing format.
+static const int kThreadInfoHeaderV2 = 0;
+static const int kMethodInfoHeaderV2 = 1;
+static const int kEntryHeaderV2 = 2;
+static const int kSummaryHeaderV2 = 3;
+
+// Packet sizes for the new method trace format.
+static const uint16_t kTraceHeaderLengthV2 = 32;
+static const uint16_t kTraceRecordSizeSingleClockV2 = 6;
+static const uint16_t kTraceRecordSizeDualClockV2 = kTraceRecordSizeSingleClockV2 + 2;
+static const uint16_t kEntryHeaderSizeSingleClockV2 = 17;
+static const uint16_t kEntryHeaderSizeDualClockV2 = kEntryHeaderSizeSingleClockV2 + 4;
+
+static const uint16_t kTraceVersionSingleClockV2 = 4;
+static const uint16_t kTraceVersionDualClockV2 = 5;
+
TraceClockSource Trace::default_clock_source_ = kDefaultTraceClockSource;
Trace* volatile Trace::the_trace_ = nullptr;
@@ -310,14 +333,24 @@ void Trace::SetDefaultClockSource(TraceClockSource clock_source) {
#endif
}
-static uint16_t GetTraceVersion(TraceClockSource clock_source) {
- return (clock_source == TraceClockSource::kDual) ? kTraceVersionDualClock
- : kTraceVersionSingleClock;
+static uint16_t GetTraceVersion(TraceClockSource clock_source, int version) {
+ if (version == Trace::kFormatV1) {
+ return (clock_source == TraceClockSource::kDual) ? kTraceVersionDualClock :
+ kTraceVersionSingleClock;
+ } else {
+ return (clock_source == TraceClockSource::kDual) ? kTraceVersionDualClockV2 :
+ kTraceVersionSingleClockV2;
+ }
}
-static uint16_t GetRecordSize(TraceClockSource clock_source) {
- return (clock_source == TraceClockSource::kDual) ? kTraceRecordSizeDualClock
- : kTraceRecordSizeSingleClock;
+static uint16_t GetRecordSize(TraceClockSource clock_source, int version) {
+ if (version == Trace::kFormatV1) {
+ return (clock_source == TraceClockSource::kDual) ? kTraceRecordSizeDualClock :
+ kTraceRecordSizeSingleClock;
+ } else {
+ return (clock_source == TraceClockSource::kDual) ? kTraceRecordSizeDualClockV2 :
+ kTraceRecordSizeSingleClockV2;
+ }
}
static uint16_t GetNumEntries(TraceClockSource clock_source) {
@@ -699,7 +732,12 @@ void Trace::StopTracing(bool flush_entries) {
// We also flush the buffer when destroying a thread which expects the_trace_ to be valid so
// make sure that the per-thread buffer is reset before resetting the_trace_.
{
+ MutexLock mu(self, *Locks::trace_lock_);
MutexLock tl_lock(Thread::Current(), *Locks::thread_list_lock_);
+ // Flush the per-thread buffers and reset the trace inside the trace_lock_ to avoid any
+ // race if the thread is detaching and trying to flush the buffer too. Since we hold the
+ // trace_lock_ both here and when flushing on a thread detach only one of them will succeed
+ // in actually flushing the buffer.
for (Thread* thread : Runtime::Current()->GetThreadList()->GetList()) {
if (thread->GetMethodTraceBuffer() != nullptr) {
// We may have pending requests to flush the data. So just enqueue a
@@ -709,12 +747,9 @@ void Trace::StopTracing(bool flush_entries) {
thread, /* is_sync= */ false, /* free_buffer= */ true);
}
}
+ the_trace_ = nullptr;
+ sampling_pthread_ = 0U;
}
-
- // Reset the_trace_ by taking a trace_lock
- MutexLock mu(self, *Locks::trace_lock_);
- the_trace_ = nullptr;
- sampling_pthread_ = 0U;
}
// At this point, code may read buf_ as its writers are shutdown
@@ -731,6 +766,12 @@ void Trace::StopTracing(bool flush_entries) {
void Trace::FlushThreadBuffer(Thread* self) {
MutexLock mu(self, *Locks::trace_lock_);
+ // Check if we still need to flush inside the trace_lock_. If we are stopping tracing it is
+ // possible we already deleted the trace and flushed the buffer too.
+ if (the_trace_ == nullptr) {
+ DCHECK_EQ(self->GetMethodTraceBuffer(), nullptr);
+ return;
+ }
the_trace_->trace_writer_->FlushBuffer(self, /* is_sync= */ false, /* free_buffer= */ true);
}
@@ -791,6 +832,11 @@ TraceClockSource GetClockSourceFromFlags(int flags) {
}
}
+int GetTraceFormatVersionFromFlags(int flags) {
+ int version = (flags & Trace::kTraceFormatVersionFlagMask) >> Trace::kTraceFormatVersionShift;
+ return version;
+}
+
} // namespace
TraceWriter::TraceWriter(File* trace_file,
@@ -798,34 +844,46 @@ TraceWriter::TraceWriter(File* trace_file,
TraceClockSource clock_source,
size_t buffer_size,
int num_trace_buffers,
+ int trace_format_version,
uint32_t clock_overhead_ns)
: trace_file_(trace_file),
trace_output_mode_(output_mode),
clock_source_(clock_source),
buf_(new uint8_t[std::max(kMinBufSize, buffer_size)]()),
buffer_size_(std::max(kMinBufSize, buffer_size)),
+ trace_format_version_(trace_format_version),
start_time_(GetMicroTime(GetTimestamp())),
overflow_(false),
+ num_records_(0),
clock_overhead_ns_(clock_overhead_ns),
owner_tids_(num_trace_buffers),
tracing_lock_("tracing lock", LockLevel::kTracingStreamingLock) {
- uint16_t trace_version = GetTraceVersion(clock_source_);
+ uint16_t trace_version = GetTraceVersion(clock_source_, trace_format_version_);
if (output_mode == TraceOutputMode::kStreaming) {
trace_version |= 0xF0U;
}
+
// Set up the beginning of the trace.
- memset(buf_.get(), 0, kTraceHeaderLength);
- Append4LE(buf_.get(), kTraceMagicValue);
- Append2LE(buf_.get() + 4, trace_version);
- Append2LE(buf_.get() + 6, kTraceHeaderLength);
- Append8LE(buf_.get() + 8, start_time_);
- if (trace_version >= kTraceVersionDualClock) {
- uint16_t record_size = GetRecordSize(clock_source_);
- Append2LE(buf_.get() + 16, record_size);
- }
- static_assert(18 <= kMinBufSize, "Minimum buffer size not large enough for trace header");
+ if (trace_format_version_ == Trace::kFormatV1) {
+ memset(buf_.get(), 0, kTraceHeaderLength);
+ Append4LE(buf_.get(), kTraceMagicValue);
+ Append2LE(buf_.get() + 4, trace_version);
+ Append2LE(buf_.get() + 6, kTraceHeaderLength);
+ Append8LE(buf_.get() + 8, start_time_);
+ if (trace_version >= kTraceVersionDualClock) {
+ uint16_t record_size = GetRecordSize(clock_source_, trace_format_version_);
+ Append2LE(buf_.get() + 16, record_size);
+ }
+ static_assert(18 <= kMinBufSize, "Minimum buffer size not large enough for trace header");
- cur_offset_ = kTraceHeaderLength;
+ cur_offset_ = kTraceHeaderLength;
+ } else {
+ memset(buf_.get(), 0, kTraceHeaderLengthV2);
+ Append4LE(buf_.get(), kTraceMagicValue);
+ Append2LE(buf_.get() + 4, trace_version);
+ Append8LE(buf_.get() + 6, start_time_);
+ cur_offset_ = kTraceHeaderLengthV2;
+ }
if (output_mode == TraceOutputMode::kStreaming) {
// Flush the header information to the file. We use a per thread buffer, so
@@ -863,6 +921,7 @@ Trace::Trace(File* trace_file,
stop_tracing_(false) {
CHECK_IMPLIES(trace_file == nullptr, output_mode == TraceOutputMode::kDDMS);
+ int trace_format_version = GetTraceFormatVersionFromFlags(flags_);
// In streaming mode, we only need a buffer big enough to store data per each
// thread buffer. In non-streaming mode this is specified by the user and we
// stop tracing when the buffer is full.
@@ -874,6 +933,7 @@ Trace::Trace(File* trace_file,
clock_source_,
buf_size,
kNumTracePoolBuffers,
+ trace_format_version,
GetClockOverheadNanoSeconds()));
}
@@ -903,7 +963,7 @@ void TraceWriter::FinishTracing(int flags, bool flush_entries) {
std::ostringstream os;
os << StringPrintf("%cversion\n", kTraceTokenChar);
- os << StringPrintf("%d\n", GetTraceVersion(clock_source_));
+ os << StringPrintf("%d\n", GetTraceVersion(clock_source_, trace_format_version_));
os << StringPrintf("data-file-overflow=%s\n", overflow_ ? "true" : "false");
if (UseThreadCpuClock(clock_source_)) {
if (UseWallClock(clock_source_)) {
@@ -916,8 +976,7 @@ void TraceWriter::FinishTracing(int flags, bool flush_entries) {
}
os << StringPrintf("elapsed-time-usec=%" PRIu64 "\n", elapsed);
if (trace_output_mode_ != TraceOutputMode::kStreaming) {
- size_t num_records = (final_offset - kTraceHeaderLength) / GetRecordSize(clock_source_);
- os << StringPrintf("num-method-calls=%zd\n", num_records);
+ os << StringPrintf("num-method-calls=%zd\n", num_records_);
}
os << StringPrintf("clock-call-overhead-nsec=%d\n", clock_overhead_ns_);
os << StringPrintf("vm=art\n");
@@ -927,17 +986,13 @@ void TraceWriter::FinishTracing(int flags, bool flush_entries) {
os << "alloc-size=" << Runtime::Current()->GetStat(KIND_ALLOCATED_BYTES) << "\n";
os << "gc-count=" << Runtime::Current()->GetStat(KIND_GC_INVOCATIONS) << "\n";
}
- os << StringPrintf("%cthreads\n", kTraceTokenChar);
- {
- // TODO(b/280558212): Moving the Mutexlock out of DumpThreadList to try and
- // narrow down where seg fault is happening. Change this after the bug is
- // fixed.
- CHECK_NE(self, nullptr);
- MutexLock mu(self, tracing_lock_);
+
+ if (trace_format_version_ == Trace::kFormatV1) {
+ os << StringPrintf("%cthreads\n", kTraceTokenChar);
DumpThreadList(os);
+ os << StringPrintf("%cmethods\n", kTraceTokenChar);
+ DumpMethodList(os);
}
- os << StringPrintf("%cmethods\n", kTraceTokenChar);
- DumpMethodList(os);
os << StringPrintf("%cend\n", kTraceTokenChar);
std::string header(os.str());
@@ -947,15 +1002,27 @@ void TraceWriter::FinishTracing(int flags, bool flush_entries) {
// cannot be any writes to trace_file_ after finish tracing.
// Write a special token to mark the end of trace records and the start of
// trace summary.
- uint8_t buf[7];
- Append2LE(buf, 0);
- buf[2] = kOpTraceSummary;
- Append4LE(buf + 3, static_cast<uint32_t>(header.length()));
- // Write the trace summary. The summary is identical to the file header when
- // the output mode is not streaming (except for methods).
- if (!trace_file_->WriteFully(buf, sizeof(buf)) ||
- !trace_file_->WriteFully(header.c_str(), header.length())) {
- PLOG(WARNING) << "Failed streaming a tracing event.";
+ if (trace_format_version_ == Trace::kFormatV1) {
+ uint8_t buf[7];
+ Append2LE(buf, 0);
+ buf[2] = kOpTraceSummary;
+ Append4LE(buf + 3, static_cast<uint32_t>(header.length()));
+ // Write the trace summary. The summary is identical to the file header when
+ // the output mode is not streaming (except for methods).
+ if (!trace_file_->WriteFully(buf, sizeof(buf)) ||
+ !trace_file_->WriteFully(header.c_str(), header.length())) {
+ PLOG(WARNING) << "Failed streaming a tracing event.";
+ }
+ } else {
+ uint8_t buf[3];
+ buf[0] = kSummaryHeaderV2;
+ Append2LE(buf + 1, static_cast<uint32_t>(header.length()));
+ // Write the trace summary. Reports information about tracing mode, number of records and
+ // clock overhead in plain text format.
+ if (!trace_file_->WriteFully(buf, sizeof(buf)) ||
+ !trace_file_->WriteFully(header.c_str(), header.length())) {
+ PLOG(WARNING) << "Failed streaming a tracing event.";
+ }
}
} else {
if (trace_file_.get() == nullptr) {
@@ -1140,9 +1207,14 @@ void TraceWriter::RecordThreadInfo(Thread* thread) {
static constexpr size_t kThreadNameHeaderSize = 7;
uint8_t header[kThreadNameHeaderSize];
- Append2LE(header, 0);
- header[2] = kOpNewThread;
- Append2LE(header + 3, GetThreadEncoding(thread->GetTid()));
+ if (trace_format_version_ == Trace::kFormatV1) {
+ Append2LE(header, 0);
+ header[2] = kOpNewThread;
+ Append2LE(header + 3, GetThreadEncoding(thread->GetTid()));
+ } else {
+ header[0] = kThreadInfoHeaderV2;
+ Append4LE(header + 1, thread->GetTid());
+ }
DCHECK(thread_name.length() < (1 << 16));
Append2LE(header + 5, static_cast<uint16_t>(thread_name.length()));
@@ -1179,20 +1251,34 @@ void TraceWriter::PreProcessTraceForMethodInfos(
}
void TraceWriter::RecordMethodInfo(const std::string& method_info_line, uint32_t method_id) {
- std::string method_line(GetMethodLine(method_info_line, method_id));
// Write a special block with the name.
- static constexpr size_t kMethodNameHeaderSize = 5;
- uint8_t method_header[kMethodNameHeaderSize];
- DCHECK_LT(kMethodNameHeaderSize, kPerThreadBufSize);
- Append2LE(method_header, 0);
- method_header[2] = kOpNewMethod;
-
+ std::string method_line;
+ size_t header_size;
+ static constexpr size_t kMaxMethodNameHeaderSize = 7;
+ uint8_t method_header[kMaxMethodNameHeaderSize];
uint16_t method_line_length = static_cast<uint16_t>(method_line.length());
DCHECK(method_line.length() < (1 << 16));
- Append2LE(method_header + 3, method_line_length);
+ if (trace_format_version_ == Trace::kFormatV1) {
+ // Write a special block with the name.
+ static constexpr size_t kMethodNameHeaderSize = 5;
+ DCHECK_LT(kMethodNameHeaderSize, kPerThreadBufSize);
+ Append2LE(method_header, 0);
+ method_header[2] = kOpNewMethod;
+ method_line = GetMethodLine(method_info_line, method_id);
+ method_line_length = static_cast<uint16_t>(method_line.length());
+ Append2LE(method_header + 3, method_line_length);
+ header_size = kMethodNameHeaderSize;
+ } else {
+ method_line = method_info_line;
+ method_line_length = static_cast<uint16_t>(method_line.length());
+ method_header[0] = kMethodInfoHeaderV2;
+ Append4LE(method_header + 1, method_id);
+ Append2LE(method_header + 5, method_line_length);
+ header_size = 7;
+ }
const uint8_t* ptr = reinterpret_cast<const uint8_t*>(method_line.c_str());
- if (!trace_file_->WriteFully(method_header, kMethodNameHeaderSize) ||
+ if (!trace_file_->WriteFully(method_header, header_size) ||
!trace_file_->WriteFully(ptr, method_line_length)) {
PLOG(WARNING) << "Failed streaming a tracing event.";
}
@@ -1289,9 +1375,11 @@ void TraceWriter::FlushBuffer(Thread* thread, bool is_sync, bool release) {
// This is a synchronous flush, so no need to allocate a new buffer. This is used either
// when the tracing has finished or in non-streaming mode.
// Just reset the buffer pointer to the initial value, so we can reuse the same buffer.
- *current_offset = kPerThreadBufSize;
if (release) {
thread->SetMethodTraceBuffer(nullptr);
+ *current_offset = 0;
+ } else {
+ *current_offset = kPerThreadBufSize;
}
} else {
int old_index = GetMethodTraceIndex(method_trace_entries);
@@ -1300,17 +1388,156 @@ void TraceWriter::FlushBuffer(Thread* thread, bool is_sync, bool release) {
thread_pool_->AddTask(
Thread::Current(),
new TraceWriterTask(this, old_index, method_trace_entries, *current_offset, tid));
- *current_offset = kPerThreadBufSize;
if (release) {
thread->SetMethodTraceBuffer(nullptr);
+ *current_offset = 0;
} else {
thread->SetMethodTraceBuffer(AcquireTraceBuffer(tid));
+ *current_offset = kPerThreadBufSize;
}
}
return;
}
+void TraceWriter::ReadValuesFromRecord(uintptr_t* method_trace_entries,
+ size_t record_index,
+ MethodTraceRecord& record,
+ bool has_thread_cpu_clock,
+ bool has_wall_clock) {
+ uintptr_t method_and_action = method_trace_entries[record_index++];
+ record.method = reinterpret_cast<ArtMethod*>(method_and_action & kMaskTraceAction);
+ CHECK(record.method != nullptr);
+ record.action = DecodeTraceAction(method_and_action);
+
+ record.thread_cpu_time = 0;
+ record.wall_clock_time = 0;
+ if (has_thread_cpu_clock) {
+ record.thread_cpu_time = method_trace_entries[record_index++];
+ }
+ if (has_wall_clock) {
+ uint64_t timestamp = method_trace_entries[record_index++];
+ if (art::kRuntimePointerSize == PointerSize::k32) {
+ // On 32-bit architectures timestamp is stored as two 32-bit values.
+ uint64_t high_timestamp = method_trace_entries[record_index++];
+ timestamp = (high_timestamp << 32 | timestamp);
+ }
+ record.wall_clock_time = GetMicroTime(timestamp) - start_time_;
+ }
+}
+
+void TraceWriter::FlushEntriesFormatV1(
+ uintptr_t* method_trace_entries,
+ size_t tid,
+ const std::unordered_map<ArtMethod*, std::string>& method_infos,
+ size_t end_offset,
+ size_t* current_index,
+ uint8_t* buffer_ptr) {
+ uint16_t thread_id = GetThreadEncoding(tid);
+ bool has_thread_cpu_clock = UseThreadCpuClock(clock_source_);
+ bool has_wall_clock = UseWallClock(clock_source_);
+ size_t buffer_index = *current_index;
+ size_t num_entries = GetNumEntries(clock_source_);
+ const size_t record_size = GetRecordSize(clock_source_, trace_format_version_);
+
+ for (size_t entry_index = kPerThreadBufSize; entry_index != end_offset;) {
+ entry_index -= num_entries;
+
+ MethodTraceRecord record;
+ ReadValuesFromRecord(
+ method_trace_entries, entry_index, record, has_thread_cpu_clock, has_wall_clock);
+
+ auto [method_id, is_new_method] = GetMethodEncoding(record.method);
+ if (is_new_method && trace_output_mode_ == TraceOutputMode::kStreaming) {
+ RecordMethodInfo(method_infos.find(record.method)->second, method_id);
+ }
+
+ DCHECK_LT(buffer_index + record_size, buffer_size_);
+ EncodeEventEntry(buffer_ptr + buffer_index,
+ thread_id,
+ method_id,
+ record.action,
+ record.thread_cpu_time,
+ record.wall_clock_time);
+ buffer_index += record_size;
+ }
+ *current_index = buffer_index;
+}
+
+void TraceWriter::FlushEntriesFormatV2(
+ uintptr_t* method_trace_entries,
+ size_t tid,
+ const std::unordered_map<ArtMethod*, std::string>& method_infos,
+ size_t num_records,
+ size_t* current_index,
+ uint8_t* init_buffer_ptr) {
+ bool has_thread_cpu_clock = UseThreadCpuClock(clock_source_);
+ bool has_wall_clock = UseWallClock(clock_source_);
+ size_t num_entries = GetNumEntries(clock_source_);
+ uint32_t prev_wall_timestamp = 0;
+ uint32_t prev_thread_timestamp = 0;
+ int32_t init_method_action_encoding = 0;
+ bool is_first_entry = true;
+ uint8_t* current_buffer_ptr = init_buffer_ptr;
+ uint32_t header_size = (clock_source_ == TraceClockSource::kDual) ? kEntryHeaderSizeDualClockV2 :
+ kEntryHeaderSizeSingleClockV2;
+
+ size_t entry_index = kPerThreadBufSize;
+ for (size_t i = 0; i < num_records; i++) {
+ entry_index -= num_entries;
+
+ MethodTraceRecord record;
+ ReadValuesFromRecord(
+ method_trace_entries, entry_index, record, has_thread_cpu_clock, has_wall_clock);
+
+ // TODO(mythria): Explore the possibility of using method pointer instead of having an encoding.
+ // On 64-bit this means method ids would use 8 bytes but that is okay since we only encode the
+ // full method id in the header and then encode the diff against the method id in the header.
+ // The diff is usually expected to be small.
+ auto [method_id, is_new_method] = GetMethodEncoding(record.method);
+ if (is_new_method && trace_output_mode_ == TraceOutputMode::kStreaming) {
+ RecordMethodInfo(method_infos.find(record.method)->second, method_id);
+ }
+ DCHECK(method_id < (1 << (31 - TraceActionBits)));
+ uint32_t method_action_encoding = (method_id << TraceActionBits) | record.action;
+
+ if (is_first_entry) {
+ prev_wall_timestamp = record.wall_clock_time;
+ prev_thread_timestamp = record.thread_cpu_time;
+ init_method_action_encoding = method_action_encoding;
+ is_first_entry = false;
+
+ EncodeEventBlockHeader(init_buffer_ptr,
+ tid,
+ method_action_encoding,
+ prev_thread_timestamp,
+ prev_wall_timestamp,
+ num_records);
+ current_buffer_ptr += header_size;
+ } else {
+ current_buffer_ptr = EncodeSignedLeb128(current_buffer_ptr,
+ (method_action_encoding - init_method_action_encoding));
+
+ if (has_wall_clock) {
+ current_buffer_ptr =
+ EncodeUnsignedLeb128(current_buffer_ptr, (record.wall_clock_time - prev_wall_timestamp));
+ prev_wall_timestamp = record.wall_clock_time;
+ }
+
+ if (has_thread_cpu_clock) {
+ current_buffer_ptr =
+ EncodeUnsignedLeb128(current_buffer_ptr, (record.thread_cpu_time - prev_thread_timestamp));
+ prev_thread_timestamp = record.thread_cpu_time;
+ }
+ }
+ }
+
+ // Update the total size of the block excluding header size.
+ uint8_t* total_size_loc = init_buffer_ptr + header_size - 2;
+ Append2LE(total_size_loc, current_buffer_ptr - (init_buffer_ptr + header_size));
+ *current_index += current_buffer_ptr - init_buffer_ptr;
+}
+
void TraceWriter::FlushBuffer(uintptr_t* method_trace_entries,
size_t current_offset,
size_t tid,
@@ -1319,67 +1546,36 @@ void TraceWriter::FlushBuffer(uintptr_t* method_trace_entries,
// method id for each method. We do that by maintaining a map from id to method for each newly
// seen method. tracing_lock_ is required to serialize these.
MutexLock mu(Thread::Current(), tracing_lock_);
- size_t current_index;
+ size_t current_index = 0;
uint8_t* buffer_ptr = buf_.get();
size_t buffer_size = buffer_size_;
- if (trace_output_mode_ == TraceOutputMode::kStreaming) {
- // In streaming mode, we flush the data to file each time we flush the per-thread buffer.
- // Just reuse the entire buffer.
- current_index = 0;
- } else {
- // In non-streaming mode we only flush at the end, so retain the earlier data. If the buffer
- // is full we don't process any more entries.
- current_index = cur_offset_;
- }
- uint16_t thread_id = GetThreadEncoding(tid);
- bool has_thread_cpu_clock = UseThreadCpuClock(clock_source_);
- bool has_wall_clock = UseWallClock(clock_source_);
- const size_t record_size = GetRecordSize(clock_source_);
- DCHECK_LT(record_size, kPerThreadBufSize);
size_t num_entries = GetNumEntries(clock_source_);
size_t num_records = (kPerThreadBufSize - current_offset) / num_entries;
DCHECK_EQ((kPerThreadBufSize - current_offset) % num_entries, 0u);
+ const size_t record_size = GetRecordSize(clock_source_, trace_format_version_);
+ DCHECK_LT(record_size, kPerThreadBufSize);
- // Check if there is sufficient place in the buffer for non-streaming case. If not return early.
- if (cur_offset_ + record_size * num_records >= buffer_size &&
- trace_output_mode_ != TraceOutputMode::kStreaming) {
- overflow_ = true;
- return;
- }
-
- DCHECK_GT(buffer_size_, record_size * num_entries);
- for (size_t entry_index = kPerThreadBufSize; entry_index != current_offset;) {
- entry_index -= num_entries;
- size_t record_index = entry_index;
- uintptr_t method_and_action = method_trace_entries[record_index++];
- ArtMethod* method = reinterpret_cast<ArtMethod*>(method_and_action & kMaskTraceAction);
- CHECK(method != nullptr);
- TraceAction action = DecodeTraceAction(method_and_action);
- uint32_t thread_time = 0;
- uint32_t wall_time = 0;
- if (has_thread_cpu_clock) {
- thread_time = method_trace_entries[record_index++];
- }
- if (has_wall_clock) {
- uint64_t timestamp = method_trace_entries[record_index++];
- if (art::kRuntimePointerSize == PointerSize::k32) {
- // On 32-bit architectures timestamp is stored as two 32-bit values.
- uint64_t high_timestamp = method_trace_entries[record_index++];
- timestamp = (high_timestamp << 32 | timestamp);
- }
- wall_time = GetMicroTime(timestamp) - start_time_;
- }
+ if (trace_output_mode_ != TraceOutputMode::kStreaming) {
+ // In non-streaming mode we only flush to file at the end, so retain the earlier data. If the
+ // buffer is full we don't process any more entries.
+ current_index = cur_offset_;
- auto [method_id, is_new_method] = GetMethodEncoding(method);
- if (is_new_method && trace_output_mode_ == TraceOutputMode::kStreaming) {
- RecordMethodInfo(method_infos.find(method)->second, method_id);
+ // Check if there is sufficient place in the buffer for non-streaming case. If not return early.
+ if (cur_offset_ + record_size * num_records >= buffer_size) {
+ overflow_ = true;
+ return;
}
+ }
+ num_records_ += num_records;
- DCHECK_LT(current_index + record_size, buffer_size);
- EncodeEventEntry(
- buffer_ptr + current_index, thread_id, method_id, action, thread_time, wall_time);
- current_index += record_size;
+ DCHECK_GT(buffer_size_, record_size * num_entries);
+ if (trace_format_version_ == Trace::kFormatV1) {
+ FlushEntriesFormatV1(
+ method_trace_entries, tid, method_infos, current_offset, &current_index, buffer_ptr);
+ } else {
+ FlushEntriesFormatV2(
+ method_trace_entries, tid, method_infos, num_records, &current_index, buffer_ptr);
}
if (trace_output_mode_ == TraceOutputMode::kStreaming) {
@@ -1476,6 +1672,30 @@ void TraceWriter::EncodeEventEntry(uint8_t* ptr,
static_assert(kPacketSize == 2 + 4 + 4 + 4, "Packet size incorrect.");
}
+void TraceWriter::EncodeEventBlockHeader(uint8_t* ptr,
+ uint32_t thread_id,
+ uint32_t init_method_index,
+ uint32_t init_thread_clock,
+ uint32_t init_wall_clock,
+ uint16_t num_records) {
+ ptr[0] = kEntryHeaderV2;
+ Append4LE(ptr + 1, thread_id);
+ Append4LE(ptr + 5, init_method_index);
+ ptr += 9;
+
+ if (UseThreadCpuClock(clock_source_)) {
+ Append4LE(ptr, init_thread_clock);
+ ptr += 4;
+ }
+ if (UseWallClock(clock_source_)) {
+ Append4LE(ptr, init_wall_clock);
+ ptr += 4;
+ }
+ // This specifies the total number of records encoded in the block using lebs. We encode the first
+ // entry in the header, so the block contains one less than num_records.
+ Append2LE(ptr, num_records - 1);
+}
+
void TraceWriter::EnsureSpace(uint8_t* buffer,
size_t* current_index,
size_t buffer_size,
@@ -1498,6 +1718,7 @@ void TraceWriter::DumpMethodList(std::ostream& os) {
}
void TraceWriter::DumpThreadList(std::ostream& os) {
+ MutexLock mu(Thread::Current(), tracing_lock_);
for (const auto& it : threads_list_) {
os << it.first << "\t" << it.second << "\n";
}
diff --git a/runtime/trace.h b/runtime/trace.h
index 7ff68132fc..227955ff2d 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -48,6 +48,8 @@ class DexFile;
class ShadowFrame;
class Thread;
+struct MethodTraceRecord;
+
using DexIndexBitSet = std::bitset<65536>;
enum TracingMode {
@@ -146,6 +148,7 @@ class TraceWriter {
TraceClockSource clock_source,
size_t buffer_size,
int num_trace_buffers,
+ int trace_format_version,
uint32_t clock_overhead_ns);
// This encodes all the events in the per-thread trace buffer and writes it to the trace file /
@@ -210,6 +213,25 @@ class TraceWriter {
int GetMethodTraceIndex(uintptr_t* current_buffer);
private:
+ void ReadValuesFromRecord(uintptr_t* method_trace_entries,
+ size_t record_index,
+ MethodTraceRecord& record,
+ bool has_thread_cpu_clock,
+ bool has_wall_clock);
+
+ void FlushEntriesFormatV2(uintptr_t* method_trace_entries,
+ size_t tid,
+ const std::unordered_map<ArtMethod*, std::string>& method_infos,
+ size_t num_records,
+ size_t* current_index,
+ uint8_t* init_buffer_ptr) REQUIRES(tracing_lock_);
+
+ void FlushEntriesFormatV1(uintptr_t* method_trace_entries,
+ size_t tid,
+ const std::unordered_map<ArtMethod*, std::string>& method_infos,
+ size_t end_offset,
+ size_t* current_index,
+ uint8_t* buffer_ptr) REQUIRES(tracing_lock_);
// Get a 32-bit id for the method and specify if the method hasn't been seen before. If this is
// the first time we see this method record information (like method name, declaring class etc.,)
// about the method.
@@ -237,6 +259,15 @@ class TraceWriter {
uint32_t thread_clock_diff,
uint32_t wall_clock_diff) REQUIRES(tracing_lock_);
+ // Encodes the header for the events block. This assumes that there is enough space reserved to
+ // encode the entry.
+ void EncodeEventBlockHeader(uint8_t* ptr,
+ uint32_t thread_id,
+ uint32_t method_index,
+ uint32_t init_thread_clock_time,
+ uint32_t init_wall_clock_time,
+ uint16_t num_records) REQUIRES(tracing_lock_);
+
// Ensures there is sufficient space in the buffer to record the requested_size. If there is not
// enough sufficient space the current contents of the buffer are written to the file and
// current_index is reset to 0. This doesn't check if buffer_size is big enough to hold the
@@ -253,7 +284,7 @@ class TraceWriter {
// Methods to output traced methods and threads.
void DumpMethodList(std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!tracing_lock_);
- void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_) REQUIRES(tracing_lock_);
+ void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_, !tracing_lock_);
// File to write trace data out to, null if direct to ddms.
std::unique_ptr<File> trace_file_;
@@ -291,12 +322,18 @@ class TraceWriter {
// Size of buf_.
const size_t buffer_size_;
+ // Version of trace output
+ const int trace_format_version_;
+
// Time trace was created.
const uint64_t start_time_;
// Did we overflow the buffer recording traces?
bool overflow_;
+ // Total number of records flushed to file.
+ size_t num_records_;
+
// Clock overhead.
const uint32_t clock_overhead_ns_;
@@ -322,6 +359,11 @@ class Trace final : public instrumentation::InstrumentationListener {
kTraceClockSourceThreadCpu = 0x100,
};
+ static const int kFormatV1 = 0;
+ static const int kFormatV2 = 1;
+ static const int kTraceFormatVersionFlagMask = 0b110;
+ static const int kTraceFormatVersionShift = 1;
+
enum class TraceMode {
kMethodTracing,
kSampling
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index 75be6747e8..bd8bbe0108 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -268,11 +268,14 @@ struct EXPORT WellKnownClasses {
java_lang_Integer_IntegerCache;
static constexpr ClassFromField<&java_lang_Long_LongCache_cache> java_lang_Long_LongCache;
+ static constexpr ClassFromMethod<&java_lang_Boolean_valueOf> java_lang_Boolean;
static constexpr ClassFromMethod<&java_lang_Byte_valueOf> java_lang_Byte;
static constexpr ClassFromMethod<&java_lang_Character_valueOf> java_lang_Character;
static constexpr ClassFromMethod<&java_lang_Short_valueOf> java_lang_Short;
static constexpr ClassFromMethod<&java_lang_Integer_valueOf> java_lang_Integer;
+ static constexpr ClassFromMethod<&java_lang_Float_valueOf> java_lang_Float;
static constexpr ClassFromMethod<&java_lang_Long_valueOf> java_lang_Long;
+ static constexpr ClassFromMethod<&java_lang_Double_valueOf> java_lang_Double;
};
} // namespace art
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index 3dcc1fcc4a..be3a2011d9 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -30,6 +30,7 @@
#include <jni.h>
#include <nativebridge/native_bridge.h>
+#include "base/casts.h"
#include "base/macros.h"
struct NativeBridgeMethod {
@@ -309,7 +310,9 @@ class SignalHandlerTestStatus {
void AssertState(TestStatus expected) {
if (state_ != expected) {
- printf("ERROR: unexpected state, was %d, expected %d\n", state_, expected);
+ printf("ERROR: unexpected state, was %d, expected %d\n",
+ art::enum_cast<int>(state_),
+ art::enum_cast<int>(expected));
}
}
};
diff --git a/test/2246-trace-v2/Android.bp b/test/2246-trace-v2/Android.bp
new file mode 100644
index 0000000000..6655926552
--- /dev/null
+++ b/test/2246-trace-v2/Android.bp
@@ -0,0 +1,40 @@
+// Generated by `regen-test-files`. Do not edit manually.
+
+// Build rules for ART run-test `2246-trace-v2`.
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "art_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["art_license"],
+}
+
+// Test's Dex code.
+java_test {
+ name: "art-run-test-2246-trace-v2",
+ defaults: ["art-run-test-defaults"],
+ test_config_template: ":art-run-test-target-no-test-suite-tag-template",
+ srcs: ["src/**/*.java"],
+ data: [
+ ":art-run-test-2246-trace-v2-expected-stdout",
+ ":art-run-test-2246-trace-v2-expected-stderr",
+ ],
+}
+
+// Test's expected standard output.
+genrule {
+ name: "art-run-test-2246-trace-v2-expected-stdout",
+ out: ["art-run-test-2246-trace-v2-expected-stdout.txt"],
+ srcs: ["expected-stdout.txt"],
+ cmd: "cp -f $(in) $(out)",
+}
+
+// Test's expected standard error.
+genrule {
+ name: "art-run-test-2246-trace-v2-expected-stderr",
+ out: ["art-run-test-2246-trace-v2-expected-stderr.txt"],
+ srcs: ["expected-stderr.txt"],
+ cmd: "cp -f $(in) $(out)",
+}
diff --git a/test/2246-trace-v2/dump_trace.cc b/test/2246-trace-v2/dump_trace.cc
new file mode 100644
index 0000000000..6a094921fe
--- /dev/null
+++ b/test/2246-trace-v2/dump_trace.cc
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+
+#include <map>
+#include <memory>
+
+#include "base/leb128.h"
+#include "base/os.h"
+#include "base/unix_file/fd_file.h"
+#include "jni.h"
+
+namespace art {
+namespace {
+
+static const int kMagicValue = 0x574f4c53;
+static const int kVersionDualClock = 0xf5;
+static const int kThreadInfo = 0;
+static const int kMethodInfo = 1;
+static const int kTraceEntries = 2;
+static const int kTraceActionBits = 2;
+static const int kSummary = 3;
+
+int ReadNumber(int num_bytes, uint8_t* header) {
+ int number = 0;
+ for (int i = 0; i < num_bytes; i++) {
+ number += header[i] << (i * 8);
+ }
+ return number;
+}
+
+bool ProcessThreadOrMethodInfo(std::unique_ptr<File>& file, std::map<int, std::string>& name_map) {
+ uint8_t header[6];
+ if (!file->ReadFully(&header, sizeof(header))) {
+ printf("Couldn't read header\n");
+ return false;
+ }
+ int id = ReadNumber(4, header);
+ int length = ReadNumber(2, header + 4);
+
+ char* name = new char[length];
+ if (!file->ReadFully(name, length)) {
+ delete[] name;
+ return false;
+ }
+ std::string str(name, length);
+ std::replace(str.begin(), str.end(), '\t', ' ');
+ name_map.emplace(id, str);
+ delete[] name;
+ return true;
+}
+
+void print_trace_entry(const std::string& thread_name,
+ const std::string& method_name,
+ int* current_depth,
+ int event_type) {
+ std::string entry;
+ for (int i = 0; i < *current_depth; i++) {
+ entry.push_back('.');
+ }
+ if (event_type == 0) {
+ *current_depth += 1;
+ entry.append(".>> ");
+ } else if (event_type == 1) {
+ *current_depth -= 1;
+ entry.append("<< ");
+ } else if (event_type == 2) {
+ *current_depth -= 1;
+ entry.append("<<E ");
+ } else {
+ entry.append("?? ");
+ }
+ entry.append(thread_name);
+ entry.append(" ");
+ entry.append(method_name);
+ printf("%s", entry.c_str());
+}
+
+bool ProcessTraceEntries(std::unique_ptr<File>& file,
+ std::map<int, int>& current_depth_map,
+ std::map<int, std::string>& thread_map,
+ std::map<int, std::string>& method_map,
+ bool is_dual_clock,
+ const char* thread_name_filter) {
+ uint8_t header[20];
+ int header_size = is_dual_clock ? 20 : 16;
+ if (!file->ReadFully(header, header_size)) {
+ return false;
+ }
+
+ uint32_t thread_id = ReadNumber(4, header);
+ uint32_t method_value = ReadNumber(4, header + 4);
+ int offset = 8;
+ if (is_dual_clock) {
+ // Read timestamp.
+ ReadNumber(4, header + offset);
+ offset += 4;
+ }
+ // Read timestamp.
+ ReadNumber(4, header + offset);
+ offset += 4;
+ int num_records = ReadNumber(2, header + offset);
+ offset += 2;
+ int total_size = ReadNumber(2, header + offset);
+ uint8_t* buffer = new uint8_t[total_size];
+ if (!file->ReadFully(buffer, total_size)) {
+ delete[] buffer;
+ return false;
+ }
+
+ const uint8_t* current_buffer_ptr = buffer;
+ int32_t method_id = method_value >> kTraceActionBits;
+ uint8_t event_type = method_value & 0x3;
+ int current_depth = 0;
+ if (current_depth_map.find(thread_id) != current_depth_map.end()) {
+ // Get the current call stack depth. If it is the first method we are seeing on this thread
+  // then this map wouldn't have an entry and we start with a depth of 0.
+ current_depth = current_depth_map[thread_id];
+ }
+ std::string thread_name = thread_map[thread_id];
+ bool print_thread_events = (thread_name.compare(thread_name_filter) == 0);
+ if (method_map.find(method_id) == method_map.end()) {
+ LOG(FATAL) << "No entry for init method " << method_id;
+ }
+ if (print_thread_events) {
+ print_trace_entry(thread_name, method_map[method_id], &current_depth, event_type);
+ }
+ for (int i = 0; i < num_records; i++) {
+ int32_t diff = 0;
+ bool success = DecodeSignedLeb128Checked(&current_buffer_ptr, buffer + total_size - 1, &diff);
+ if (!success) {
+ LOG(FATAL) << "Reading past the buffer???";
+ }
+ int32_t curr_method_value = method_value + diff;
+ method_id = curr_method_value >> kTraceActionBits;
+ event_type = curr_method_value & 0x3;
+ if (print_thread_events) {
+ print_trace_entry(thread_name, method_map[method_id], &current_depth, event_type);
+ }
+ // Read timestamps
+ DecodeUnsignedLeb128(&current_buffer_ptr);
+ if (is_dual_clock) {
+ DecodeUnsignedLeb128(&current_buffer_ptr);
+ }
+ }
+ current_depth_map[thread_id] = current_depth;
+ return true;
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_dumpTrace(JNIEnv* env,
+ jclass,
+ jstring fileName,
+ jstring threadName) {
+ const char* file_name = env->GetStringUTFChars(fileName, nullptr);
+ const char* thread_name = env->GetStringUTFChars(threadName, nullptr);
+ std::map<int, std::string> thread_map;
+ std::map<int, std::string> method_map;
+ std::map<int, int> current_depth_map;
+
+ std::unique_ptr<File> file(OS::OpenFileForReading(file_name));
+ if (file == nullptr) {
+ printf("Couldn't open file\n");
+ return;
+ }
+
+ uint8_t header[32];
+ const bool success = file->ReadFully(&header, sizeof(header));
+ if (!success) {
+ printf("Couldn't read header\n");
+ return;
+ }
+ int magic_value = ReadNumber(4, header);
+ if (magic_value != kMagicValue) {
+ printf("Incorrect magic value\n");
+ return;
+ }
+ int version = ReadNumber(2, header + 4);
+ if (success) {
+ printf("version=%0x\n", version);
+ }
+
+ bool is_dual_clock = (version == kVersionDualClock);
+ bool has_entries = true;
+ while (has_entries) {
+ uint8_t entry_header;
+ if (!file->ReadFully(&entry_header, sizeof(entry_header))) {
+ break;
+ }
+ switch (entry_header) {
+ case kThreadInfo:
+ if (!ProcessThreadOrMethodInfo(file, thread_map)) {
+ has_entries = false;
+ }
+ break;
+ case kMethodInfo:
+ if (!ProcessThreadOrMethodInfo(file, method_map)) {
+ has_entries = false;
+ }
+ break;
+ case kTraceEntries:
+ ProcessTraceEntries(
+ file, current_depth_map, thread_map, method_map, is_dual_clock, thread_name);
+ break;
+ case kSummary:
+ has_entries = false;
+ break;
+ default:
+ printf("Invalid Header %d\n", entry_header);
+ has_entries = false;
+ break;
+ }
+ }
+
+ env->ReleaseStringUTFChars(fileName, file_name);
+ env->ReleaseStringUTFChars(threadName, thread_name);
+}
+
+} // namespace
+} // namespace art
diff --git a/test/2246-trace-v2/expected-stderr.txt b/test/2246-trace-v2/expected-stderr.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/2246-trace-v2/expected-stderr.txt
diff --git a/test/2246-trace-v2/expected-stdout.txt b/test/2246-trace-v2/expected-stdout.txt
new file mode 100644
index 0000000000..53430a6195
--- /dev/null
+++ b/test/2246-trace-v2/expected-stdout.txt
@@ -0,0 +1,773 @@
+JNI_OnLoad called
+***** streaming test - dual clock *******
+version=f5
+.>> TestThread2246 java.lang.Thread run ()V Thread.java
+..>> TestThread2246 Main$$ExternalSyntheticLambda0 run ()V D8$$SyntheticClass
+...>> TestThread2246 Main lambda$testTracing$0 (IZLMain;)V Main.java
+....>> TestThread2246 Main$VMDebug startMethodTracingV2 (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V Main.java
+.....>> TestThread2246 java.lang.reflect.Method invoke (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; Method.java
+......>> TestThread2246 dalvik.system.VMDebug startMethodTracing (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V VMDebug.java
+.......>> TestThread2246 dalvik.system.VMDebug startMethodTracingFd (Ljava/lang/String;IIIZIZ)V VMDebug.java
+.......<< TestThread2246 dalvik.system.VMDebug startMethodTracingFd (Ljava/lang/String;IIIZIZ)V VMDebug.java
+......<< TestThread2246 dalvik.system.VMDebug startMethodTracing (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V VMDebug.java
+.....<< TestThread2246 java.lang.reflect.Method invoke (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; Method.java
+....<< TestThread2246 Main$VMDebug startMethodTracingV2 (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V Main.java
+....>> TestThread2246 Main <init> ()V Main.java
+.....>> TestThread2246 java.lang.Object <init> ()V Object.java
+.....<< TestThread2246 java.lang.Object <init> ()V Object.java
+....<< TestThread2246 Main <init> ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWork ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWork ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main$VMDebug $noinline$stopMethodTracing ()V Main.java
+.....>> TestThread2246 java.lang.reflect.Method invoke (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; Method.java
+......>> TestThread2246 dalvik.system.VMDebug stopMethodTracing ()V VMDebug.java
+version=f5
+.>> main Main main ([Ljava/lang/String;)V Main.java
+..>> main Main testTracing (ZII)V Main.java
+...>> main Main$VMDebug startMethodTracingV2 (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V Main.java
+....>> main java.lang.reflect.Method invoke (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; Method.java
+.....>> main dalvik.system.VMDebug startMethodTracing (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V VMDebug.java
+......>> main dalvik.system.VMDebug startMethodTracingFd (Ljava/lang/String;IIIZIZ)V VMDebug.java
+......<< main dalvik.system.VMDebug startMethodTracingFd (Ljava/lang/String;IIIZIZ)V VMDebug.java
+.....<< main dalvik.system.VMDebug startMethodTracing (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V VMDebug.java
+....<< main java.lang.reflect.Method invoke (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; Method.java
+...<< main Main$VMDebug startMethodTracingV2 (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V Main.java
+...>> main Main $noinline$doSomeWork ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWork ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main doSomeWorkThrow ()V Main.java
+....>> main Main callThrowFunction ()V Main.java
+.....>> main java.lang.Exception <init> (Ljava/lang/String;)V Exception.java
+......>> main java.lang.Throwable <init> (Ljava/lang/String;)V Throwable.java
+.......>> main java.lang.Object <init> ()V Object.java
+.......<< main java.lang.Object <init> ()V Object.java
+.......>> main java.util.Collections emptyList ()Ljava/util/List; Collections.java
+.......<< main java.util.Collections emptyList ()Ljava/util/List; Collections.java
+.......>> main java.lang.Throwable fillInStackTrace ()Ljava/lang/Throwable; Throwable.java
+........>> main java.lang.Throwable nativeFillInStackTrace ()Ljava/lang/Object; Throwable.java
+........<< main java.lang.Throwable nativeFillInStackTrace ()Ljava/lang/Object; Throwable.java
+.......<< main java.lang.Throwable fillInStackTrace ()Ljava/lang/Throwable; Throwable.java
+......<< main java.lang.Throwable <init> (Ljava/lang/String;)V Throwable.java
+.....<< main java.lang.Exception <init> (Ljava/lang/String;)V Exception.java
+....<<E main Main callThrowFunction ()V Main.java
+...<< main Main doSomeWorkThrow ()V Main.java
+...>> main Main$VMDebug $noinline$stopMethodTracing ()V Main.java
+....>> main java.lang.reflect.Method invoke (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; Method.java
+.....>> main dalvik.system.VMDebug stopMethodTracing ()V VMDebug.java
+***** streaming test - wall clock *******
+version=f4
+.>> TestThread2246 java.lang.Thread run ()V Thread.java
+..>> TestThread2246 Main$$ExternalSyntheticLambda0 run ()V D8$$SyntheticClass
+...>> TestThread2246 Main lambda$testTracing$0 (IZLMain;)V Main.java
+....>> TestThread2246 Main$VMDebug startMethodTracingV2 (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V Main.java
+.....>> TestThread2246 java.lang.reflect.Method invoke (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; Method.java
+......>> TestThread2246 dalvik.system.VMDebug startMethodTracing (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V VMDebug.java
+.......>> TestThread2246 dalvik.system.VMDebug startMethodTracingFd (Ljava/lang/String;IIIZIZ)V VMDebug.java
+.......<< TestThread2246 dalvik.system.VMDebug startMethodTracingFd (Ljava/lang/String;IIIZIZ)V VMDebug.java
+......<< TestThread2246 dalvik.system.VMDebug startMethodTracing (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V VMDebug.java
+.....<< TestThread2246 java.lang.reflect.Method invoke (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; Method.java
+....<< TestThread2246 Main$VMDebug startMethodTracingV2 (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V Main.java
+....>> TestThread2246 Main <init> ()V Main.java
+.....>> TestThread2246 java.lang.Object <init> ()V Object.java
+.....<< TestThread2246 java.lang.Object <init> ()V Object.java
+....<< TestThread2246 Main <init> ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWork ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWork ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+.....>> TestThread2246 Main callOuterFunction ()V Main.java
+......>> TestThread2246 Main callLeafFunction ()V Main.java
+......<< TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callOuterFunction ()V Main.java
+.....>> TestThread2246 Main callLeafFunction ()V Main.java
+.....<< TestThread2246 Main callLeafFunction ()V Main.java
+....<< TestThread2246 Main $noinline$doSomeWorkJIT ()V Main.java
+....>> TestThread2246 Main$VMDebug $noinline$stopMethodTracing ()V Main.java
+.....>> TestThread2246 java.lang.reflect.Method invoke (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; Method.java
+......>> TestThread2246 dalvik.system.VMDebug stopMethodTracing ()V VMDebug.java
+version=f4
+.>> main Main main ([Ljava/lang/String;)V Main.java
+..>> main Main testTracing (ZII)V Main.java
+...>> main Main$VMDebug startMethodTracingV2 (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V Main.java
+....>> main java.lang.reflect.Method invoke (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; Method.java
+.....>> main dalvik.system.VMDebug startMethodTracing (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V VMDebug.java
+......>> main dalvik.system.VMDebug startMethodTracingFd (Ljava/lang/String;IIIZIZ)V VMDebug.java
+......<< main dalvik.system.VMDebug startMethodTracingFd (Ljava/lang/String;IIIZIZ)V VMDebug.java
+.....<< main dalvik.system.VMDebug startMethodTracing (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V VMDebug.java
+....<< main java.lang.reflect.Method invoke (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; Method.java
+...<< main Main$VMDebug startMethodTracingV2 (Ljava/lang/String;Ljava/io/FileDescriptor;IIZIZ)V Main.java
+...>> main Main $noinline$doSomeWork ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWork ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main $noinline$doSomeWorkJIT ()V Main.java
+....>> main Main callOuterFunction ()V Main.java
+.....>> main Main callLeafFunction ()V Main.java
+.....<< main Main callLeafFunction ()V Main.java
+....<< main Main callOuterFunction ()V Main.java
+....>> main Main callLeafFunction ()V Main.java
+....<< main Main callLeafFunction ()V Main.java
+...<< main Main $noinline$doSomeWorkJIT ()V Main.java
+...>> main Main doSomeWorkThrow ()V Main.java
+....>> main Main callThrowFunction ()V Main.java
+.....>> main java.lang.Exception <init> (Ljava/lang/String;)V Exception.java
+......>> main java.lang.Throwable <init> (Ljava/lang/String;)V Throwable.java
+.......>> main java.lang.Object <init> ()V Object.java
+.......<< main java.lang.Object <init> ()V Object.java
+.......>> main java.util.Collections emptyList ()Ljava/util/List; Collections.java
+.......<< main java.util.Collections emptyList ()Ljava/util/List; Collections.java
+.......>> main java.lang.Throwable fillInStackTrace ()Ljava/lang/Throwable; Throwable.java
+........>> main java.lang.Throwable nativeFillInStackTrace ()Ljava/lang/Object; Throwable.java
+........<< main java.lang.Throwable nativeFillInStackTrace ()Ljava/lang/Object; Throwable.java
+.......<< main java.lang.Throwable fillInStackTrace ()Ljava/lang/Throwable; Throwable.java
+......<< main java.lang.Throwable <init> (Ljava/lang/String;)V Throwable.java
+.....<< main java.lang.Exception <init> (Ljava/lang/String;)V Exception.java
+....<<E main Main callThrowFunction ()V Main.java
+...<< main Main doSomeWorkThrow ()V Main.java
+...>> main Main$VMDebug $noinline$stopMethodTracing ()V Main.java
+....>> main java.lang.reflect.Method invoke (Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; Method.java
+.....>> main dalvik.system.VMDebug stopMethodTracing ()V VMDebug.java
diff --git a/test/2246-trace-v2/info.txt b/test/2246-trace-v2/info.txt
new file mode 100644
index 0000000000..fa93a971b4
--- /dev/null
+++ b/test/2246-trace-v2/info.txt
@@ -0,0 +1,2 @@
+Tests streaming method tracing. It verifies the format of the generated file is
+as expected.
diff --git a/test/2246-trace-v2/run.py b/test/2246-trace-v2/run.py
new file mode 100644
index 0000000000..4c0d8584f7
--- /dev/null
+++ b/test/2246-trace-v2/run.py
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright (C) 2024 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def run(ctx, args):
+ # The expected output on non debuggable configurations isn't consistent.
+ # TODO(mythria): Investigate why the output is different and update the test to work for
+ # non debuggable runtimes too.
+ ctx.default_run(args, Xcompiler_option=["--debuggable"])
diff --git a/test/2246-trace-v2/src/Main.java b/test/2246-trace-v2/src/Main.java
new file mode 100644
index 0000000000..b5ccb9d0ec
--- /dev/null
+++ b/test/2246-trace-v2/src/Main.java
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.FileDescriptor;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.lang.reflect.Method;
+
+public class Main {
+ private static final String TEMP_FILE_NAME_PREFIX = "test";
+ private static final String TEMP_FILE_NAME_SUFFIX = ".trace";
+ private static final int WALL_CLOCK_FLAG = 0x010;
+ private static final int TRACE_OUTPUT_V2_FLAG = 0b010;
+ private static final int STREAMING_DUAL_CLOCK_VERSION = 1;
+ private static final int STREAMING_WALL_CLOCK_VERSION = 1;
+ private static File file;
+
+ public static void main(String[] args) throws Exception {
+ System.loadLibrary(args[0]);
+ String name = System.getProperty("java.vm.name");
+ if (!"Dalvik".equals(name)) {
+ System.out.println("This test is not supported on " + name);
+ return;
+ }
+
+ ensureJitCompiled(Main.class, "$noinline$doSomeWorkJIT");
+
+ System.out.println("***** streaming test - dual clock *******");
+ testTracing(
+ /* streaming=*/true, /* flags= */ 0, STREAMING_DUAL_CLOCK_VERSION);
+
+ System.out.println("***** streaming test - wall clock *******");
+ testTracing(
+ /* streaming=*/true, /* flags= */ WALL_CLOCK_FLAG, STREAMING_WALL_CLOCK_VERSION);
+ }
+
+ public static void testTracing(boolean streaming, int flags, int expected_version)
+ throws Exception {
+ Main m = new Main();
+ Thread t = new Thread(() -> {
+ try {
+ file = createTempFile();
+ FileOutputStream out_file = new FileOutputStream(file);
+ VMDebug.startMethodTracingV2(
+ file.getPath(), out_file.getFD(), 0, flags, false, 0, streaming);
+ Main m1 = new Main();
+ m1.$noinline$doSomeWork();
+ // Call JITed code multiple times to flush out any issues with timestamps.
+ for (int i = 0; i < 20; i++) {
+ m.$noinline$doSomeWorkJIT();
+ }
+ VMDebug.$noinline$stopMethodTracing();
+ out_file.close();
+ dumpTrace(file.getAbsolutePath(), "TestThread2246");
+ file.delete();
+ } catch (Exception e) {
+ System.out.println("Exception in thread " + e);
+ e.printStackTrace();
+ } finally {
+ file.delete();
+ }
+ }, "TestThread2246");
+ try {
+ if (VMDebug.getMethodTracingMode() != 0) {
+ VMDebug.$noinline$stopMethodTracing();
+ }
+
+ t.start();
+ t.join();
+
+ file = createTempFile();
+ FileOutputStream main_out_file = new FileOutputStream(file);
+ VMDebug.startMethodTracingV2(
+ file.getPath(), main_out_file.getFD(), 0, flags, false, 0, streaming);
+ m.$noinline$doSomeWork();
+ // Call JITed code multiple times to flush out any issues with timestamps.
+ for (int i = 0; i < 20; i++) {
+ m.$noinline$doSomeWorkJIT();
+ }
+ m.doSomeWorkThrow();
+ VMDebug.$noinline$stopMethodTracing();
+ main_out_file.close();
+ dumpTrace(file.getAbsolutePath(), "main");
+ file.delete();
+ } finally {
+ file.delete();
+ }
+ }
+
+ private static File createTempFile() throws Exception {
+ try {
+ return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+ } catch (IOException e) {
+ System.setProperty("java.io.tmpdir", "/data/local/tmp");
+ try {
+ return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+ } catch (IOException e2) {
+ System.setProperty("java.io.tmpdir", "/sdcard");
+ return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX);
+ }
+ }
+ }
+
+ public void callOuterFunction() {
+ callLeafFunction();
+ }
+
+ public void callLeafFunction() {}
+
+ public void $noinline$doSomeWork() {
+ callOuterFunction();
+ callLeafFunction();
+ }
+
+ public void $noinline$doSomeWorkJIT() {
+ callOuterFunction();
+ callLeafFunction();
+ }
+
+ public void callThrowFunction() throws Exception {
+ throw new Exception("test");
+ }
+
+ public void doSomeWorkThrow() {
+ try {
+ callThrowFunction();
+ } catch (Exception e) {
+ }
+ }
+
+ private static class VMDebug {
+ private static final Method startMethodTracingMethod;
+ private static final Method stopMethodTracingMethod;
+ private static final Method getMethodTracingModeMethod;
+ static {
+ try {
+ Class<?> c = Class.forName("dalvik.system.VMDebug");
+ startMethodTracingMethod = c.getDeclaredMethod("startMethodTracing", String.class,
+ FileDescriptor.class, Integer.TYPE, Integer.TYPE, Boolean.TYPE,
+ Integer.TYPE, Boolean.TYPE);
+ stopMethodTracingMethod = c.getDeclaredMethod("stopMethodTracing");
+ getMethodTracingModeMethod = c.getDeclaredMethod("getMethodTracingMode");
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public static void startMethodTracingV2(String filename, FileDescriptor fd, int bufferSize,
+ int flags, boolean samplingEnabled, int intervalUs, boolean streaming)
+ throws Exception {
+ startMethodTracingMethod.invoke(null, filename, fd, bufferSize,
+ flags | TRACE_OUTPUT_V2_FLAG, samplingEnabled, intervalUs, streaming);
+ }
+ public static void $noinline$stopMethodTracing() throws Exception {
+ stopMethodTracingMethod.invoke(null);
+ }
+ public static int getMethodTracingMode() throws Exception {
+ return (int) getMethodTracingModeMethod.invoke(null);
+ }
+ }
+
+ private static native void ensureJitCompiled(Class<?> cls, String methodName);
+ private static native void dumpTrace(String fileName, String threadName);
+}
diff --git a/test/2247-checker-write-barrier-elimination/Android.bp b/test/2247-checker-write-barrier-elimination/Android.bp
index 5848cb496e..c9744e9b00 100644
--- a/test/2247-checker-write-barrier-elimination/Android.bp
+++ b/test/2247-checker-write-barrier-elimination/Android.bp
@@ -15,7 +15,7 @@ package {
java_test {
name: "art-run-test-2247-checker-write-barrier-elimination",
defaults: ["art-run-test-defaults"],
- test_config_template: ":art-run-test-target-no-test-suite-tag-template",
+ test_config_template: ":art-run-test-target-template",
srcs: ["src/**/*.java"],
data: [
":art-run-test-2247-checker-write-barrier-elimination-expected-stdout",
diff --git a/test/2272-checker-codegen-honor-write-barrier-kind/Android.bp b/test/2272-checker-codegen-honor-write-barrier-kind/Android.bp
new file mode 100644
index 0000000000..dc4292b4d8
--- /dev/null
+++ b/test/2272-checker-codegen-honor-write-barrier-kind/Android.bp
@@ -0,0 +1,43 @@
+// Generated by `regen-test-files`. Do not edit manually.
+
+// Build rules for ART run-test `2272-checker-codegen-honor-write-barrier-kind`.
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "art_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["art_license"],
+}
+
+// Test's Dex code.
+java_test {
+ name: "art-run-test-2272-checker-codegen-honor-write-barrier-kind",
+ defaults: ["art-run-test-defaults"],
+ test_config_template: ":art-run-test-target-no-test-suite-tag-template",
+ srcs: ["src/**/*.java"],
+ data: [
+ ":art-run-test-2272-checker-codegen-honor-write-barrier-kind-expected-stdout",
+ ":art-run-test-2272-checker-codegen-honor-write-barrier-kind-expected-stderr",
+ ],
+ // Include the Java source files in the test's artifacts, to make Checker assertions
+ // available to the TradeFed test runner.
+ include_srcs: true,
+}
+
+// Test's expected standard output.
+genrule {
+ name: "art-run-test-2272-checker-codegen-honor-write-barrier-kind-expected-stdout",
+ out: ["art-run-test-2272-checker-codegen-honor-write-barrier-kind-expected-stdout.txt"],
+ srcs: ["expected-stdout.txt"],
+ cmd: "cp -f $(in) $(out)",
+}
+
+// Test's expected standard error.
+genrule {
+ name: "art-run-test-2272-checker-codegen-honor-write-barrier-kind-expected-stderr",
+ out: ["art-run-test-2272-checker-codegen-honor-write-barrier-kind-expected-stderr.txt"],
+ srcs: ["expected-stderr.txt"],
+ cmd: "cp -f $(in) $(out)",
+}
diff --git a/test/2272-checker-codegen-honor-write-barrier-kind/src/Main.java b/test/2272-checker-codegen-honor-write-barrier-kind/src/Main.java
index f07286b5b9..7d67249c6d 100644
--- a/test/2272-checker-codegen-honor-write-barrier-kind/src/Main.java
+++ b/test/2272-checker-codegen-honor-write-barrier-kind/src/Main.java
@@ -42,21 +42,20 @@ public class Main {
/// CHECK-START: java.lang.String[] Main.$noinline$testArraySetsHonorWriteBarrier(java.lang.String[], java.lang.String) prepare_for_register_allocation (before)
/// CHECK: <<Null:l\d+>> NullConstant
/// CHECK: <<BT:l\d+>> BoundType [<<Null>>]
- /// CHECK: ArraySet [<<arr:l\d+>>,<<index:i\d+>>,<<BT>>] value_can_be_null:true needs_type_check:false can_trigger_gc:false write_barrier_kind:EmitBeingReliedOn
- /// CHECK: ArraySet value_can_be_null:true needs_type_check:false can_trigger_gc:false write_barrier_kind:DontEmit
+ /// CHECK: ArraySet [<<arr:l\d+>>,<<index:i\d+>>,<<BT>>] value_can_be_null:true needs_type_check:false can_trigger_gc:false write_barrier_kind:DontEmit
+ /// CHECK: ArraySet value_can_be_null:true needs_type_check:false can_trigger_gc:false write_barrier_kind:EmitNotBeingReliedOn
/// CHECK-START: java.lang.String[] Main.$noinline$testArraySetsHonorWriteBarrier(java.lang.String[], java.lang.String) prepare_for_register_allocation (after)
/// CHECK: <<Null:l\d+>> NullConstant
- /// CHECK: ArraySet [<<arr:l\d+>>,<<index:i\d+>>,<<Null>>] value_can_be_null:true needs_type_check:false can_trigger_gc:false write_barrier_kind:EmitBeingReliedOn
- /// CHECK: ArraySet value_can_be_null:true needs_type_check:false can_trigger_gc:false write_barrier_kind:DontEmit
+ /// CHECK: ArraySet [<<arr:l\d+>>,<<index:i\d+>>,<<Null>>] value_can_be_null:true needs_type_check:false can_trigger_gc:false write_barrier_kind:DontEmit
+ /// CHECK: ArraySet value_can_be_null:true needs_type_check:false can_trigger_gc:false write_barrier_kind:EmitNotBeingReliedOn
/// CHECK-START: java.lang.String[] Main.$noinline$testArraySetsHonorWriteBarrier(java.lang.String[], java.lang.String) prepare_for_register_allocation (after)
/// CHECK-NOT: BoundType
/// CHECK-START: java.lang.String[] Main.$noinline$testArraySetsHonorWriteBarrier(java.lang.String[], java.lang.String) disassembly (after)
- /// CHECK: ArraySet value_can_be_null:true needs_type_check:false can_trigger_gc:false write_barrier_kind:EmitBeingReliedOn
- // / CHECK: ; card_table
/// CHECK: ArraySet value_can_be_null:true needs_type_check:false can_trigger_gc:false write_barrier_kind:DontEmit
+ /// CHECK: ArraySet value_can_be_null:true needs_type_check:false can_trigger_gc:false write_barrier_kind:EmitNotBeingReliedOn
private static java.lang.String[] $noinline$testArraySetsHonorWriteBarrier(
String[] arr, String o2) {
Object o = null;
diff --git a/test/2273-checker-unreachable-intrinsics/Android.bp b/test/2273-checker-unreachable-intrinsics/Android.bp
new file mode 100644
index 0000000000..be4dd29437
--- /dev/null
+++ b/test/2273-checker-unreachable-intrinsics/Android.bp
@@ -0,0 +1,43 @@
+// Generated by `regen-test-files`. Do not edit manually.
+
+// Build rules for ART run-test `2273-checker-unreachable-intrinsics`.
+
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "art_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["art_license"],
+}
+
+// Test's Dex code.
+java_test {
+ name: "art-run-test-2273-checker-unreachable-intrinsics",
+ defaults: ["art-run-test-defaults"],
+ test_config_template: ":art-run-test-target-template",
+ srcs: ["src/**/*.java"],
+ data: [
+ ":art-run-test-2273-checker-unreachable-intrinsics-expected-stdout",
+ ":art-run-test-2273-checker-unreachable-intrinsics-expected-stderr",
+ ],
+ // Include the Java source files in the test's artifacts, to make Checker assertions
+ // available to the TradeFed test runner.
+ include_srcs: true,
+}
+
+// Test's expected standard output.
+genrule {
+ name: "art-run-test-2273-checker-unreachable-intrinsics-expected-stdout",
+ out: ["art-run-test-2273-checker-unreachable-intrinsics-expected-stdout.txt"],
+ srcs: ["expected-stdout.txt"],
+ cmd: "cp -f $(in) $(out)",
+}
+
+// Test's expected standard error.
+genrule {
+ name: "art-run-test-2273-checker-unreachable-intrinsics-expected-stderr",
+ out: ["art-run-test-2273-checker-unreachable-intrinsics-expected-stderr.txt"],
+ srcs: ["expected-stderr.txt"],
+ cmd: "cp -f $(in) $(out)",
+}
diff --git a/test/2274-checker-bitwise-gvn/expected-stderr.txt b/test/2274-checker-bitwise-gvn/expected-stderr.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/2274-checker-bitwise-gvn/expected-stderr.txt
diff --git a/test/2274-checker-bitwise-gvn/expected-stdout.txt b/test/2274-checker-bitwise-gvn/expected-stdout.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/2274-checker-bitwise-gvn/expected-stdout.txt
diff --git a/test/2274-checker-bitwise-gvn/info.txt b/test/2274-checker-bitwise-gvn/info.txt
new file mode 100644
index 0000000000..1874dd6109
--- /dev/null
+++ b/test/2274-checker-bitwise-gvn/info.txt
@@ -0,0 +1,2 @@
+Tests that GVN doesn't deduplicate HBitwiseNegatedRight
+instructions with different kind.
diff --git a/test/2274-checker-bitwise-gvn/src/Main.java b/test/2274-checker-bitwise-gvn/src/Main.java
new file mode 100644
index 0000000000..57cdf15c65
--- /dev/null
+++ b/test/2274-checker-bitwise-gvn/src/Main.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2024 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ // Test with even/odd input, and even/odd amount of loop iterations.
+ assertEquals(-33, $noinline$TwoBitwiseOperations(32, 4));
+ assertEquals(1, $noinline$TwoBitwiseOperations(32, 5));
+ assertEquals(-31, $noinline$TwoBitwiseOperations(31, 4));
+ assertEquals(0, $noinline$TwoBitwiseOperations(31, 5));
+ }
+
+ /// CHECK-START-ARM: int Main.$noinline$TwoBitwiseOperations(int, int) instruction_simplifier_arm (after)
+ /// CHECK: BitwiseNegatedRight kind:And
+ /// CHECK: BitwiseNegatedRight kind:Or
+
+ /// CHECK-START-ARM64: int Main.$noinline$TwoBitwiseOperations(int, int) instruction_simplifier_arm64 (after)
+ /// CHECK: BitwiseNegatedRight kind:And
+ /// CHECK: BitwiseNegatedRight kind:Or
+
+ /// CHECK-START-{ARM,ARM64}: int Main.$noinline$TwoBitwiseOperations(int, int) disassembly (after)
+ /// CHECK: BitwiseNegatedRight kind:And
+ /// CHECK: BitwiseNegatedRight kind:Or
+ private static int $noinline$TwoBitwiseOperations(int a, int n) {
+ int result = 0;
+ for (int i = 0; i < n; ++i) {
+ if (i % 2 == 0) {
+ result = (~a) & 1;
+ } else {
+ result = (~a) | 1;
+ }
+ }
+ return result;
+ }
+
+ public static void assertEquals(int expected, int actual) {
+ if (expected != actual) {
+ throw new Error("Expected: " + expected + ", found: " + actual);
+ }
+ }
+}
diff --git a/test/458-checker-instruct-simplification/src/Main.java b/test/458-checker-instruct-simplification/src/Main.java
index 76239be741..784acf7949 100644
--- a/test/458-checker-instruct-simplification/src/Main.java
+++ b/test/458-checker-instruct-simplification/src/Main.java
@@ -3692,6 +3692,206 @@ public class Main {
return arg << 25 >> 26;
}
+ // Check that we don't introduce new implicit type conversions so the following pattern
+ // does not occur in the graph:
+ //
+ // <<ImplicitConv>> TypeConversion
+  //   <<ExplicitConv>>  TypeConversion [<<ImplicitConv>>]
+ //
+ // That will lead to a crash because InstructionSimplifier removes implicit type conversions
+ // and during visiting TypeConversion instruction expects that its inputs have been already
+ // simplified.
+ //
+ // The structure of the following tests is
+ //
+ // (T) ((x << N) >> N) or (T) ((x << N) >>> N)
+ //
+ // where
+ // * K is a type of x
+ // * Shifts correspond to implicit type conversion K -> M
+ // * M -> T conversion is explicit
+ //
+ // T itself doesn't matter, the only important thing is that M -> T is explicit.
+ //
+ // We check cases when shifts correspond to the following implicit type conversions:
+ // byte -> byte
+ // byte -> short
+ // unsigned byte -> unsigned byte
+ // unsigned byte -> short
+ // unsigned byte -> char
+ // short -> short
+ // char -> char
+ //
+ // To produce unsigned byte bitwise AND with 0xFF is used.
+
+ /// CHECK-START: int Main.$noinline$testByteToByteToChar(byte) instruction_simplifier (before)
+ /// CHECK: <<Param:b\d+>> ParameterValue
+ /// CHECK: <<Const24:i\d+>> IntConstant 24
+ /// CHECK: <<Shl:i\d+>> Shl [<<Param>>,<<Const24>>]
+ /// CHECK: <<Shr:i\d+>> Shr [<<Shl>>,<<Const24>>]
+ /// CHECK: <<Conv:c\d+>> TypeConversion [<<Shr>>]
+ /// CHECK: <<Return:v\d+>> Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$testByteToByteToChar(byte) instruction_simplifier (after)
+ /// CHECK: <<Param:b\d+>> ParameterValue
+ /// CHECK: <<Conv:c\d+>> TypeConversion [<<Param>>]
+ /// CHECK: <<Return:v\d+>> Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$testByteToByteToChar(byte) instruction_simplifier (after)
+ /// CHECK-NOT: Shl
+
+ /// CHECK-START: int Main.$noinline$testByteToByteToChar(byte) instruction_simplifier (after)
+ /// CHECK-NOT: Shr
+ private static int $noinline$testByteToByteToChar(byte arg) {
+ return (char) ((arg << 24) >> 24);
+ }
+
+ /// CHECK-START: int Main.$noinline$testByteToShortToByte(byte) instruction_simplifier (before)
+ /// CHECK: <<Param:b\d+>> ParameterValue
+ /// CHECK: <<Const16:i\d+>> IntConstant 16
+ /// CHECK: <<Shl:i\d+>> Shl [<<Param>>,<<Const16>>]
+ /// CHECK: <<Shr:i\d+>> Shr [<<Shl>>,<<Const16>>]
+ /// CHECK: <<Conv:b\d+>> TypeConversion [<<Shr>>]
+ /// CHECK: <<Return:v\d+>> Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$testByteToShortToByte(byte) instruction_simplifier (after)
+ /// CHECK: <<Param:b\d+>> ParameterValue
+ /// CHECK: <<Return:v\d+>> Return [<<Param>>]
+
+ /// CHECK-START: int Main.$noinline$testByteToShortToByte(byte) instruction_simplifier (after)
+ /// CHECK-NOT: Shl
+
+ /// CHECK-START: int Main.$noinline$testByteToShortToByte(byte) instruction_simplifier (after)
+ /// CHECK-NOT: Shr
+
+ /// CHECK-START: int Main.$noinline$testByteToShortToByte(byte) instruction_simplifier (after)
+ /// CHECK-NOT: TypeConversion
+ private static int $noinline$testByteToShortToByte(byte arg) {
+ return (byte) ((arg << 16) >> 16);
+ }
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToUnsignedByteToByte(byte) instruction_simplifier (before)
+ /// CHECK: <<Param:b\d+>> ParameterValue
+ /// CHECK: <<Const255:i\d+>> IntConstant 255
+ /// CHECK: <<Const24:i\d+>> IntConstant 24
+ /// CHECK: <<And:i\d+>> And [<<Param>>,<<Const255>>]
+ /// CHECK: <<Shl:i\d+>> Shl [<<And>>,<<Const24>>]
+ /// CHECK: <<UShr:i\d+>> UShr [<<Shl>>,<<Const24>>]
+ /// CHECK: <<Conv:b\d+>> TypeConversion [<<UShr>>]
+ /// CHECK: <<Return:v\d+>> Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToUnsignedByteToByte(byte) instruction_simplifier (after)
+ /// CHECK: <<Param:b\d+>> ParameterValue
+ /// CHECK: <<Return:v\d+>> Return [<<Param>>]
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToUnsignedByteToByte(byte) instruction_simplifier (after)
+ /// CHECK-NOT: Shl
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToUnsignedByteToByte(byte) instruction_simplifier (after)
+ /// CHECK-NOT: Shr
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToUnsignedByteToByte(byte) instruction_simplifier (after)
+ /// CHECK-NOT: TypeConversion
+ private static int $noinline$testUnsignedByteToUnsignedByteToByte(byte arg) {
+ return (byte) (((arg & 0xFF) << 24) >>> 24);
+ }
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToShortToByte(byte) instruction_simplifier (before)
+ /// CHECK: <<Param:b\d+>> ParameterValue
+ /// CHECK: <<Const255:i\d+>> IntConstant 255
+ /// CHECK: <<Const16:i\d+>> IntConstant 16
+ /// CHECK: <<And:i\d+>> And [<<Param>>,<<Const255>>]
+ /// CHECK: <<Shl:i\d+>> Shl [<<And>>,<<Const16>>]
+ /// CHECK: <<Shr:i\d+>> Shr [<<Shl>>,<<Const16>>]
+ /// CHECK: <<Conv:b\d+>> TypeConversion [<<Shr>>]
+ /// CHECK: <<Return:v\d+>> Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToShortToByte(byte) instruction_simplifier (after)
+ /// CHECK: <<Param:b\d+>> ParameterValue
+ /// CHECK: <<Return:v\d+>> Return [<<Param>>]
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToShortToByte(byte) instruction_simplifier (after)
+ /// CHECK-NOT: Shl
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToShortToByte(byte) instruction_simplifier (after)
+ /// CHECK-NOT: Shr
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToShortToByte(byte) instruction_simplifier (after)
+ /// CHECK-NOT: TypeConversion
+ private static int $noinline$testUnsignedByteToShortToByte(byte arg) {
+ return (byte) (((arg & 0xFF) << 16) >> 16);
+ }
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToCharToByte(byte) instruction_simplifier (before)
+ /// CHECK: <<Param:b\d+>> ParameterValue
+ /// CHECK: <<Const255:i\d+>> IntConstant 255
+ /// CHECK: <<Const16:i\d+>> IntConstant 16
+ /// CHECK: <<And:i\d+>> And [<<Param>>,<<Const255>>]
+ /// CHECK: <<Shl:i\d+>> Shl [<<And>>,<<Const16>>]
+ /// CHECK: <<UShr:i\d+>> UShr [<<Shl>>,<<Const16>>]
+ /// CHECK: <<Conv:b\d+>> TypeConversion [<<UShr>>]
+ /// CHECK: <<Return:v\d+>> Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToCharToByte(byte) instruction_simplifier (after)
+ /// CHECK: <<Param:b\d+>> ParameterValue
+ /// CHECK: <<Return:v\d+>> Return [<<Param>>]
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToCharToByte(byte) instruction_simplifier (after)
+ /// CHECK-NOT: Shl
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToCharToByte(byte) instruction_simplifier (after)
+ /// CHECK-NOT: Shr
+
+ /// CHECK-START: int Main.$noinline$testUnsignedByteToCharToByte(byte) instruction_simplifier (after)
+ /// CHECK-NOT: TypeConversion
+ private static int $noinline$testUnsignedByteToCharToByte(byte arg) {
+ return (byte) (((arg & 0xFF) << 16) >>> 16);
+ }
+
+ /// CHECK-START: int Main.$noinline$testShortToShortToByte(short) instruction_simplifier (before)
+ /// CHECK: <<Param:s\d+>> ParameterValue
+ /// CHECK: <<Const16:i\d+>> IntConstant 16
+ /// CHECK: <<Shl:i\d+>> Shl [<<Param>>,<<Const16>>]
+ /// CHECK: <<Shr:i\d+>> Shr [<<Shl>>,<<Const16>>]
+ /// CHECK: <<Conv:b\d+>> TypeConversion [<<Shr>>]
+ /// CHECK: <<Return:v\d+>> Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$testShortToShortToByte(short) instruction_simplifier (after)
+ /// CHECK: <<Param:s\d+>> ParameterValue
+ /// CHECK: <<Conv:b\d+>> TypeConversion [<<Param>>]
+ /// CHECK: <<Return:v\d+>> Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$testShortToShortToByte(short) instruction_simplifier (after)
+ /// CHECK-NOT: Shl
+
+ /// CHECK-START: int Main.$noinline$testShortToShortToByte(short) instruction_simplifier (after)
+ /// CHECK-NOT: Shr
+ private static int $noinline$testShortToShortToByte(short arg) {
+ return (byte) ((arg << 16) >> 16);
+ }
+
+ /// CHECK-START: int Main.$noinline$testCharToCharToByte(char) instruction_simplifier (before)
+ /// CHECK: <<Param:c\d+>> ParameterValue
+ /// CHECK: <<Const16:i\d+>> IntConstant 16
+ /// CHECK: <<Shl:i\d+>> Shl [<<Param>>,<<Const16>>]
+ /// CHECK: <<UShr:i\d+>> UShr [<<Shl>>,<<Const16>>]
+ /// CHECK: <<Conv:b\d+>> TypeConversion [<<UShr>>]
+ /// CHECK: <<Return:v\d+>> Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$testCharToCharToByte(char) instruction_simplifier (after)
+ /// CHECK: <<Param:c\d+>> ParameterValue
+ /// CHECK: <<Conv:b\d+>> TypeConversion [<<Param>>]
+ /// CHECK: <<Return:v\d+>> Return [<<Conv>>]
+
+ /// CHECK-START: int Main.$noinline$testCharToCharToByte(char) instruction_simplifier (after)
+ /// CHECK-NOT: Shl
+
+ /// CHECK-START: int Main.$noinline$testCharToCharToByte(char) instruction_simplifier (after)
+ /// CHECK-NOT: Shr
+ private static int $noinline$testCharToCharToByte(char arg) {
+ return (byte) ((arg << 16) >>> 16);
+ }
+
public static void main(String[] args) throws Exception {
Class smaliTests2 = Class.forName("SmaliTests2");
Method $noinline$XorAllOnes = smaliTests2.getMethod("$noinline$XorAllOnes", int.class);
@@ -4107,6 +4307,27 @@ public class Main {
$noinline$testUnsignedPromotionPatternWithDifferentShiftAmountConstants(0xaabbccdd));
assertIntEquals(0xffffffee,
$noinline$testSignedPromotionPatternWithDifferentShiftAmountConstants(0xaabbccdd));
+
+ assertIntEquals(0xffaa, $noinline$testByteToByteToChar((byte) 0xaa));
+ assertIntEquals(0x0a, $noinline$testByteToByteToChar((byte) 0x0a));
+
+ assertIntEquals(0x0a, $noinline$testByteToShortToByte((byte) 0x0a));
+ assertIntEquals(0xffffffaa, $noinline$testByteToShortToByte((byte) 0xaa));
+
+ assertIntEquals(0x0a, $noinline$testUnsignedByteToUnsignedByteToByte((byte) 0x0a));
+ assertIntEquals(0xffffffaa, $noinline$testUnsignedByteToUnsignedByteToByte((byte) 0xaa));
+
+ assertIntEquals(0x0a, $noinline$testUnsignedByteToShortToByte((byte) 0x0a));
+ assertIntEquals(0xffffffaa, $noinline$testUnsignedByteToShortToByte((byte) 0xaa));
+
+ assertIntEquals(0x0a, $noinline$testUnsignedByteToCharToByte((byte) 0x0a));
+ assertIntEquals(0xffffffaa, $noinline$testUnsignedByteToCharToByte((byte) 0xaa));
+
+ assertIntEquals(0x0b, $noinline$testShortToShortToByte((short) 0xaa0b));
+ assertIntEquals(0xffffffbb, $noinline$testShortToShortToByte((short) 0xaabb));
+
+ assertIntEquals(0x0b, $noinline$testCharToCharToByte((char) 0xaa0b));
+ assertIntEquals(0xffffffbb, $noinline$testCharToCharToByte((char) 0xaabb));
}
private static boolean $inline$true() { return true; }
diff --git a/test/557-checker-instruct-simplifier-ror/src/Main.java b/test/557-checker-instruct-simplifier-ror/src/Main.java
index 5d4bb7ab33..667b35f5d9 100644
--- a/test/557-checker-instruct-simplifier-ror/src/Main.java
+++ b/test/557-checker-instruct-simplifier-ror/src/Main.java
@@ -503,6 +503,7 @@ public class Main {
}
// (j << distance) + (j >>> -distance)
+ // We can't perform the optimization as distance might be `0`, resulting in the wrong value.
/// CHECK-START: long Main.rol_long_reg_v_negv_add(long, int) instruction_simplifier (before)
/// CHECK: <<ArgValue:j\d+>> ParameterValue
@@ -516,19 +517,17 @@ public class Main {
/// CHECK-START: long Main.rol_long_reg_v_negv_add(long, int) instruction_simplifier (after)
/// CHECK: <<ArgValue:j\d+>> ParameterValue
/// CHECK: <<ArgDistance:i\d+>> ParameterValue
- /// CHECK: <<Neg:i\d+>> Neg [<<ArgDistance>>]
- /// CHECK: <<Ror:j\d+>> Ror [<<ArgValue>>,<<Neg>>]
- /// CHECK: Return [<<Ror>>]
-
- /// CHECK-START: long Main.rol_long_reg_v_negv_add(long, int) instruction_simplifier (after)
- /// CHECK-NOT: Add
- /// CHECK-NOT: Shl
- /// CHECK-NOT: UShr
+ /// CHECK-DAG: <<Neg:i\d+>> Neg [<<ArgDistance>>]
+ /// CHECK-DAG: <<UShr:j\d+>> UShr [<<ArgValue>>,<<Neg>>]
+ /// CHECK-DAG: <<Shl:j\d+>> Shl [<<ArgValue>>,<<ArgDistance>>]
+ /// CHECK: <<Add:j\d+>> Add [<<Shl>>,<<UShr>>]
+ /// CHECK: Return [<<Add>>]
public static long rol_long_reg_v_negv_add(long value, int distance) {
return (value << distance) + (value >>> -distance);
}
// (j << distance) ^ (j >>> -distance)
+ // We can't perform the optimization as distance might be `0`, resulting in the wrong value.
/// CHECK-START: long Main.rol_long_reg_v_negv_xor(long, int) instruction_simplifier (before)
/// CHECK: <<ArgValue:j\d+>> ParameterValue
@@ -542,18 +541,60 @@ public class Main {
/// CHECK-START: long Main.rol_long_reg_v_negv_xor(long, int) instruction_simplifier (after)
/// CHECK: <<ArgValue:j\d+>> ParameterValue
/// CHECK: <<ArgDistance:i\d+>> ParameterValue
- /// CHECK: <<Neg:i\d+>> Neg [<<ArgDistance>>]
- /// CHECK: <<Ror:j\d+>> Ror [<<ArgValue>>,<<Neg>>]
- /// CHECK: Return [<<Ror>>]
+ /// CHECK-DAG: <<Neg:i\d+>> Neg [<<ArgDistance>>]
+ /// CHECK-DAG: <<UShr:j\d+>> UShr [<<ArgValue>>,<<Neg>>]
+ /// CHECK-DAG: <<Shl:j\d+>> Shl [<<ArgValue>>,<<ArgDistance>>]
+ /// CHECK: <<Xor:j\d+>> Xor [<<Shl>>,<<UShr>>]
+ /// CHECK: Return [<<Xor>>]
- /// CHECK-START: long Main.rol_long_reg_v_negv_xor(long, int) instruction_simplifier (after)
- /// CHECK-NOT: Xor
- /// CHECK-NOT: Shl
- /// CHECK-NOT: UShr
public static long rol_long_reg_v_negv_xor(long value, int distance) {
return (value << distance) ^ (value >>> -distance);
}
+ /// CHECK-START: void Main.$noinline$testDontOptimizeAddIntoRotate_Int() disassembly (after)
+ /// CHECK-NOT: Ror
+ public static void $noinline$testDontOptimizeAddIntoRotate_Int() {
+ int distance = returnFalse() ? 1 : 0;
+ int value = -512667375;
+ int expected_result = 2 * value;
+ int result = (value >>> distance) + (value << -distance);
+ assertIntEquals(expected_result, result);
+ }
+
+ /// CHECK-START: void Main.$noinline$testDontOptimizeAddIntoRotate_Long() disassembly (after)
+ /// CHECK-NOT: Ror
+ public static void $noinline$testDontOptimizeAddIntoRotate_Long() {
+ int distance = returnFalse() ? 1 : 0;
+ long value = -512667375L;
+ long expected_result = 2L * value;
+ long result = (value >>> distance) + (value << -distance);
+ assertLongEquals(expected_result, result);
+ }
+
+ /// CHECK-START: void Main.$noinline$testDontOptimizeXorIntoRotate_Int() disassembly (after)
+ /// CHECK-NOT: Ror
+ public static void $noinline$testDontOptimizeXorIntoRotate_Int() {
+ int distance = returnFalse() ? 1 : 0;
+ int value = -512667375;
+ int expected_result = 0;
+ int result = (value >>> distance) ^ (value << -distance);
+ assertIntEquals(expected_result, result);
+ }
+
+ /// CHECK-START: void Main.$noinline$testDontOptimizeXorIntoRotate_Long() disassembly (after)
+ /// CHECK-NOT: Ror
+ public static void $noinline$testDontOptimizeXorIntoRotate_Long() {
+ int distance = returnFalse() ? 1 : 0;
+ long value = -512667375L;
+ long expected_result = 0;
+ long result = (value >>> distance) ^ (value << -distance);
+ assertLongEquals(expected_result, result);
+ }
+
+ static boolean returnFalse() {
+ return false;
+ }
+
public static void main(String[] args) {
assertIntEquals(2, ror_int_constant_c_c(8));
assertIntEquals(2, ror_int_constant_c_c_0(8));
@@ -581,5 +622,11 @@ public class Main {
assertLongEquals(32L, rol_long_reg_v_negv_add(8L, 2));
assertLongEquals(32L, rol_long_reg_v_negv_xor(8L, 2));
+
+ $noinline$testDontOptimizeAddIntoRotate_Int();
+ $noinline$testDontOptimizeAddIntoRotate_Long();
+
+ $noinline$testDontOptimizeXorIntoRotate_Int();
+ $noinline$testDontOptimizeXorIntoRotate_Long();
}
}
diff --git a/test/638-checker-inline-cache-intrinsic/src/Main.java b/test/638-checker-inline-cache-intrinsic/src/Main.java
index f25d03a894..86430e6e19 100644
--- a/test/638-checker-inline-cache-intrinsic/src/Main.java
+++ b/test/638-checker-inline-cache-intrinsic/src/Main.java
@@ -36,8 +36,8 @@ public class Main {
/// CHECK: InvokeInterface method_name:java.lang.CharSequence.charAt
/// CHECK-START: char Main.$noinline$inlinePolymorphic(java.lang.CharSequence) inliner (after)
- /// CHECK: InvokeVirtual method_name:java.lang.String.charAt intrinsic:StringCharAt
- /// CHECK: Deoptimize
+ /// CHECK-DAG: InvokeVirtual method_name:java.lang.String.charAt intrinsic:StringCharAt
+ /// CHECK-DAG: Deoptimize
/// CHECK-START: char Main.$noinline$inlinePolymorphic(java.lang.CharSequence) instruction_simplifier$after_inlining (after)
/// CHECK: Deoptimize
diff --git a/test/928-jni-table/jni_table.cc b/test/928-jni-table/jni_table.cc
index 1dfe34bd6b..52cd374f8e 100644
--- a/test/928-jni-table/jni_table.cc
+++ b/test/928-jni-table/jni_table.cc
@@ -44,7 +44,6 @@ static void DoDeleteGlobalRef(JNIEnv* env, jobject o) {
CHECK(thr != nullptr);
if (env->IsInstanceOf(o, thr)) {
jvmtiThreadInfo jti;
- // b/146170834: This could cause DCHECK failures.
CHECK_EQ(jvmti_env->GetThreadInfo(reinterpret_cast<jthread>(o), &jti), JVMTI_ERROR_NONE);
}
gOriginalEnv->DeleteGlobalRef(env, o);
diff --git a/test/941-recursive-obsolete-jit/src/Main.java b/test/941-recursive-obsolete-jit/src/Main.java
index e3065a7117..f2a397e6bb 100644
--- a/test/941-recursive-obsolete-jit/src/Main.java
+++ b/test/941-recursive-obsolete-jit/src/Main.java
@@ -135,6 +135,12 @@ public class Main {
do {
// Run ensureJitCompiled here since it might get GCd
ensureJitCompiled(Transform.class, "sayHi");
+ // We want to make sure sayHi method gets deoptimized. So we cannot allow any runtime frames
+ // between sayHi and the run method where the transformation is happening. If the run method
+ // is interpreted there will be a runtime frame to transition from JIT to interpreted code.
+ // So ensure the run method is JITed too, so we don't loop for a long time in the hope of
+ // getting the run method JITed.
+ ensureJitCompiled(do_redefinition.getClass(), "run");
// Clear output.
reporter.clear();
t.sayHi(2, reporter, () -> { reporter.accept("Not doing anything here"); });
diff --git a/test/943-private-recursive-jit/src/Main.java b/test/943-private-recursive-jit/src/Main.java
index 09337bae26..9fa6607e85 100644
--- a/test/943-private-recursive-jit/src/Main.java
+++ b/test/943-private-recursive-jit/src/Main.java
@@ -151,6 +151,12 @@ public class Main {
// Run ensureJitCompiled here since it might get GCd
ensureJitCompiled(Transform.class, "sayHi");
ensureJitCompiled(Transform.class, "privateSayHi");
+ // We want to make sure sayHi method gets deoptimized. So we cannot allow any runtime frames
+ // between sayHi and the run method where the transformation is happening. If the run method
+ // is interpreted there will be a runtime frame to transition from JIT to interpreted code.
+ // So ensure the run method is JITed too, so we don't loop for a long time in the hope of
+ // getting the run method JITed.
+ ensureJitCompiled(do_redefinition.getClass(), "run");
// Clear output.
reporter.clear();
t.sayHi(2, reporter, () -> { reporter.accept("Not doing anything here"); });
diff --git a/test/Android.bp b/test/Android.bp
index 5406dfd976..1768340530 100644
--- a/test/Android.bp
+++ b/test/Android.bp
@@ -924,6 +924,7 @@ cc_defaults {
"2040-huge-native-alloc/huge_native_buf.cc",
"2048-bad-native-registry/native_finalizer.cc",
"2235-JdkUnsafeTest/unsafe_test.cc",
+ "2246-trace-v2/dump_trace.cc",
"2262-miranda-methods/jni_invoke.cc",
"2270-mh-internal-hiddenapi-use/mh-internal-hidden-api.cc",
"common/runtime_state.cc",
diff --git a/test/common/stack_inspect.cc b/test/common/stack_inspect.cc
index 1dd5f5a79d..8be355005a 100644
--- a/test/common/stack_inspect.cc
+++ b/test/common/stack_inspect.cc
@@ -82,6 +82,8 @@ static bool IsMethodInterpreted(Thread* self,
StackVisitor::WalkStack(
[&](const art::StackVisitor* stack_visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
if (goal == stack_visitor->GetMethod()) {
+ // We don't deoptimize beyond a runtime frame. So if we need the method to be
+ // deoptimizeable we cannot allow the previous frame to be a runtime frame.
*method_is_interpreted =
(require_deoptable && prev_was_runtime) || stack_visitor->IsShadowFrame();
method_found = true;
diff --git a/test/knownfailures.json b/test/knownfailures.json
index a0b7dcd760..b47a1a1b35 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -1238,6 +1238,7 @@
"2238-checker-polymorphic-recursive-inlining",
"2240-tracing-non-invokable-method",
"2246-trace-stream",
+ "2246-trace-v2",
"2254-class-value-before-and-after-u",
"2261-badcleaner-in-systemcleaner",
"2263-method-trace-jit",
@@ -1571,7 +1572,7 @@
},
{
"tests": ["2271-profile-inline-cache"],
- "variant": "jit-on-first-use",
+ "variant": "jit-on-first-use | debuggable | trace | stream",
"description": ["Test relies on profiling done by baseline compiled code. Meanwhile, it",
"can't use --baseline because it has a test case checking the behavior when",
"a method is optimized compiled."]
diff --git a/test/run-test b/test/run-test
index de676a0eaa..17d5227816 100755
--- a/test/run-test
+++ b/test/run-test
@@ -113,14 +113,14 @@ if True:
tmp_dir = f"{TMPDIR}/{test_dir}"
checker = f"{progdir}/../tools/checker/checker.py"
- ON_VM = os.environ.get("ART_TEST_ON_VM")
- SSH_USER = os.environ.get("ART_TEST_SSH_USER")
- SSH_HOST = os.environ.get("ART_TEST_SSH_HOST")
- SSH_PORT = os.environ.get("ART_TEST_SSH_PORT")
- SSH_CMD = os.environ.get("ART_SSH_CMD")
- SCP_CMD = os.environ.get("ART_SCP_CMD")
- CHROOT = os.environ.get("ART_TEST_CHROOT")
- CHROOT_CMD = os.environ.get("ART_CHROOT_CMD")
+ ON_VM = env.ART_TEST_ON_VM
+ SSH_USER = env.ART_TEST_SSH_USER
+ SSH_HOST = env.ART_TEST_SSH_HOST
+ SSH_PORT = env.ART_TEST_SSH_PORT
+ SSH_CMD = env.ART_SSH_CMD
+ SCP_CMD = env.ART_SCP_CMD
+ CHROOT = env.ART_TEST_CHROOT
+ CHROOT_CMD = env.ART_CHROOT_CMD
def fail(message: str, caller:Optional[FrameInfo]=None):
caller = caller or getframeinfo(currentframe().f_back) # type: ignore
diff --git a/test/testrunner/env.py b/test/testrunner/env.py
index 200de4a97d..8313756995 100644
--- a/test/testrunner/env.py
+++ b/test/testrunner/env.py
@@ -148,4 +148,17 @@ SOONG_OUT_DIR = _get_build_var('SOONG_OUT_DIR')
ART_TEST_RUN_ON_ARM_FVP = _getEnvBoolean('ART_TEST_RUN_ON_ARM_FVP', False)
ART_TEST_ON_VM = _env.get('ART_TEST_ON_VM')
-ART_SSH_CMD = _env.get('ART_SSH_CMD')
+
+ART_TEST_SSH_PORT = _env.get('ART_TEST_SSH_PORT', 10001)
+ART_TEST_SSH_USER = _env.get('ART_TEST_SSH_USER', 'ubuntu')
+ART_TEST_SSH_HOST = _env.get('ART_TEST_SSH_HOST', 'localhost')
+ART_SSH_CMD = _env.get('ART_SSH_CMD', f"ssh -q -i ~/.ssh/ubuntu -p {ART_TEST_SSH_PORT} "
+ f"-o StrictHostKeyChecking=no "
+ f"{ART_TEST_SSH_USER}@{ART_TEST_SSH_HOST}")
+ART_SCP_CMD = _env.get('ART_SCP_CMD', f"scp -i ~/.ssh/ubuntu -P {ART_TEST_SSH_PORT} "
+ f"-o StrictHostKeyChecking=no -p -r")
+ART_CHROOT_CMD = _env.get('ART_CHROOT_CMD', "unshare --user --map-root-user chroot art-test-chroot")
+if ART_TEST_ON_VM:
+ ART_TEST_CHROOT = _env.get('ART_TEST_CHROOT', f"/home/{ART_TEST_SSH_USER}/art-test-chroot")
+else:
+ ART_TEST_CHROOT = _env.get('ART_TEST_CHROOT', "/data/local/art-test-chroot")
diff --git a/test/utils/regen-test-files b/test/utils/regen-test-files
index 36ac9ef9b1..5da8549135 100755
--- a/test/utils/regen-test-files
+++ b/test/utils/regen-test-files
@@ -216,19 +216,15 @@ known_failing_tests = frozenset([
"2243-single-step-default",
"2262-miranda-methods",
"2262-default-conflict-methods",
- # 2247-checker-write-barrier-elimination: Disabled while we investigate failures
- "2247-checker-write-barrier-elimination"
])
-# These tests are new and have not had enough post-submit runs to meet
-# pre-submit SLOs. Monitor their post-submit runs before removing them
-# from this set (in order to promote them to presubmits).
+# These ART run-tests are new and have not had enough post-submit runs
+# to meet pre-submit SLOs. Monitor their post-submit runs before
+# removing them from this set (in order to promote them to
+# presubmits).
postsubmit_only_tests = frozenset([
- "851-null-instanceof",
- "853-checker-inlining",
- "2266-checker-remove-empty-ifs",
- "2268-checker-remove-dead-phis",
- "2269-checker-constant-folding-instrinsics",
+ "2247-checker-write-barrier-elimination",
+ "2273-checker-unreachable-intrinsics",
])
known_failing_on_hwasan_tests = frozenset([
@@ -269,11 +265,33 @@ art_gtest_eng_only_module_names = [
# All supported ART gtests.
art_gtest_module_names = sorted(art_gtest_user_module_names + art_gtest_eng_only_module_names)
+# These ART gtests are new and have not had enough post-submit runs
+# to meet pre-submit SLOs. Monitor their post-submit runs before
+# removing them from this set (in order to promote them to
+# presubmits).
+art_gtest_postsubmit_only_module_names = [
+ "art_standalone_dexopt_chroot_setup_tests",
+]
+
# ART gtests supported in MTS that do not need root access to the device.
art_gtest_mts_user_module_names = copy.copy(art_gtest_user_module_names)
+# ART gtests supported in presubmits.
+art_gtest_presubmit_module_names = [t for t in art_gtest_module_names
+ if t not in art_gtest_postsubmit_only_module_names]
+
# ART gtests supported in Mainline presubmits.
-art_gtests_mainline_presubmit_module_names = copy.copy(art_gtest_module_names)
+art_gtest_mainline_presubmit_module_names = copy.copy(art_gtest_presubmit_module_names)
+
+# ART gtests supported in postsubmits.
+unknown_art_gtest_postsubmit_only_module_names = [t for t in art_gtest_postsubmit_only_module_names
+ if t not in art_gtest_module_names]
+if unknown_art_gtest_postsubmit_only_module_names:
+ logging.error(textwrap.dedent("""\
+ The following `art_gtest_postsubmit_only_module_names` elements are not part of
+ `art_gtest_module_names`: """) + str(unknown_art_gtest_postsubmit_only_module_names))
+ sys.exit(1)
+art_gtest_postsubmit_module_names = copy.copy(art_gtest_postsubmit_only_module_names)
# Tests exhibiting a flaky behavior, currently exluded from MTS for
# the stake of stability / confidence (b/209958457).
@@ -309,10 +327,24 @@ flaky_tests_excluded_from_mts = {
]
}
+# Tests excluded from all test mapping test groups.
+#
+# Example of admissible values in this dictionary:
+#
+# "art_standalone_cmdline_tests": ["CmdlineParserTest#TestCompilerOption"],
+# "art_standalone_dexopt_chroot_setup_tests": ["DexoptChrootSetupTest#HelloWorld"],
+#
+failing_tests_excluded_from_test_mapping = {
+ # Empty.
+}
+
# Tests failing because of linking issues, currently exluded from MTS
# and Mainline Presubmits to minimize noise in continuous runs while
# we investigate.
#
+# Example of admissible values in this dictionary: same as for
+# `failing_tests_excluded_from_test_mapping` (see above).
+#
# TODO(b/247108425): Address the linking issues and re-enable these
# tests.
failing_tests_excluded_from_mts_and_mainline_presubmits = {
@@ -320,6 +352,11 @@ failing_tests_excluded_from_mts_and_mainline_presubmits = {
"art_standalone_libartpalette_tests": ["PaletteClientJniTest*"],
}
+failing_tests_excluded_from_mainline_presubmits = (
+ failing_tests_excluded_from_test_mapping |
+ failing_tests_excluded_from_mts_and_mainline_presubmits
+)
+
# Is `run_test` a Checker test (i.e. a test containing Checker
# assertions)?
def is_checker_test(run_test):
@@ -634,25 +671,34 @@ class Generator:
for t in art_run_tests
if t in postsubmit_only_tests]
+ def gen_tests_dict(tests, excluded_test_cases = {}, excluded_test_modules = [], suffix = ""):
+ return [
+ ({"name": t + suffix,
+ "options": [
+ {"exclude-filter": e}
+ for e in excluded_test_cases[t]
+ ]}
+ if t in excluded_test_cases
+ else {"name": t + suffix})
+ for t in tests
+ if t not in excluded_test_modules
+ ]
+
# Mainline presubmits.
mainline_presubmit_apex_suffix = "[com.google.android.art.apex]"
mainline_other_presubmit_tests = []
mainline_presubmit_tests = (mainline_other_presubmit_tests + presubmit_run_test_module_names +
- art_gtests_mainline_presubmit_module_names)
- mainline_presubmit_tests_dict = [
- ({"name": t + mainline_presubmit_apex_suffix,
- "options": [
- {"exclude-filter": e}
- for e in failing_tests_excluded_from_mts_and_mainline_presubmits[t]
- ]}
- if t in failing_tests_excluded_from_mts_and_mainline_presubmits
- else {"name": t + mainline_presubmit_apex_suffix})
- for t in mainline_presubmit_tests
- ]
+ art_gtest_mainline_presubmit_module_names)
+ mainline_presubmit_tests_dict = \
+ gen_tests_dict(mainline_presubmit_tests,
+ failing_tests_excluded_from_mainline_presubmits,
+ [],
+ mainline_presubmit_apex_suffix)
# Android Virtualization Framework presubmits
avf_presubmit_tests = ["ComposHostTestCases"]
- avf_presubmit_tests_dict = [{"name": t} for t in avf_presubmit_tests]
+ avf_presubmit_tests_dict = gen_tests_dict(avf_presubmit_tests,
+ failing_tests_excluded_from_test_mapping)
# Presubmits.
other_presubmit_tests = [
@@ -663,14 +709,18 @@ class Generator:
"art_standalone_dexpreopt_tests",
]
presubmit_tests = (other_presubmit_tests + presubmit_run_test_module_names +
- art_gtest_module_names)
- presubmit_tests_dict = [{"name": t} for t in presubmit_tests]
- hwasan_presubmit_tests_dict = [{"name": t} for t in presubmit_tests
- if t not in known_failing_on_hwasan_tests]
+ art_gtest_presubmit_module_names)
+ presubmit_tests_dict = gen_tests_dict(presubmit_tests,
+ failing_tests_excluded_from_test_mapping)
+ hwasan_presubmit_tests_dict = gen_tests_dict(presubmit_tests,
+ failing_tests_excluded_from_test_mapping,
+ known_failing_on_hwasan_tests)
# Postsubmits.
- postsubmit_tests = postsubmit_run_test_module_names
+ postsubmit_tests = postsubmit_run_test_module_names + art_gtest_postsubmit_module_names
postsubmit_tests_dict = [{"name": t} for t in postsubmit_tests]
+ postsubmit_tests_dict = gen_tests_dict(postsubmit_tests,
+ failing_tests_excluded_from_test_mapping)
# Use an `OrderedDict` container to preserve the order in which items are inserted.
# Do not produce an entry for a test group if it is empty.
@@ -986,19 +1036,28 @@ class Generator:
expected_succeeding_tests_percentage = int(
num_expected_succeeding_tests * 100 / len(run_tests))
- mainline_presubmit_gtests_percentage = int(
- len(art_gtests_mainline_presubmit_module_names) * 100 / len(art_gtest_module_names))
+ num_gtests = len(art_gtest_module_names)
+
+ num_presubmit_gtests = len(art_gtest_presubmit_module_names)
+ presubmit_gtests_percentage = int(num_presubmit_gtests * 100 / num_gtests)
+
+ num_mainline_presubmit_gtests = len(art_gtest_mainline_presubmit_module_names)
+ mainline_presubmit_gtests_percentage = int(num_mainline_presubmit_gtests * 100 / num_gtests)
+
+ num_postsubmit_gtests = len(art_gtest_postsubmit_module_names)
+ postsubmit_gtests_percentage = int(num_postsubmit_gtests * 100 / num_gtests)
print(f"Generated TEST_MAPPING entries for {num_expected_succeeding_tests} ART run-tests out"
f" of {len(run_tests)} ({expected_succeeding_tests_percentage}%):")
for (num_tests, test_kind, tests_percentage, test_group_name) in [
(num_mainline_presubmit_run_tests, "ART run-tests", mainline_presubmit_run_tests_percentage,
"mainline-presubmit"),
- (len(art_gtests_mainline_presubmit_module_names), "ART gtests",
- mainline_presubmit_gtests_percentage, "mainline-presubmit"),
(num_presubmit_run_tests, "ART run-tests", presubmit_run_tests_percentage, "presubmit"),
(num_postsubmit_run_tests, "ART run-tests", postsubmit_run_tests_percentage, "postsubmit"),
- (len(art_gtest_module_names), "ART gtests", 100, "presubmit"),
+ (num_mainline_presubmit_gtests, "ART gtests", mainline_presubmit_gtests_percentage,
+ "mainline-presubmit"),
+ (num_presubmit_gtests, "ART gtests", presubmit_gtests_percentage, "presubmit"),
+ (num_postsubmit_gtests, "ART gtests", postsubmit_gtests_percentage, "presubmit"),
]:
print(
f" {num_tests:3d} {test_kind} ({tests_percentage}%) in `{test_group_name}` test group.")
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index ca36813840..2bcb4e6a05 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -175,6 +175,8 @@ if [[ $build_target == "yes" ]]; then
make_command+=" deapexer"
# Needed to generate the primary boot image for testing.
make_command+=" generate-boot-image"
+ # Data file needed by the `ArtExecTest.SetTaskProfiles` test.
+ make_command+=" task_profiles.json"
# Build/install the required APEXes.
make_command+=" ${apexes[*]}"
make_command+=" ${specific_targets}"
diff --git a/tools/buildbot-utils.sh b/tools/buildbot-utils.sh
index 70fa01a895..1cdd275b98 100755
--- a/tools/buildbot-utils.sh
+++ b/tools/buildbot-utils.sh
@@ -87,7 +87,7 @@ if [[ -n "$ART_TEST_ON_VM" ]]; then
export RSYNC_RSH="ssh -i ~/.ssh/ubuntu -p $ART_TEST_SSH_PORT -o StrictHostKeyChecking=no" # don't prefix with "ART_", rsync expects this name
if [[ "$TARGET_ARCH" =~ ^(arm64|riscv64)$ ]]; then
- export ART_TEST_VM_IMG="ubuntu-22.04-server-cloudimg-$TARGET_ARCH.img"
+ export ART_TEST_VM_IMG="ubuntu-23.10-server-cloudimg-$TARGET_ARCH.img"
export ART_TEST_VM_DIR="$ANDROID_BUILD_TOP/vm/$TARGET_ARCH"
export ART_TEST_VM="$ART_TEST_VM_DIR/$ART_TEST_VM_IMG"
else
diff --git a/tools/buildbot-vm.sh b/tools/buildbot-vm.sh
index 6c324c3bba..a15f9e8cbe 100755
--- a/tools/buildbot-vm.sh
+++ b/tools/buildbot-vm.sh
@@ -45,24 +45,24 @@ if [[ $action = create ]]; then
# sudo apt install qemu-system-<arch> qemu-efi cloud-image-utils
- # Get the cloud image for Ubunty 22.04 (Jammy)
- wget "http://cloud-images.ubuntu.com/releases/22.04/release/$ART_TEST_VM_IMG"
+ # Get the cloud image for Ubunty 23.10 (Mantic Minotaur)
+ wget "http://cloud-images.ubuntu.com/releases/23.10/release/$ART_TEST_VM_IMG"
if [[ "$TARGET_ARCH" = "riscv64" ]]; then
# Get U-Boot for Ubuntu 22.04 (Jammy)
get_stable_binary \
- u/u-boot/u-boot-qemu_2022.01+dfsg-2ubuntu2.3_all.deb \
+ u/u-boot/u-boot-qemu_2023.07+dfsg-1ubuntu2_all.deb \
usr/lib/u-boot/qemu-riscv64_smode/uboot.elf
# Get OpenSBI for Ubuntu 22.04 (Jammy)
get_stable_binary \
- o/opensbi/opensbi_1.3-1ubuntu0.22.04.2_all.deb \
+ o/opensbi/opensbi_1.3-1ubuntu0.23.04.2_all.deb \
usr/lib/riscv64-linux-gnu/opensbi/generic/fw_jump.elf
elif [[ "$TARGET_ARCH" = "arm64" ]]; then
- # Get EFI (ARM64) for Ubuntu 22.04 (Jammy)
+ # Get EFI (ARM64)
get_stable_binary \
- e/edk2/qemu-efi-aarch64_2022.02-3ubuntu0.22.04.1_all.deb \
+ e/edk2/qemu-efi-aarch64_2023.05-2ubuntu0.1_all.deb \
usr/share/qemu-efi-aarch64/QEMU_EFI.fd
dd if=/dev/zero of=flash0.img bs=1M count=64
@@ -111,6 +111,7 @@ elif [[ $action = boot ]]; then
-nographic \
-bios fw_jump.elf \
-kernel uboot.elf \
+ -cpu rv64,v=true,vlen=256,vext_spec=v1.0 \
-drive file="$ART_TEST_VM_IMG",if=virtio \
-drive file=user-data.img,format=raw,if=virtio \
-device virtio-net-device,netdev=usernet \
@@ -134,7 +135,7 @@ elif [[ $action = boot ]]; then
(qemu-system-aarch64 \
-m 16G \
-smp 8 \
- -cpu cortex-a57 \
+ -cpu cortex-a710,sve=on \
-M virt \
-nographic \
-drive if=none,file="$ART_TEST_VM_IMG",id=hd0 \
@@ -145,9 +146,10 @@ elif [[ $action = boot ]]; then
-device virtio-net-device,netdev=usernet \
-netdev user,id=usernet,hostfwd=tcp::$ART_TEST_SSH_PORT-:22 > $SCRIPT_DIR/boot.out &)
echo "Now listening for successful boot"
+ finish_str='.*finished at.*'
while IFS= read -d $'\0' -n 1 a ; do
line+="${a}"
- if [[ "$line" =~ '.*finished.*' ]] ; then
+ if [[ "$line" =~ $finish_str ]] ; then
echo $line
echo "VM Successfully booted!"
exit 0
diff --git a/tools/create_minidebuginfo/create_minidebuginfo.cc b/tools/create_minidebuginfo/create_minidebuginfo.cc
index 506661a377..f13b62579e 100644
--- a/tools/create_minidebuginfo/create_minidebuginfo.cc
+++ b/tools/create_minidebuginfo/create_minidebuginfo.cc
@@ -94,22 +94,20 @@ static void WriteMinidebugInfo(const std::vector<uint8_t>& input, std::vector<ui
auto* debug_frame = builder->GetDebugFrame();
debug_frame->Start();
{
- std::map<std::basic_string_view<uint8_t>, Elf_Addr> cie_dedup;
+ std::map<std::string_view, Elf_Addr> cie_dedup;
std::unordered_map<const CIE*, Elf_Addr> new_cie_offset;
std::deque<std::pair<const FDE*, const CIE*>> entries;
// Read, de-duplicate and write CIE entries. Read FDE entries.
reader.VisitDebugFrame(
[&](const CIE* cie) {
- std::basic_string_view<uint8_t> key(cie->data(), cie->size());
+ std::string_view key(reinterpret_cast<const char*>(cie->data()), cie->size());
auto it = cie_dedup.emplace(key, debug_frame->GetPosition());
if (/* inserted */ it.second) {
debug_frame->WriteFully(cie->data(), cie->size());
}
new_cie_offset[cie] = it.first->second;
},
- [&](const FDE* fde, const CIE* cie) {
- entries.emplace_back(std::make_pair(fde, cie));
- });
+ [&](const FDE* fde, const CIE* cie) { entries.emplace_back(std::make_pair(fde, cie)); });
// Sort FDE entries by opcodes to improve locality for compression (saves ~25%).
std::stable_sort(entries.begin(), entries.end(), [](const auto& lhs, const auto& rhs) {
constexpr size_t opcode_offset = sizeof(FDE);
diff --git a/tools/fuzzer/corpus/recursive_encoded_array.dex b/tools/fuzzer/corpus/recursive_encoded_array.dex
new file mode 100644
index 0000000000..c775e744cd
--- /dev/null
+++ b/tools/fuzzer/corpus/recursive_encoded_array.dex
Binary files differ
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index fc28acd22b..a3c07ccdcf 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -290,8 +290,7 @@
bug: 228441328,
names: ["test.java.lang.Math.CeilAndFloorTests#nearIntegerTests",
"test.java.time.chrono.TestEraDisplayName",
- "test.java.time.format.TestDateTimeFormatterBuilderWithLocale",
- "test.java.util.TestFormatter"]
+ "test.java.time.format.TestDateTimeFormatterBuilderWithLocale"]
},
{
description: "Fails on armv8 device",
diff --git a/tools/libcore_gcstress_debug_failures.txt b/tools/libcore_gcstress_debug_failures.txt
index 168b0ad207..a1c771107c 100644
--- a/tools/libcore_gcstress_debug_failures.txt
+++ b/tools/libcore_gcstress_debug_failures.txt
@@ -47,6 +47,9 @@
"org.apache.harmony.luni.tests.internal.net.www.protocol.http.HttpURLConnectionTest#testUsingProxy",
"org.apache.harmony.luni.tests.internal.net.www.protocol.http.HttpURLConnectionTest#testUsingProxySelector",
"org.apache.harmony.luni.tests.internal.net.www.protocol.https.HttpsURLConnectionTest#testConsequentProxyConnection",
+ "org.apache.harmony.luni.tests.internal.net.www.protocol.https.HttpsURLConnectionTest#testHttpsConnection",
+ "org.apache.harmony.luni.tests.internal.net.www.protocol.https.HttpsURLConnectionTest#testProxyAuthConnection",
+ "org.apache.harmony.luni.tests.internal.net.www.protocol.https.HttpsURLConnectionTest#testProxyAuthConnection_doOutput",
"org.apache.harmony.tests.java.lang.ref.ReferenceQueueTest#test_removeJ",
"org.apache.harmony.tests.java.lang.ProcessManagerTest#testSleep",
"org.apache.harmony.tests.java.util.TimerTest#testOverdueTaskExecutesImmediately",
@@ -63,6 +66,16 @@
]
},
{
+ description: "Failure with gcstress and debug.",
+ bug: 313922528,
+ result: ERROR,
+ modes: [host],
+ names: ["libcore.java.util.jar.OldManifestTest#test_equals",
+ "test.java.util.Collection",
+ "test.java.util.TestFormatter"
+ ]
+},
+{
description: "Time-sensitive test fails check of elapsed time with gcstress",
result: EXEC_FAILED,
bug: 205007075,
diff --git a/tools/luci/config/generated/cr-buildbucket.cfg b/tools/luci/config/generated/cr-buildbucket.cfg
index 6708ed1374..c6cdc023f8 100644
--- a/tools/luci/config/generated/cr-buildbucket.cfg
+++ b/tools/luci/config/generated/cr-buildbucket.cfg
@@ -2,7 +2,7 @@
# Do not modify manually.
#
# For the schema of this file, see BuildbucketCfg message:
-# https://luci-config.appspot.com/schemas/projects:buildbucket.cfg
+# https://config.luci.app/schemas/projects:buildbucket.cfg
buckets {
name: "ci"
@@ -23,11 +23,13 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:32"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:true"
properties_j: "debug:true"
properties_j: "device:\"angler-armv7\""
properties_j: "generational_cc:true"
+ properties_j: "product:\"arm_krait\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
@@ -51,11 +53,13 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:32"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:true"
properties_j: "debug:false"
properties_j: "device:\"angler-armv7\""
properties_j: "generational_cc:true"
+ properties_j: "product:\"arm_krait\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
@@ -80,11 +84,13 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:32"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:false"
properties_j: "debug:true"
properties_j: "device:\"angler-armv7\""
properties_j: "generational_cc:false"
+ properties_j: "product:\"arm_krait\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
@@ -108,11 +114,13 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:64"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:true"
properties_j: "debug:true"
properties_j: "device:\"angler-armv8\""
properties_j: "generational_cc:true"
+ properties_j: "product:\"armv8\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
@@ -136,11 +144,13 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:64"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:true"
properties_j: "debug:false"
properties_j: "device:\"angler-armv8\""
properties_j: "generational_cc:true"
+ properties_j: "product:\"armv8\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
@@ -165,11 +175,13 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:64"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:false"
properties_j: "debug:true"
properties_j: "device:\"angler-armv8\""
properties_j: "generational_cc:false"
+ properties_j: "product:\"armv8\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
@@ -193,12 +205,14 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:32"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:true"
properties_j: "debug:false"
properties_j: "device:\"bullhead-armv7\""
properties_j: "gcstress:true"
properties_j: "generational_cc:true"
+ properties_j: "product:\"arm_krait\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
@@ -222,12 +236,14 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:64"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:true"
properties_j: "debug:true"
properties_j: "device:\"bullhead-armv8\""
properties_j: "gcstress:true"
properties_j: "generational_cc:true"
+ properties_j: "product:\"armv8\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
@@ -251,12 +267,14 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:64"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:true"
properties_j: "debug:false"
properties_j: "device:\"bullhead-armv8\""
properties_j: "gcstress:true"
properties_j: "generational_cc:true"
+ properties_j: "product:\"armv8\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
@@ -585,6 +603,36 @@ buckets {
}
}
builders {
+ name: "qemu-armv8-ndebug"
+ swarming_host: "chromium-swarm.appspot.com"
+ dimensions: "os:Linux"
+ dimensions: "pool:luci.art.ci"
+ recipe {
+ name: "art"
+ cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
+ cipd_version: "refs/heads/main"
+ properties_j: "bitness:64"
+ properties_j: "builder_group:\"client.art\""
+ properties_j: "concurrent_collector:true"
+ properties_j: "debug:false"
+ properties_j: "device:\"qemu-armv8\""
+ properties_j: "generational_cc:true"
+ properties_j: "on_virtual_machine:true"
+ }
+ execution_timeout_secs: 108000
+ expiration_secs: 61200
+ caches {
+ name: "art"
+ path: "art"
+ }
+ build_numbers: YES
+ service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
+ experiments {
+ key: "luci.recipes.use_python3"
+ value: 100
+ }
+ }
+ builders {
name: "qemu-riscv64-ndebug"
swarming_host: "chromium-swarm.appspot.com"
dimensions: "os:Linux"
@@ -593,6 +641,7 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:64"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:true"
properties_j: "debug:false"
@@ -622,6 +671,7 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:64"
properties_j: "build_only:true"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:true"
@@ -652,12 +702,14 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:32"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:true"
properties_j: "debug:true"
properties_j: "device:\"walleye-armv7\""
properties_j: "generational_cc:true"
properties_j: "heap_poisoning:true"
+ properties_j: "product:\"arm_krait\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
@@ -681,12 +733,14 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:64"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:true"
properties_j: "debug:true"
properties_j: "device:\"walleye-armv8\""
properties_j: "generational_cc:true"
properties_j: "heap_poisoning:true"
+ properties_j: "product:\"armv8\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
@@ -710,12 +764,14 @@ buckets {
name: "art"
cipd_package: "infra/recipe_bundles/chromium.googlesource.com/chromium/tools/build"
cipd_version: "refs/heads/main"
+ properties_j: "bitness:64"
properties_j: "builder_group:\"client.art\""
properties_j: "concurrent_collector:true"
properties_j: "debug:false"
properties_j: "device:\"walleye-armv8\""
properties_j: "generational_cc:true"
properties_j: "heap_poisoning:true"
+ properties_j: "product:\"armv8\""
}
execution_timeout_secs: 108000
expiration_secs: 61200
diff --git a/tools/luci/config/generated/luci-logdog.cfg b/tools/luci/config/generated/luci-logdog.cfg
index adc75bef49..01a391261d 100644
--- a/tools/luci/config/generated/luci-logdog.cfg
+++ b/tools/luci/config/generated/luci-logdog.cfg
@@ -2,7 +2,7 @@
# Do not modify manually.
#
# For the schema of this file, see ProjectConfig message:
-# https://luci-config.appspot.com/schemas/projects:luci-logdog.cfg
+# https://config.luci.app/schemas/projects:luci-logdog.cfg
reader_auth_groups: "all"
writer_auth_groups: "luci-logdog-chromium-writers"
diff --git a/tools/luci/config/generated/luci-milo.cfg b/tools/luci/config/generated/luci-milo.cfg
index 8863af65ce..979a5d1fb9 100644
--- a/tools/luci/config/generated/luci-milo.cfg
+++ b/tools/luci/config/generated/luci-milo.cfg
@@ -2,7 +2,7 @@
# Do not modify manually.
#
# For the schema of this file, see Project message:
-# https://luci-config.appspot.com/schemas/projects:luci-milo.cfg
+# https://config.luci.app/schemas/projects:luci-milo.cfg
consoles {
id: "luci"
diff --git a/tools/luci/config/generated/luci-notify.cfg b/tools/luci/config/generated/luci-notify.cfg
index 4456fa3778..a281a827ec 100644
--- a/tools/luci/config/generated/luci-notify.cfg
+++ b/tools/luci/config/generated/luci-notify.cfg
@@ -2,7 +2,7 @@
# Do not modify manually.
#
# For the schema of this file, see ProjectConfig message:
-# https://luci-config.appspot.com/schemas/projects:luci-notify.cfg
+# https://config.luci.app/schemas/projects:luci-notify.cfg
notifiers {
notifications {
@@ -274,6 +274,19 @@ notifiers {
}
builders {
bucket: "ci"
+ name: "qemu-armv8-ndebug"
+ }
+}
+notifiers {
+ notifications {
+ on_new_status: FAILURE
+ on_new_status: INFRA_FAILURE
+ email {
+ recipients: "art-team+chromium-buildbot@google.com"
+ }
+ }
+ builders {
+ bucket: "ci"
name: "qemu-riscv64-ndebug"
}
}
diff --git a/tools/luci/config/generated/luci-scheduler.cfg b/tools/luci/config/generated/luci-scheduler.cfg
index 291ed09677..479447321d 100644
--- a/tools/luci/config/generated/luci-scheduler.cfg
+++ b/tools/luci/config/generated/luci-scheduler.cfg
@@ -2,7 +2,7 @@
# Do not modify manually.
#
# For the schema of this file, see ProjectConfig message:
-# https://luci-config.appspot.com/schemas/projects:luci-scheduler.cfg
+# https://config.luci.app/schemas/projects:luci-scheduler.cfg
job {
id: "angler-armv7-debug"
@@ -205,6 +205,16 @@ job {
}
}
job {
+ id: "qemu-armv8-ndebug"
+ realm: "ci"
+ acl_sets: "ci"
+ buildbucket {
+ server: "cr-buildbucket.appspot.com"
+ bucket: "ci"
+ builder: "qemu-armv8-ndebug"
+ }
+}
+job {
id: "qemu-riscv64-ndebug"
realm: "ci"
acl_sets: "ci"
@@ -278,6 +288,7 @@ trigger {
triggers: "host-x86_64-ndebug"
triggers: "host-x86_64-non-gen-cc"
triggers: "host-x86_64-poison-debug"
+ triggers: "qemu-armv8-ndebug"
triggers: "qemu-riscv64-ndebug"
triggers: "qemu-riscv64-ndebug-build_only"
triggers: "walleye-armv7-poison-debug"
@@ -312,6 +323,7 @@ trigger {
triggers: "host-x86_64-ndebug"
triggers: "host-x86_64-non-gen-cc"
triggers: "host-x86_64-poison-debug"
+ triggers: "qemu-armv8-ndebug"
triggers: "qemu-riscv64-ndebug"
triggers: "qemu-riscv64-ndebug-build_only"
triggers: "walleye-armv7-poison-debug"
@@ -346,6 +358,7 @@ trigger {
triggers: "host-x86_64-ndebug"
triggers: "host-x86_64-non-gen-cc"
triggers: "host-x86_64-poison-debug"
+ triggers: "qemu-armv8-ndebug"
triggers: "qemu-riscv64-ndebug"
triggers: "qemu-riscv64-ndebug-build_only"
triggers: "walleye-armv7-poison-debug"
@@ -380,6 +393,7 @@ trigger {
triggers: "host-x86_64-ndebug"
triggers: "host-x86_64-non-gen-cc"
triggers: "host-x86_64-poison-debug"
+ triggers: "qemu-armv8-ndebug"
triggers: "qemu-riscv64-ndebug"
triggers: "qemu-riscv64-ndebug-build_only"
triggers: "walleye-armv7-poison-debug"
diff --git a/tools/luci/config/generated/project.cfg b/tools/luci/config/generated/project.cfg
index 584e84f46a..d517637b4b 100644
--- a/tools/luci/config/generated/project.cfg
+++ b/tools/luci/config/generated/project.cfg
@@ -2,12 +2,12 @@
# Do not modify manually.
#
# For the schema of this file, see ProjectCfg message:
-# https://luci-config.appspot.com/schemas/projects:project.cfg
+# https://config.luci.app/schemas/projects:project.cfg
name: "art"
access: "group:all"
lucicfg {
- version: "1.38.1"
+ version: "1.43.4"
package_dir: ".."
config_dir: "generated"
entry_point: "main.star"
diff --git a/tools/luci/config/generated/realms.cfg b/tools/luci/config/generated/realms.cfg
index 777f8685e7..c45317659a 100644
--- a/tools/luci/config/generated/realms.cfg
+++ b/tools/luci/config/generated/realms.cfg
@@ -2,7 +2,7 @@
# Do not modify manually.
#
# For the schema of this file, see RealmsCfg message:
-# https://luci-config.appspot.com/schemas/projects:realms.cfg
+# https://config.luci.app/schemas/projects:realms.cfg
realms {
name: "@root"
diff --git a/tools/luci/config/main.star b/tools/luci/config/main.star
index af746d6735..0320ce68cf 100755
--- a/tools/luci/config/main.star
+++ b/tools/luci/config/main.star
@@ -217,8 +217,10 @@ def target_builders():
short_name="dbg",
dimensions=target_dims,
properties={
+ "bitness": 32,
"device": "angler-armv7",
"debug": True,
+ "product": "arm_krait",
}
)
ci_builder(
@@ -227,10 +229,12 @@ def target_builders():
short_name="ngen",
dimensions=userfault_gc_target_dims,
properties={
+ "bitness": 32,
"device": "angler-armv7",
"debug": True,
"concurrent_collector": False,
"generational_cc": False,
+ "product": "arm_krait",
}
)
ci_builder(
@@ -239,8 +243,10 @@ def target_builders():
short_name="ndbg",
dimensions=target_dims,
properties={
+ "bitness": 32,
"device": "angler-armv7",
"debug": False,
+ "product": "arm_krait",
}
)
ci_builder(
@@ -249,8 +255,10 @@ def target_builders():
short_name="dbg",
dimensions=target_dims,
properties={
+ "bitness": 64,
"device": "angler-armv8",
"debug": True,
+ "product": "armv8",
}
)
ci_builder(
@@ -259,10 +267,12 @@ def target_builders():
short_name="ngen",
dimensions=userfault_gc_target_dims,
properties={
+ "bitness": 64,
"device": "angler-armv8",
"debug": True,
"concurrent_collector": False,
"generational_cc": False,
+ "product": "armv8",
}
)
ci_builder(
@@ -271,8 +281,10 @@ def target_builders():
short_name="ndbg",
dimensions=target_dims,
properties={
+ "bitness": 64,
"device": "angler-armv8",
"debug": False,
+ "product": "armv8",
}
)
ci_builder(
@@ -281,9 +293,11 @@ def target_builders():
short_name="dbg",
dimensions=target_dims,
properties={
+ "bitness": 32,
"device": "bullhead-armv7",
"debug": False,
"gcstress": True,
+ "product": "arm_krait",
}
)
ci_builder(
@@ -292,9 +306,11 @@ def target_builders():
short_name="dbg",
dimensions=target_dims,
properties={
+ "bitness": 64,
"device": "bullhead-armv8",
"debug": True,
"gcstress": True,
+ "product": "armv8",
}
)
ci_builder(
@@ -303,9 +319,11 @@ def target_builders():
short_name="ndbg",
dimensions=target_dims,
properties={
+ "bitness": 64,
"device": "bullhead-armv8",
"debug": False,
"gcstress": True,
+ "product": "armv8",
}
)
ci_builder(
@@ -314,9 +332,11 @@ def target_builders():
short_name="dbg",
dimensions=target_dims,
properties={
+ "bitness": 32,
"device": "walleye-armv7",
"debug": True,
"heap_poisoning": True,
+ "product": "arm_krait",
}
)
ci_builder(
@@ -325,9 +345,11 @@ def target_builders():
short_name="dbg",
dimensions=target_dims,
properties={
+ "bitness": 64,
"device": "walleye-armv8",
"debug": True,
"heap_poisoning": True,
+ "product": "armv8",
}
)
ci_builder(
@@ -336,9 +358,11 @@ def target_builders():
short_name="ndbg",
dimensions=target_dims,
properties={
+ "bitness": 64,
"device": "walleye-armv8",
"debug": False,
"heap_poisoning": True,
+ "product": "armv8",
}
)
@@ -465,12 +489,26 @@ def host_builders():
}
)
ci_builder(
+ name="qemu-armv8-ndebug",
+ category="qemu|armv8",
+ short_name="ndbg",
+ dimensions=host_dims,
+ is_fyi=True,
+ properties={
+ "bitness": 64,
+ "debug": False,
+ "device": "qemu-armv8",
+ "on_virtual_machine": True,
+ }
+ )
+ ci_builder(
name="qemu-riscv64-ndebug",
category="qemu|riscv64",
short_name="ndbg",
dimensions=host_dims,
is_fyi=True,
properties={
+ "bitness": 64,
"debug": False,
"device": "qemu-riscv64",
"on_virtual_machine": True,
@@ -482,6 +520,7 @@ def host_builders():
short_name="bo",
dimensions=host_dims,
properties={
+ "bitness": 64,
"build_only": True,
"debug": False,
"device": "qemu-riscv64",