From 5eb26295f38f74a4b8fb76e4c416249b2ec2b808 Mon Sep 17 00:00:00 2001
From: Chris Masone
Date: Tue, 27 Feb 2024 20:03:06 +0000
Subject: Implement Sequence Lock for RISC-V

Using the same std::atomic_thread_fence approach used on x86_64 should
be adequate, at least for now.

Bug: b/327075387
Test: atest --host gfxstream_base_tests
Change-Id: I80db19ae5647bfa43ce4b9514280ab188909e75c
---
 base/include/aemu/base/synchronization/Lock.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/base/include/aemu/base/synchronization/Lock.h b/base/include/aemu/base/synchronization/Lock.h
index 2b88f05..281a970 100644
--- a/base/include/aemu/base/synchronization/Lock.h
+++ b/base/include/aemu/base/synchronization/Lock.h
@@ -237,6 +237,8 @@ static inline __attribute__((always_inline)) void SmpWmb() {
     asm volatile("dmb ishst" ::: "memory");
 #elif defined(__x86_64__)
     std::atomic_thread_fence(std::memory_order_release);
+#elif defined(__riscv) && (__riscv_xlen == 64)
+    std::atomic_thread_fence(std::memory_order_release);
 #else
 #error "Unimplemented SmpWmb for current CPU architecture"
 #endif
@@ -247,6 +249,8 @@ static inline __attribute__((always_inline)) void SmpRmb() {
     asm volatile("dmb ishld" ::: "memory");
 #elif defined(__x86_64__)
     std::atomic_thread_fence(std::memory_order_acquire);
+#elif defined(__riscv) && (__riscv_xlen == 64)
+    std::atomic_thread_fence(std::memory_order_acquire);
 #else
 #error "Unimplemented SmpRmb for current CPU architecture"
 #endif
--
cgit v1.2.3
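
For context, a minimal sketch of how a sequence lock typically pairs the SmpWmb()/SmpRmb() barriers this patch implements. The sketch is not taken from the gfxstream sources: the SeqLockDemo and Payload names and the read/write structure are hypothetical, and the barriers are stood in locally with the same std::atomic_thread_fence calls the patch adds for RISC-V and x86_64.

```cpp
#include <atomic>
#include <cstdint>

// Stand-ins for the barriers touched by this patch; on RISC-V (and x86_64)
// they reduce to std::atomic_thread_fence as in the diff above.
static inline void SmpWmb() { std::atomic_thread_fence(std::memory_order_release); }
static inline void SmpRmb() { std::atomic_thread_fence(std::memory_order_acquire); }

// Hypothetical payload guarded by the sequence counter.
struct Payload { uint64_t a = 0, b = 0; };

struct SeqLockDemo {
    std::atomic<uint32_t> seq{0};
    Payload data;

    // Writer: bump the counter to an odd value, publish the payload,
    // then bump it back to even.
    void write(const Payload& p) {
        seq.store(seq.load(std::memory_order_relaxed) + 1, std::memory_order_relaxed);
        SmpWmb();   // order the first counter bump before the payload stores
        data = p;
        SmpWmb();   // order the payload stores before the final counter bump
        seq.store(seq.load(std::memory_order_relaxed) + 1, std::memory_order_relaxed);
    }

    // Reader: retry until an even counter value is observed unchanged
    // across the payload copy.
    Payload read() const {
        Payload copy;
        uint32_t before, after;
        do {
            before = seq.load(std::memory_order_relaxed);
            SmpRmb();   // order the counter read before the payload reads
            copy = data;
            SmpRmb();   // order the payload reads before the counter re-check
            after = seq.load(std::memory_order_relaxed);
        } while (before != after || (before & 1));
        return copy;
    }
};
```

The design point the commit relies on is that release/acquire fences are strong enough orderings here: a release fence keeps prior stores from sinking below it (the writer side), and an acquire fence keeps later loads from hoisting above it (the reader side), which is all a seqlock's counter protocol needs on RISC-V's weak memory model.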