author    Travis Geiselbrecht <geist@foobox.com>  2024-05-09 19:51:32 -0700
committer Travis Geiselbrecht <geist@foobox.com>  2024-05-09 19:51:32 -0700
commit    339ff8995a8fdd33c5ef1ac492e627c22fee941c (patch)
tree      561f10238e0b426190050535ddade50ccf306be1
parent    d3cd5be13e8089d5c91df8c44b09a5ed296618b0 (diff)
download  lk-339ff8995a8fdd33c5ef1ac492e627c22fee941c.tar.gz
[arch][barriers] add default memory barriers for all of the architectures
Most are pretty straightforward, but for a few of the more esoteric architectures only defaults are implemented.
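For context: the uniprocessor targets below default their barriers to CF, which appears to be LK's compiler-fence macro. A minimal sketch of the idea, assuming CF is an empty asm with a memory clobber (not necessarily LK's exact definition):

    // Compiler-only fence: emits no instruction, but stops the compiler
    // from reordering or caching memory accesses across this point.
    #define CF do { __asm__ volatile("" ::: "memory"); } while (0)

    // On a single in-order core, that is all the *mb() macros need:
    #define mb()  CF
    #define rmb() CF
    #define wmb() CF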
-rw-r--r--  arch/arm/include/arch/arch_ops.h          1
-rw-r--r--  arch/m68k/include/arch/arch_ops.h         2
-rw-r--r--  arch/microblaze/include/arch/arch_ops.h   9
-rw-r--r--  arch/mips/include/arch/arch_ops.h        10
-rw-r--r--  arch/or1k/include/arch/arch_ops.h        10
-rw-r--r--  arch/x86/include/arch/arch_ops.h         27
6 files changed, 58 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/arch/arch_ops.h b/arch/arm/include/arch/arch_ops.h
index 73a21d23..4a2c7f8b 100644
--- a/arch/arm/include/arch/arch_ops.h
+++ b/arch/arm/include/arch/arch_ops.h
@@ -210,6 +210,7 @@ static inline void arch_set_current_thread(struct thread *t) {
#endif
+// TODO: use less strong versions of these (dsb sy/ld/st)
#define mb() DSB
#define wmb() DSB
#define rmb() DSB
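The TODO above refers to the DSB option field. A hedged sketch of what the weaker variants could look like, assuming DSB currently expands to a full `dsb` instruction: ARMv7 offers a store-only option (`dsb st`) but no load-only one, so rmb() would keep the full barrier (ARMv8 adds `dsb ld`).

    // Hypothetical weaker variants (ARMv7 option syntax), not in this commit:
    #define mb()  __asm__ volatile("dsb sy" ::: "memory")  // full system barrier
    #define wmb() __asm__ volatile("dsb st" ::: "memory")  // orders stores only
    #define rmb() __asm__ volatile("dsb sy" ::: "memory")  // no "dsb ld" before v8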
diff --git a/arch/m68k/include/arch/arch_ops.h b/arch/m68k/include/arch/arch_ops.h
index 552f70d7..1e651d6c 100644
--- a/arch/m68k/include/arch/arch_ops.h
+++ b/arch/m68k/include/arch/arch_ops.h
@@ -47,7 +47,7 @@ static inline uint arch_curr_cpu_num(void) {
return 0;
}
-// TODO: see if there's a proper (or required) memory barrier on 68k
+// Default barriers for architectures that generally don't need them
#define mb() CF
#define wmb() CF
#define rmb() CF
diff --git a/arch/microblaze/include/arch/arch_ops.h b/arch/microblaze/include/arch/arch_ops.h
index 8e2d85f8..ab22f5de 100644
--- a/arch/microblaze/include/arch/arch_ops.h
+++ b/arch/microblaze/include/arch/arch_ops.h
@@ -66,3 +66,12 @@ static inline uint arch_curr_cpu_num(void) {
return 0;
}
+// Default barriers for architectures that generally don't need them
+// TODO: do we need these for microblaze?
+#define mb() CF
+#define wmb() CF
+#define rmb() CF
+#define smp_mb() CF
+#define smp_wmb() CF
+#define smp_rmb() CF
+
diff --git a/arch/mips/include/arch/arch_ops.h b/arch/mips/include/arch/arch_ops.h
index a20838a5..2e3fcb4e 100644
--- a/arch/mips/include/arch/arch_ops.h
+++ b/arch/mips/include/arch/arch_ops.h
@@ -57,3 +57,13 @@ static inline uint arch_curr_cpu_num(void) {
return 0;
}
+// Default barriers for architectures that generally don't need them
+// TODO: do we need these for mips?
+#define mb() CF
+#define wmb() CF
+#define rmb() CF
+#define smp_mb() CF
+#define smp_wmb() CF
+#define smp_rmb() CF
+
+
diff --git a/arch/or1k/include/arch/arch_ops.h b/arch/or1k/include/arch/arch_ops.h
index 32356c7b..ff8f785f 100644
--- a/arch/or1k/include/arch/arch_ops.h
+++ b/arch/or1k/include/arch/arch_ops.h
@@ -82,4 +82,14 @@ static inline ulong arch_cycle_count(void) { return 0; }
static inline uint arch_curr_cpu_num(void) {
return 0;
}
+
+// Default barriers for architectures that generally don't need them
+// TODO: do we need these for or1k?
+#define mb() CF
+#define wmb() CF
+#define rmb() CF
+#define smp_mb() CF
+#define smp_wmb() CF
+#define smp_rmb() CF
+
#endif // !ASSEMBLY
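Even without SMP these compiler fences are not pointless: on a single core the only reordering hazard between mainline code and an interrupt handler is the compiler itself, since a core always observes its own program order. A minimal sketch with illustrative names (not from this commit):

    static int data;
    static volatile int ready;
    static void use(int v) { (void)v; }  // placeholder for real work

    // Mainline code publishing data to an IRQ handler on the same core.
    void publish(void) {
        data = 42;  // plain store the handler will read
        wmb();      // CF: stops the compiler sinking the store below the flag
        ready = 1;  // flag the handler checks
    }

    // Hypothetical interrupt handler consuming the data.
    void irq_handler(void) {
        if (ready) {
            rmb();      // CF: stops the 'data' read hoisting above 'ready'
            use(data);
        }
    }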
diff --git a/arch/x86/include/arch/arch_ops.h b/arch/x86/include/arch/arch_ops.h
index 65f25b46..b3092d60 100644
--- a/arch/x86/include/arch/arch_ops.h
+++ b/arch/x86/include/arch/arch_ops.h
@@ -65,4 +65,31 @@ static inline uint arch_curr_cpu_num(void) {
return 0;
}
+#if ARCH_X86_64
+// relies on SSE2
+#define mb() __asm__ volatile("mfence" : : : "memory")
+#define rmb() __asm__ volatile("lfence" : : : "memory")
+#define wmb() __asm__ volatile("sfence" : : : "memory")
+#else
+// Use a locked add to the top of the stack as a full load/store barrier.
+// Cannot rely on SSE2 (mfence/lfence/sfence) being intrinsically
+// available on older i386 class hardware.
+#define __storeload_barrier \
+ __asm__ volatile("lock; addl $0, (%%esp)" : : : "memory", "cc")
+#define mb() __storeload_barrier
+#define rmb() __storeload_barrier
+#define wmb() __storeload_barrier
+#endif
+
+#ifdef WITH_SMP
+// XXX probably too strict
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() CF
+#define smp_wmb() CF
+#define smp_rmb() CF
+#endif
+
+
#endif // !ASSEMBLY
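A usage note on the SMP variants: smp_wmb()/smp_rmb() only make sense in pairs, one on the producing side and one on the consuming side. On 32-bit x86 the `lock; addl $0, (%esp)` idiom works as a full barrier because any LOCK-prefixed read-modify-write serializes memory accesses. A minimal producer/consumer sketch (illustrative, not from this commit):

    static int payload;
    static volatile int ready;

    // CPU A: publish the payload, then the flag.
    void producer(void) {
        payload = 42;
        smp_wmb();   // order the data store before the flag store
        ready = 1;
    }

    // CPU B: wait for the flag, then read the payload.
    int consumer(void) {
        while (!ready)
            ;
        smp_rmb();       // order the flag read before the data read
        return payload;  // guaranteed to see 42
    }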