diff --git a/repos/base-fiasco/src/base/lock/lock.cc b/repos/base-fiasco/src/base/lock/lock.cc
index 9f3952c3c..8cb6036d9 100644
--- a/repos/base-fiasco/src/base/lock/lock.cc
+++ b/repos/base-fiasco/src/base/lock/lock.cc
@@ -14,6 +14,7 @@
 /* Genode includes */
 #include
 #include
+#include <cpu/memory_barrier.h>
 #include
 
 /* L4/Fiasco includes */
@@ -46,5 +47,6 @@ void Cancelable_lock::lock()
 
 void Cancelable_lock::unlock()
 {
+	Genode::memory_barrier();
 	_lock = UNLOCKED;
 }
diff --git a/repos/base-hw/src/core/include/kernel/pd.h b/repos/base-hw/src/core/include/kernel/pd.h
index 7ac8517a8..dd0c9241b 100644
--- a/repos/base-hw/src/core/include/kernel/pd.h
+++ b/repos/base-hw/src/core/include/kernel/pd.h
@@ -17,6 +17,7 @@
 /* Genode includes */
 #include
+#include <cpu/memory_barrier.h>
 
 /* core includes */
 #include
@@ -49,11 +50,6 @@ class Kernel::Lock
 
 		int volatile _locked;
 
-		/**
-		 * Finish all previously started memory transactions
-		 */
-		void _memory_barrier() { asm volatile ("" : : : "memory"); }
-
 	public:
 
 		Lock() : _locked(0) { }
@@ -68,7 +64,7 @@ class Kernel::Lock
 		 */
 		void unlock()
 		{
-			_memory_barrier();
+			Genode::memory_barrier();
 			_locked = 0;
 		}
diff --git a/repos/base-nova/src/base/lock/spin_lock.h b/repos/base-nova/src/base/lock/spin_lock.h
index eec3b3dd7..8ebf1244f 100644
--- a/repos/base-nova/src/base/lock/spin_lock.h
+++ b/repos/base-nova/src/base/lock/spin_lock.h
@@ -16,6 +16,7 @@
 /* Genode includes */
 #include
+#include <cpu/memory_barrier.h>
 #include
 
 /* local includes */
@@ -89,7 +90,7 @@ static inline void spinlock_unlock(volatile T *lock_variable)
 	if (utcb) {
 		utcb->tls = (((utcb->tls & COUNTER_MASK) + 4) % 4096) & COUNTER_MASK;
 		/* take care that compiler generates code that writes tls to memory */
-		asm volatile ("":::"memory");
+		Genode::memory_barrier();
 	}
 
 	/*
diff --git a/repos/base/include/arm/cpu/atomic.h b/repos/base/include/arm/cpu/atomic.h
index f74033e4e..e9d533837 100644
--- a/repos/base/include/arm/cpu/atomic.h
+++ b/repos/base/include/arm/cpu/atomic.h
@@ -15,6 +15,8 @@
 #ifndef _INCLUDE__ARM__CPU__ATOMIC_H_
 #define _INCLUDE__ARM__CPU__ATOMIC_H_
 
+#include <cpu/memory_barrier.h>
+
 namespace Genode {
 
 	/**
@@ -25,6 +27,8 @@ namespace Genode {
 	 * both values are different, the value at dest remains
 	 * unchanged.
 	 *
+	 * Note that cmpxchg() represents a memory barrier.
+	 *
 	 * \return  1 if the value was successfully changed to new_val,
 	 *          0 if cmp_val and the value at dest differ.
 	 */
@@ -47,6 +51,7 @@ namespace Genode {
 			: "=&r" (not_exclusive), "=&r" (equal)
 			: "r" (dest), "r" (cmp_val), "r" (new_val)
 			: "cc");
+		Genode::memory_barrier();
 		return equal && !not_exclusive;
 	}
 }
diff --git a/repos/base/include/arm_v5/cpu/memory_barrier.h b/repos/base/include/arm_v5/cpu/memory_barrier.h
new file mode 100644
index 000000000..cf37c854b
--- /dev/null
+++ b/repos/base/include/arm_v5/cpu/memory_barrier.h
@@ -0,0 +1,32 @@
+/*
+ * \brief   Memory barrier
+ * \author  Martin Stein
+ * \date    2014-11-12
+ *
+ * The memory barrier prevents memory accesses from being reordered in such
+ * a way that accesses to the guarded resource get outside the guarded
+ * stage. As cmpxchg() defines the start of the guarded stage, it also
+ * represents an effective memory barrier.
+ *
+ * Note that we assume a compiler memory-barrier is sufficient on ARMv5.
+ */
+
+/*
+ * Copyright (C) 2014 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU General Public License version 2.
+ */
+
+#ifndef _INCLUDE__ARM_V5__CPU__MEMORY_BARRIER_H_
+#define _INCLUDE__ARM_V5__CPU__MEMORY_BARRIER_H_
+
+namespace Genode {
+
+	static inline void memory_barrier()
+	{
+		asm volatile ("" ::: "memory");
+	}
+}
+
+#endif /* _INCLUDE__ARM_V5__CPU__MEMORY_BARRIER_H_ */
diff --git a/repos/base/include/arm_v6/cpu/memory_barrier.h b/repos/base/include/arm_v6/cpu/memory_barrier.h
new file mode 100644
index 000000000..5ad06bace
--- /dev/null
+++ b/repos/base/include/arm_v6/cpu/memory_barrier.h
@@ -0,0 +1,38 @@
+/*
+ * \brief   Memory barrier
+ * \author  Martin Stein
+ * \date    2014-11-12
+ *
+ * The memory barrier prevents memory accesses from being reordered in such
+ * a way that accesses to the guarded resource get outside the guarded
+ * stage. As cmpxchg() defines the start of the guarded stage, it also
+ * represents an effective memory barrier.
+ *
+ * On ARM, the architectural memory model not only allows memory accesses
+ * to take local effect in a different order than their program order, it
+ * also allows different observers (components that can access memory,
+ * like data busses, TLBs, and branch predictors) to observe these effects
+ * each in a different order. Thus, enforcing a correct program order via
+ * a compiler memory-barrier isn't sufficient for a correct observation
+ * order. The memory barrier must additionally be enforced at the
+ * architecture level.
+ */
+
+/*
+ * Copyright (C) 2014 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU General Public License version 2.
+ */
+
+#ifndef _INCLUDE__ARM_V6__CPU__MEMORY_BARRIER_H_
+#define _INCLUDE__ARM_V6__CPU__MEMORY_BARRIER_H_
+
+namespace Genode {
+
+	static inline void memory_barrier()
+	{
+		asm volatile ("mcr p15, 0, r0, c7, c10, 5" ::: "memory");
+	}
+}
+
+#endif /* _INCLUDE__ARM_V6__CPU__MEMORY_BARRIER_H_ */
diff --git a/repos/base/include/arm_v7/cpu/memory_barrier.h b/repos/base/include/arm_v7/cpu/memory_barrier.h
new file mode 100644
index 000000000..dc01364b5
--- /dev/null
+++ b/repos/base/include/arm_v7/cpu/memory_barrier.h
@@ -0,0 +1,38 @@
+/*
+ * \brief   Memory barrier
+ * \author  Martin Stein
+ * \date    2014-11-12
+ *
+ * The memory barrier prevents memory accesses from being reordered in such
+ * a way that accesses to the guarded resource get outside the guarded
+ * stage. As cmpxchg() defines the start of the guarded stage, it also
+ * represents an effective memory barrier.
+ *
+ * On ARM, the architectural memory model not only allows memory accesses
+ * to take local effect in a different order than their program order, it
+ * also allows different observers (components that can access memory,
+ * like data busses, TLBs, and branch predictors) to observe these effects
+ * each in a different order. Thus, enforcing a correct program order via
+ * a compiler memory-barrier isn't sufficient for a correct observation
+ * order. The memory barrier must additionally be enforced at the
+ * architecture level.
+ */
+
+/*
+ * Copyright (C) 2014 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU General Public License version 2.
+ */
+
+#ifndef _INCLUDE__ARM_V7__CPU__MEMORY_BARRIER_H_
+#define _INCLUDE__ARM_V7__CPU__MEMORY_BARRIER_H_
+
+namespace Genode {
+
+	static inline void memory_barrier()
+	{
+		asm volatile ("dmb" ::: "memory");
+	}
+}
+
+#endif /* _INCLUDE__ARM_V7__CPU__MEMORY_BARRIER_H_ */
diff --git a/repos/base/include/x86/cpu/atomic.h b/repos/base/include/x86/cpu/atomic.h
index fc8df5f69..6cc4a8fc8 100644
--- a/repos/base/include/x86/cpu/atomic.h
+++ b/repos/base/include/x86/cpu/atomic.h
@@ -26,6 +26,8 @@ namespace Genode {
 	 * both values are different, the value at dest remains
 	 * unchanged.
 	 *
+	 * Note that cmpxchg() represents a memory barrier.
+	 *
 	 * \return  1 if the value was successfully changed to new_val,
 	 *          0 if cmp_val and the value at dest differ.
 	 */
diff --git a/repos/base/include/x86/cpu/memory_barrier.h b/repos/base/include/x86/cpu/memory_barrier.h
new file mode 100644
index 000000000..ff5eb0174
--- /dev/null
+++ b/repos/base/include/x86/cpu/memory_barrier.h
@@ -0,0 +1,36 @@
+/*
+ * \brief   Memory barrier
+ * \author  Martin Stein
+ * \date    2014-11-12
+ *
+ * The memory barrier prevents memory accesses from being reordered in such
+ * a way that accesses to the guarded resource get outside the guarded
+ * stage. As cmpxchg() defines the start of the guarded stage, it also
+ * represents an effective memory barrier.
+ *
+ * On x86, the architecture ensures that writes are not reordered with
+ * older reads, that writes to memory are not reordered with other writes
+ * (except in cases that are not relevant for our locks), and that
+ * read/write instructions are not reordered with I/O instructions, locked
+ * instructions, and serializing instructions. Thus, a compiler
+ * memory-barrier is sufficient.
+ */
+
+/*
+ * Copyright (C) 2014 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU General Public License version 2.
+ */
+
+#ifndef _INCLUDE__X86__CPU__MEMORY_BARRIER_H_
+#define _INCLUDE__X86__CPU__MEMORY_BARRIER_H_
+
+namespace Genode {
+
+	static inline void memory_barrier()
+	{
+		asm volatile ("" ::: "memory");
+	}
+}
+
+#endif /* _INCLUDE__X86__CPU__MEMORY_BARRIER_H_ */
diff --git a/repos/base/mk/spec-arm_v5.mk b/repos/base/mk/spec-arm_v5.mk
index e6142429f..dfe46685e 100644
--- a/repos/base/mk/spec-arm_v5.mk
+++ b/repos/base/mk/spec-arm_v5.mk
@@ -1,5 +1,8 @@
 SPECS += arm
 
+# add repository relative include paths
+REP_INC_DIR += include/arm_v5
+
 #
 # Configure target CPU
 #
diff --git a/repos/base/src/base/lock/lock.cc b/repos/base/src/base/lock/lock.cc
index 673c31b8d..5b6c3e080 100644
--- a/repos/base/src/base/lock/lock.cc
+++ b/repos/base/src/base/lock/lock.cc
@@ -13,6 +13,7 @@
 /* Genode includes */
 #include
+#include <cpu/memory_barrier.h>
 
 /* local includes */
 #include
diff --git a/repos/base/src/base/lock/spin_lock.h b/repos/base/src/base/lock/spin_lock.h
index 705dc58a5..ff1cbf793 100644
--- a/repos/base/src/base/lock/spin_lock.h
+++ b/repos/base/src/base/lock/spin_lock.h
@@ -17,6 +17,7 @@
 /* Genode includes */
 #include
+#include <cpu/memory_barrier.h>
 
 /* local includes */
 #include
@@ -29,7 +30,6 @@
 
 enum State { SPINLOCK_LOCKED, SPINLOCK_UNLOCKED };
 
-static inline void memory_barrier() { asm volatile ("" : : : "memory"); }
 
 static inline void spinlock_lock(volatile int *lock_variable)
 {
@@ -46,7 +46,7 @@ static inline void spinlock_lock(volatile int *lock_variable)
 static inline void spinlock_unlock(volatile int *lock_variable)
 {
 	/* make sure all got written by compiler before releasing lock */
-	memory_barrier();
+	Genode::memory_barrier();
 	*lock_variable = SPINLOCK_UNLOCKED;
 }
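The following is a minimal sketch of the lock/unlock pattern that this patch establishes: Genode::cmpxchg() acquires the lock and doubles as the barrier on the acquire side, while Genode::memory_barrier() flushes all guarded accesses before the lock word is cleared on release. The class name My_spinlock is hypothetical and not part of the patch; the sketch only assumes the cmpxchg(volatile int *, int, int) interface documented in cpu/atomic.h.

#include <cpu/atomic.h>          /* Genode::cmpxchg() */
#include <cpu/memory_barrier.h>  /* Genode::memory_barrier() */

/* hypothetical example class, not part of the patch */
class My_spinlock
{
	private:

		enum { UNLOCKED = 0, LOCKED = 1 };

		int volatile _locked;

	public:

		My_spinlock() : _locked(UNLOCKED) { }

		void lock()
		{
			/* cmpxchg() defines the start of the guarded stage */
			while (!Genode::cmpxchg(&_locked, UNLOCKED, LOCKED)) ;
		}

		void unlock()
		{
			/* finish all guarded accesses before the lock appears free */
			Genode::memory_barrier();
			_locked = UNLOCKED;
		}
};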