genode/repos/base-nova/src/base/lock/spin_lock.h
Martin Stein ec6c19a487 base: memory barriers in lock implementations
The memory barrier prevents the compiler from changing the program order
of memory accesses in such a way that accesses to the guarded resource
get outside the guarded stage. As cmpxchg() defines the start of the
guarded stage it also represents an effective memory barrier.

On x86, the architecture guarantees not to reorder writes with older reads,
writes to memory with other writes (except in cases that are not
relevant for our locks), or read/write instructions with I/O
instructions, locked instructions, and serializing instructions.

However, on ARM the architectural memory model allows not only that
memory accesses take local effect in a different order than their
program order, but also that different observers (components that can
access memory, such as data busses, TLBs, and branch predictors) each
observe these effects in a different order. Thus, a correct program
order isn't sufficient for a correct observation order. An additional
memory barrier, enforced at the architectural level, is needed to
achieve this.

Fixes #692
2014-11-28 12:02:34 +01:00

105 lines
2.7 KiB
C++

/*
* \brief Nova specific user land "Spin lock" implementation
* \author Alexander Boettcher
* \date 2014-02-07
*/
/*
* Copyright (C) 2014-2014 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _INCLUDE__BASE__LOCK__SPIN_H_
#define _INCLUDE__BASE__LOCK__SPIN_H_
/* Genode includes */
#include <cpu/atomic.h>
#include <cpu/memory_barrier.h>
#include <base/thread.h>
/* local includes */
#include <lock_helper.h>
/* lock states encoded in the two lowest bits of the lock word */
enum State {
	SPINLOCK_LOCKED    = 0,  /* bit 0 clear - lock is held */
	SPINLOCK_UNLOCKED  = 1,  /* bit 0 set   - lock is free */
	SPINLOCK_CONTENDED = 2,  /* bit 1 set   - a waiter flagged contention */
};
enum { RESERVED_BITS = 12, COUNTER_MASK = 0xFFC };
/**
 * Acquire the spin lock
 *
 * \param lock_variable  word holding the lock state; above RESERVED_BITS it
 *                       stores the holder's EC selector, bits 2..11 the
 *                       holder's helping counter, bit 1 the contention flag,
 *                       bit 0 the locked/unlocked state
 *
 * While the lock is taken, a waiter donates its remaining time slice to the
 * current holder via the NOVA 'ec_ctrl' system call, so a preempted holder
 * can still finish the critical section.
 */
template <typename T>
static inline void spinlock_lock(volatile T *lock_variable)
{
	using Genode::cmpxchg;

	Genode::Thread_base * myself = Genode::Thread_base::myself();

	/*
	 * Identify ourselves by our EC selector. If no Thread_base object
	 * exists, this is presumably the main thread, which uses the
	 * well-known selector PT_SEL_MAIN_EC — TODO confirm against
	 * thread-bootstrap code.
	 */
	T const tid = myself ? myself->tid().ec_sel : Nova::PT_SEL_MAIN_EC;

	unsigned help_counter = 0;

	/* sanity check that ec_sel fits into the lock_variable */
	if (tid >= (1 << (sizeof(*lock_variable) * 8 - RESERVED_BITS)))
		nova_die();

	if (myself) {
		/* read our current helping counter from the thread-local UTCB word */
		Nova::Utcb * utcb = reinterpret_cast<Nova::Utcb *>(myself->utcb());
		help_counter = utcb->tls & COUNTER_MASK;
	}

	/* try to get lock */
	do {
		T raw = *lock_variable;

		if (raw != SPINLOCK_UNLOCKED) {
			/*
			 * Lock is held by another EC. Before donating time, make
			 * sure the contention flag is set so the holder requests a
			 * re-schedule on unlock.
			 */
			if (!(raw & SPINLOCK_CONTENDED))
				/* if it fails - just re-read and retry */
				if (!Genode::cmpxchg(lock_variable, raw, raw | SPINLOCK_CONTENDED))
					continue;

			/*
			 * Donate remaining time slice to help the spinlock holder to
			 * pass the critical section.
			 */
			unsigned long const ec = raw >> RESERVED_BITS;
			unsigned long const tls = raw & COUNTER_MASK;
			Nova::ec_ctrl(Nova::EC_DONATE_SC, ec, tls);

			/*
			 * NOTE: 'continue' in a do-while jumps to the loop
			 * condition, i.e., directly to the acquisition attempt
			 * below.
			 */
			continue;
		}
	/*
	 * Acquisition attempt: atomically replace SPINLOCK_UNLOCKED by a word
	 * carrying our EC selector and helping counter (SPINLOCK_LOCKED is 0,
	 * so bit 0 ends up cleared). Per the commit rationale, cmpxchg() also
	 * acts as the memory barrier that opens the critical section.
	 */
	} while (!cmpxchg(lock_variable, (T)SPINLOCK_UNLOCKED,
	         (tid << RESERVED_BITS) | help_counter | SPINLOCK_LOCKED));
}
/**
 * Release the spin lock
 *
 * \param lock_variable  lock word as described at 'spinlock_lock'
 *
 * Resets the lock word to SPINLOCK_UNLOCKED, advances the caller's helping
 * counter in its UTCB, and — if a waiter flagged contention — asks the
 * kernel for a re-schedule so a donated scheduling context is handed back.
 */
template <typename T>
static inline void spinlock_unlock(volatile T *lock_variable)
{
	using Nova::Utcb;

	Genode::Thread_base * myself = Genode::Thread_base::myself();
	Utcb * utcb = myself ? reinterpret_cast<Utcb *>(myself->utcb()) : 0;

	/*
	 * Unlock. The old value is kept to check the contention flag below.
	 * The cmpxchg loop retries if the word changed between read and swap
	 * (e.g., a waiter set SPINLOCK_CONTENDED concurrently).
	 */
	T old;
	do {
		old = *lock_variable;
	} while (!Genode::cmpxchg(lock_variable, old, (T)SPINLOCK_UNLOCKED));

	/* de-flag time donation help request and set new counter */
	if (utcb) {
		/*
		 * Advance the counter by one slot: the counter lives in bits
		 * 2..11, so +4 is the smallest increment, and % 4096 together
		 * with COUNTER_MASK wraps it within those bits.
		 */
		utcb->tls = (((utcb->tls & COUNTER_MASK) + 4) % 4096) & COUNTER_MASK;
		/* take care that compiler generates code that writes tls to memory */
		Genode::memory_barrier();
	}

	/*
	 * If anybody donated time, request kernel for a re-schedule in order that
	 * the helper can get its time donation (SC) back.
	 */
	if (old & SPINLOCK_CONTENDED)
		Nova::ec_ctrl(Nova::EC_RESCHEDULE);
}
#endif /* _INCLUDE__BASE__LOCK__SPIN_H_ */