hw: implement power-saving kernel lock for ARM smp

Thanks to earlier work by Martin Stein, this commit finally incorporates a
non-spinning kernel lock on multi-core ARM platforms.

Fix #1313
Stefan Kalkowski 2019-03-13 14:58:23 +01:00 committed by Norman Feske
parent b04a70177b
commit 3725e91603
14 changed files with 114 additions and 64 deletions
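In short: previously, a CPU waiting for the kernel lock spun in a cmpxchg loop; on ARM SMP it now sleeps in WFE until the lock holder issues SEV on release. Below is a hedged before/after sketch built from the primitives that appear in the hunks of this commit; the function names are illustrative only, and the Kernel::Cpu scoping is assumed to follow the committed lock.cc.

#include <cpu/atomic.h>          /* Genode::cmpxchg                            */
#include <cpu/memory_barrier.h>  /* Genode::memory_barrier                     */
#include <kernel/cpu.h>          /* wait_for_xchg / wakeup_waiting_cpus on ARM */

enum State { UNLOCKED, LOCKED };
static State volatile _locked { UNLOCKED };

/* before: a waiting CPU burns cycles and power in a busy loop */
static void lock_by_spinning()
{
	while (!Genode::cmpxchg((volatile int *)&_locked, UNLOCKED, LOCKED))
		; /* spin until the holder stores UNLOCKED */
}

/* after (ARM): a waiting CPU sleeps in WFE until the holder issues SEV */
static void lock_by_waiting()
{
	Kernel::Cpu::wait_for_xchg(&_locked, LOCKED, UNLOCKED); /* ldrex/wfe/strex */
}

static void unlock_and_wake()
{
	Genode::memory_barrier();            /* publish writes of the section     */
	_locked = UNLOCKED;
	Kernel::Cpu::wakeup_waiting_cpus();  /* dsb + sev: wake CPUs parked in WFE */
}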

@ -58,7 +58,6 @@ SRC_CC += kernel/init.cc
SRC_CC += kernel/ipc_node.cc
SRC_CC += kernel/irq.cc
SRC_CC += kernel/kernel.cc
SRC_CC += kernel/lock.cc
SRC_CC += kernel/object.cc
SRC_CC += kernel/signal_receiver.cc
SRC_CC += kernel/thread.cc

@ -12,6 +12,7 @@ INC_DIR += $(BASE_DIR)/../base-hw/src/core/spec/arm_v6
SRC_CC += spec/arm_v6/perf_counter.cc
SRC_CC += kernel/vm_thread_off.cc
SRC_CC += kernel/cpu_up.cc
SRC_CC += kernel/lock.cc
SRC_S += spec/arm/vfpv2.s

@ -11,6 +11,7 @@ INC_DIR += $(BASE_DIR)/../base-hw/src/core/spec/arm_gic
# add C++ sources
SRC_CC += spec/cortex_a15/cpu.cc
SRC_CC += kernel/cpu_mp.cc
SRC_CC += spec/arm/kernel/lock.cc
# include less specific configuration
include $(BASE_DIR)/../base-hw/lib/mk/spec/arm_v7/core-hw.inc

@ -10,6 +10,7 @@ INC_DIR += $(BASE_DIR)/../base-hw/src/core/spec/cortex_a8
# add C++ sources
SRC_CC += spec/cortex_a8/cpu.cc
SRC_CC += kernel/cpu_up.cc
SRC_CC += kernel/lock.cc
NR_OF_CPUS = 1

@ -12,6 +12,7 @@ INC_DIR += $(BASE_DIR)/../base-hw/src/core/spec/arm_gic
SRC_CC += spec/cortex_a9/board.cc
SRC_CC += spec/cortex_a9/timer.cc
SRC_CC += spec/arm_gic/pic.cc
SRC_CC += spec/arm/kernel/lock.cc
SRC_CC += kernel/vm_thread_off.cc
SRC_CC += kernel/cpu_mp.cc
SRC_CC += kernel/kernel.cc

@ -20,6 +20,7 @@ SRC_S += spec/x86_64/exception_vector.s
# add C++ sources
SRC_CC += kernel/cpu_up.cc
SRC_CC += kernel/vm_thread_on.cc
SRC_CC += kernel/lock.cc
SRC_CC += spec/x86/io_port_session_component.cc
SRC_CC += spec/x86/io_port_session_support.cc
SRC_CC += spec/x86_64/bios_data_area.cc

@ -12,6 +12,7 @@ CC_OPT += -fno-delete-null-pointer-checks
SRC_CC += platform_services.cc
SRC_CC += kernel/vm_thread_off.cc
SRC_CC += kernel/cpu_up.cc
SRC_CC += kernel/lock.cc
SRC_CC += spec/riscv/cpu.cc
SRC_CC += spec/riscv/kernel/thread.cc
SRC_CC += spec/riscv/kernel/cpu.cc

@ -15,6 +15,7 @@ SRC_S += spec/x86_64/exception_vector.s
# add C++ sources
SRC_CC += kernel/cpu_mp.cc
SRC_CC += kernel/vm_thread_off.cc
SRC_CC += kernel/lock.cc
SRC_CC += spec/x86_64/pic.cc
SRC_CC += spec/x86_64/timer.cc
SRC_CC += spec/x86_64/kernel/thread_exception.cc

@ -12,8 +12,10 @@
*/
#include <base/log.h>
#include <cpu/atomic.h>
#include <cpu/memory_barrier.h>
#include <hw/spec/arm/cortex_a9.h>
#include <hw/spin_lock.h>
#include <util/mmio.h>
#include <platform.h>
@ -29,20 +31,24 @@ class Cpu_counter
{
private:
Hw::Spin_lock _lock { };
volatile int _value = 0;
enum State { UNLOCKED, LOCKED };
State volatile _locked { UNLOCKED };
unsigned volatile _counter { 0 };
public:
void inc()
{
Hw::Spin_lock::Guard guard(_lock);
while (!Genode::cmpxchg((volatile int*)&_locked, UNLOCKED, LOCKED))
;
_counter++;
Genode::memory_barrier();
_value++;
_locked = UNLOCKED;
}
void wait_for(int const v) {
while (_value < v) ; }
void wait_for(unsigned const v) {
while (_counter < v) ; }
};
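For context, a hypothetical sketch of how such a counter is typically used to rendezvous the CPUs during bring-up; the entry-point name and the CPU-count parameter are assumptions, only Cpu_counter itself comes from the hunk above.

/* hypothetical usage sketch, not part of this hunk */
static Cpu_counter cpus_ready;

void cpu_entry(unsigned nr_of_cpus)
{
	/* every CPU (boot and secondary) announces that it reached this point */
	cpus_ready.inc();

	/* ... and proceeds only once all CPUs have checked in */
	cpus_ready.wait_for(nr_of_cpus);
}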

@ -11,6 +11,10 @@
* under the terms of the GNU Affero General Public License version 3.
*/
#include <base/lock_guard.h>
#include <cpu/atomic.h>
#include <cpu/memory_barrier.h>
#include <kernel/cpu.h>
#include <kernel/lock.h>
#include <kernel/kernel.h>
@ -29,9 +33,10 @@ void Kernel::Lock::lock()
/* at least print an error message */
Genode::raw("Cpu ", _current_cpu,
" error: re-entered lock. Kernel exception?!");
for (;;) ;
}
_lock.lock();
while (!Genode::cmpxchg((volatile int*)&_locked, UNLOCKED, LOCKED)) { ; }
_current_cpu = Cpu::executing_id();
}
@ -39,5 +44,7 @@ void Kernel::Lock::lock()
void Kernel::Lock::unlock()
{
_current_cpu = INVALID;
_lock.unlock();
Genode::memory_barrier();
_locked = UNLOCKED;
}

@ -15,8 +15,6 @@
#ifndef _CORE__SPEC__SMP__KERNEL__LOCK_H_
#define _CORE__SPEC__SMP__KERNEL__LOCK_H_
#include <hw/spin_lock.h>
namespace Kernel
{
class Lock;
@ -31,8 +29,10 @@ class Kernel::Lock
enum { INVALID = ~0U };
Hw::Spin_lock _lock { };
volatile unsigned _current_cpu { INVALID };
enum State { UNLOCKED, LOCKED };
State volatile _locked { UNLOCKED };
unsigned volatile _current_cpu { INVALID };
public:
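Pieced together from this hunk and the lock implementations elsewhere in this commit, the per-instance state of the class now reads roughly as follows; this is a hedged reconstruction, and the actual header may declare further members.

namespace Kernel { class Lock; }

class Kernel::Lock
{
	private:

		enum { INVALID = ~0U };
		enum State { UNLOCKED, LOCKED };

		State    volatile _locked      { UNLOCKED };
		unsigned volatile _current_cpu { INVALID };

	public:

		void lock();    /* spins, or on ARM sleeps, until acquired  */
		void unlock();  /* releases and, on ARM, wakes waiting CPUs */
};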

@ -0,0 +1,50 @@
/*
* \brief Kernel lock for multi-processor systems
* \author Stefan Kalkowski
* \date 2018-11-20
*/
/*
* Copyright (C) 2019 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#include <base/lock_guard.h>
#include <cpu/atomic.h>
#include <cpu/memory_barrier.h>
#include <kernel/cpu.h>
#include <kernel/lock.h>
#include <kernel/kernel.h>
Kernel::Lock & Kernel::data_lock()
{
static Kernel::Lock lock;
return lock;
}
void Kernel::Lock::lock()
{
/* check for the lock holder being the same cpu */
if (_current_cpu == Cpu::executing_id()) {
/* at least print an error message */
Genode::raw("Cpu ", _current_cpu,
" error: re-entered lock. Kernel exception?!");
}
Cpu::wait_for_xchg(&_locked, LOCKED, UNLOCKED);
_current_cpu = Cpu::executing_id();
}
void Kernel::Lock::unlock()
{
_current_cpu = INVALID;
Genode::memory_barrier();
_locked = UNLOCKED;
Cpu::wakeup_waiting_cpus();
}
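How the lock is consumed: all in-kernel execution is serialized behind data_lock(). A hedged sketch of the typical bracket around a kernel entry follows; the surrounding function is illustrative, only data_lock(), lock(), and unlock() stem from the file above.

#include <kernel/lock.h>
#include <kernel/kernel.h>

void example_kernel_entry()
{
	Kernel::data_lock().lock();    /* may park this CPU in WFE until free */

	/* ... handle the exception or interrupt that entered the kernel ... */

	Kernel::data_lock().unlock();  /* issues SEV, waking any parked CPUs  */
}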

@ -265,6 +265,36 @@ struct Hw::Arm_cpu
asm volatile("dsb\n"
"isb\n");
}
static inline void wait_for_xchg(volatile void * addr,
unsigned long new_value,
unsigned long expected_value)
{
asm volatile(
/* check if load value of 'addr' is as expected */
"1: ldrex r7, [%0] \n"
"cmp r7, %2 \n"
/* if not, wait for other CPU to send us an event */
"wfene \n"
/* if yes, attempt to write 'new_value' to 'addr' */
"strexeq r7, %1, [%0] \n"
/* if write failed, restart */
"cmpeq r7, #0 \n"
"bne 1b \n"
"dmb \n"
:: "r"(addr), "r"(new_value), "r"(expected_value) : "cc", "r7");
}
static inline void wakeup_waiting_cpus()
{
asm volatile(
"dsb \n"
"sev \n"
);
}
};
#endif /* _SRC__LIB__HW__SPEC__ARM__CPU_H_ */
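To make the assembly easier to follow, here is a hedged C++ paraphrase that substitutes a GCC builtin for the ldrex/strex pair; it cannot reproduce the exact coupling of the exclusive monitor with WFE and is meant purely as documentation of the logic, not as a replacement.

/* paraphrase only - not a drop-in replacement for the assembly above */
static inline void wait_for_xchg_sketch(unsigned long volatile *addr,
                                        unsigned long new_value,
                                        unsigned long expected_value)
{
	for (;;) {
		unsigned long expected = expected_value;

		/* atomically replace *addr by new_value if it still equals expected_value */
		if (__atomic_compare_exchange_n(addr, &expected, new_value, false,
		                                __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
			return;                        /* exchange succeeded, lock taken */

		asm volatile("wfe" ::: "memory");  /* park until another CPU signals SEV */
	}
}

static inline void wakeup_waiting_cpus_sketch()
{
	/* complete outstanding writes, then send the event to all CPUs */
	asm volatile("dsb \n sev \n" ::: "memory");
}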

@ -1,49 +0,0 @@
/*
* \brief Spin lock used to synchronize different CPU cores
* \author Martin Stein
* \author Stefan Kalkowski
* \date 2012-11-30
*/
/*
* Copyright (C) 2012-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#ifndef _SRC__LIB__HW__SPIN_LOCK_H_
#define _SRC__LIB__HW__SPIN_LOCK_H_
#include <base/lock_guard.h>
#include <cpu/atomic.h>
#include <cpu/memory_barrier.h>
namespace Hw { class Spin_lock; }
class Hw::Spin_lock
{
private:
enum State { UNLOCKED, LOCKED };
State volatile _locked = UNLOCKED;
public:
void lock()
{
while (!Genode::cmpxchg((volatile int*)&_locked, UNLOCKED, LOCKED))
;
}
void unlock()
{
Genode::memory_barrier();
_locked = UNLOCKED;
}
using Guard = Genode::Lock_guard<Spin_lock>;
};
#endif /* _SRC__LIB__HW__SPIN_LOCK_H_ */