base: use Mutex/Blockade

Issue #3612
Alexander Boettcher 2020-02-19 16:26:40 +01:00 committed by Christian Helmuth
parent e87d60ddf7
commit 3956530634
50 changed files with 370 additions and 374 deletions
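In short, the commit splits the former dual-purpose Lock into two dedicated primitives: Mutex for mutual exclusion (acquire/release, Mutex::Guard) and Blockade for thread synchronization (block/wakeup). A minimal sketch of the migration pattern, assuming only the primitives shown in this diff (the surrounding functions are illustrative):

#include <base/mutex.h>
#include <base/blockade.h>

/* before: Lock + Lock::Guard -- after: Mutex + Mutex::Guard */
Genode::Mutex _mutex { };

void update_shared_state()
{
	/* acquired here, released automatically when the guard leaves scope */
	Genode::Mutex::Guard guard(_mutex);

	/* ... access state protected by '_mutex' ... */
}

/* before: Lock(Lock::LOCKED) + lock()/unlock() -- after: Blockade */
Genode::Blockade _barrier { };   /* starts blocked, no LOCKED argument needed */

void waiter()   { _barrier.block();  }   /* sleeps until 'wakeup' is called */
void notifier() { _barrier.wakeup(); }   /* unblocks the waiting thread */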

View File

@ -79,11 +79,11 @@ void Signal_receiver::_platform_begin_dissolve(Signal_context * const c)
{
/**
* Mark the Signal_context as already pending to prevent the receiver
* from taking the lock, and set an invalid context to prevent further
* from taking the mutex, and set an invalid context to prevent further
* processing
*/
{
Lock::Guard context_guard(c->_lock);
Mutex::Guard context_guard(c->_mutex);
c->_pending = true;
c->_curr_signal = Signal::Data(nullptr, 0);
}
@ -96,8 +96,8 @@ void Signal_receiver::_platform_finish_dissolve(Signal_context *) { }
Signal_context_capability Signal_receiver::manage(Signal_context * const c)
{
/* ensure that the context isn't managed already */
Lock::Guard contexts_guard(_contexts_lock);
Lock::Guard context_guard(c->_lock);
Mutex::Guard contexts_guard(_contexts_mutex);
Mutex::Guard context_guard(c->_mutex);
if (c->_receiver) { throw Context_already_in_use(); }
for (;;) {
@ -137,11 +137,11 @@ void Signal_receiver::block_for_signal()
/**
* Check for the signal being pending already to prevent a dead-lock
* when the context is in destruction, and its lock is held
* when the context is in destruction, and its mutex is held
*/
if (!context->_pending) {
/* update signal context */
Lock::Guard lock_guard(context->_lock);
Mutex::Guard context_guard(context->_mutex);
unsigned const num = context->_curr_signal.num + data->num;
context->_pending = true;
context->_curr_signal = Signal::Data(context, num);
@ -154,7 +154,7 @@ void Signal_receiver::block_for_signal()
Signal Signal_receiver::pending_signal()
{
Lock::Guard contexts_lock_guard(_contexts_lock);
Mutex::Guard contexts_guard(_contexts_mutex);
Signal::Data result;
_contexts.for_each_locked([&] (Signal_context &context) {
@ -169,7 +169,7 @@ Signal Signal_receiver::pending_signal()
throw Context_ring::Break_for_each();
});
if (result.context) {
Lock::Guard lock_guard(result.context->_lock);
Mutex::Guard context_guard(result.context->_mutex);
if (result.num == 0)
warning("returning signal with num == 0");
@ -188,7 +188,7 @@ Signal Signal_receiver::pending_signal()
{
/* update signal context */
Lock::Guard lock_guard(context->_lock);
Mutex::Guard context_guard(context->_mutex);
context->_pending = false;
context->_curr_signal = Signal::Data(context, data->num);
result = context->_curr_signal;

View File

@ -64,7 +64,7 @@ Untyped_capability Rpc_entrypoint::_manage(Rpc_object_base *obj)
}
static void cleanup_call(Rpc_object_base *obj, Nova::Utcb * ep_utcb,
Native_capability &cap, Genode::Lock &delay_start)
Native_capability &cap, Genode::Blockade &delay_start)
{
/* effectively invalidate the capability used before */
@ -85,7 +85,7 @@ static void cleanup_call(Rpc_object_base *obj, Nova::Utcb * ep_utcb,
return;
/* activate entrypoint now - otherwise cleanup call will block forever */
delay_start.unlock();
delay_start.wakeup();
/* make an IPC to ensure that cap() identifier is not used anymore */
utcb->msg()[0] = 0xdead;
@ -159,10 +159,11 @@ void Rpc_entrypoint::_activation_entry()
ep._rcv_buf.reset();
reply(utcb, exc, ep._snd_buf);
}
{
/* potentially delay start */
Lock::Guard lock_guard(ep._delay_start);
}
/* delay start */
ep._delay_start.block();
/* XXX inadequate usage of Blockade here is planned to be removed, see #3612 */
ep._delay_start.wakeup();
/* atomically lookup and lock referenced object */
auto lambda = [&] (Rpc_object_base *obj) {
@ -207,7 +208,7 @@ void Rpc_entrypoint::activate()
* called, we grab the '_delay_start' lock on construction and release it
* here.
*/
_delay_start.unlock();
_delay_start.wakeup();
}
@ -222,7 +223,6 @@ Rpc_entrypoint::Rpc_entrypoint(Pd_session *pd_session, size_t stack_size,
Affinity::Location location)
:
Thread(Cpu_session::Weight::DEFAULT_WEIGHT, name, stack_size, location),
_delay_start(Lock::LOCKED),
_pd_session(*pd_session)
{
/* set magic value evaluated by thread_nova.cc to start a local thread */
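
The hunks above amount to a startup handshake: the freshly created entrypoint thread blocks on '_delay_start' until 'activate()' (or the destructor) wakes it up. A condensed, hypothetical view of that protocol; note that a default-constructed Blockade starts in the blocked state, which is why the former '_delay_start(Lock::LOCKED)' initializer can be dropped:

Genode::Blockade _delay_start { };

void entry()                 /* executed by the entrypoint thread */
{
	/* ... make capability available ... */
	_delay_start.block();    /* wait until request dispatching is allowed */
	/* ... enter the RPC dispatch loop ... */
}

void activate()              /* executed by the creating thread */
{
	_delay_start.wakeup();   /* release the entrypoint thread */
}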

View File

@ -14,7 +14,7 @@
#ifndef _INCLUDE__BASE__ALARM_H_
#define _INCLUDE__BASE__ALARM_H_
#include <base/lock.h>
#include <base/mutex.h>
namespace Genode {
class Alarm_scheduler;
@ -41,11 +41,11 @@ class Genode::Alarm
bool is_pending_at(uint64_t time, bool time_period) const;
};
Lock _dispatch_lock { }; /* taken during handle method */
Raw _raw { };
int _active { 0 }; /* set to one when active */
Alarm *_next { nullptr }; /* next alarm in alarm list */
Alarm_scheduler *_scheduler { nullptr }; /* currently assigned scheduler */
Mutex _dispatch_mutex { }; /* taken during handle method */
Raw _raw { };
int _active { 0 }; /* set to one when active */
Alarm *_next { nullptr }; /* next alarm in alarm list */
Alarm_scheduler *_scheduler { nullptr }; /* currently assigned scheduler */
void _assign(Time period,
Time deadline,
@ -90,7 +90,7 @@ class Genode::Alarm_scheduler
{
private:
Lock _lock { }; /* protect alarm list */
Mutex _mutex { }; /* protect alarm list */
Alarm *_head { nullptr }; /* head of alarm list */
Alarm::Time _now { 0UL }; /* recent time (updated by handle method) */
bool _now_period { false };

View File

@ -17,7 +17,7 @@
#include <base/rpc_server.h>
#include <base/heap.h>
#include <base/service.h>
#include <base/lock.h>
#include <base/mutex.h>
#include <base/local_connection.h>
#include <base/quota_guard.h>
#include <util/arg_string.h>
@ -296,8 +296,8 @@ class Genode::Child : protected Rpc_object<Parent>,
Signal_context_capability _heartbeat_sigh { };
/* arguments fetched by the child in response to a yield signal */
Lock _yield_request_lock { };
Resource_args _yield_request_args { };
Mutex _yield_request_mutex { };
Resource_args _yield_request_args { };
/* number of unanswered heartbeat signals */
unsigned _outstanding_heartbeats = 0;

View File

@ -92,7 +92,7 @@ class Genode::Entrypoint : Noncopyable
Reconstructible<Signal_receiver> _sig_rec { };
Lock _deferred_signals_mutex { };
Mutex _deferred_signals_mutex { };
List<List_element<Signal_context> > _deferred_signals { };
void _handle_deferred_signals() { }

View File

@ -16,7 +16,7 @@
#include <util/noncopyable.h>
#include <util/meta.h>
#include <base/lock.h>
#include <base/mutex.h>
#include <base/log.h>
#include <util/avl_tree.h>
@ -85,7 +85,7 @@ class Genode::Id_space : public Noncopyable
:
_obj(obj), _id_space(id_space)
{
Lock::Guard guard(_id_space._lock);
Mutex::Guard guard(_id_space._mutex);
_id = id_space._unused_id();
_id_space._elements.insert(this);
}
@ -99,14 +99,14 @@ class Genode::Id_space : public Noncopyable
:
_obj(obj), _id_space(id_space), _id(id)
{
Lock::Guard guard(_id_space._lock);
Mutex::Guard guard(_id_space._mutex);
_id_space._check_conflict(id);
_id_space._elements.insert(this);
}
~Element()
{
Lock::Guard guard(_id_space._lock);
Mutex::Guard guard(_id_space._mutex);
_id_space._elements.remove(this);
}
@ -122,7 +122,7 @@ class Genode::Id_space : public Noncopyable
private:
Lock mutable _lock { }; /* protect '_elements' and '_cnt' */
Mutex mutable _mutex { }; /* protect '_elements' and '_cnt' */
Avl_tree<Element> _elements { };
unsigned long _cnt = 0;
@ -175,7 +175,7 @@ class Genode::Id_space : public Noncopyable
template <typename ARG, typename FUNC>
void for_each(FUNC const &fn) const
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
if (_elements.first())
_elements.first()->template _for_each<ARG>(fn);
@ -194,7 +194,7 @@ class Genode::Id_space : public Noncopyable
{
T *obj = nullptr;
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
if (!_elements.first())
throw Unknown_id();
@ -226,7 +226,7 @@ class Genode::Id_space : public Noncopyable
{
T *obj = nullptr;
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
if (_elements.first())
obj = &_elements.first()->_obj;

View File

@ -15,7 +15,7 @@
#define _INCLUDE__BASE__LOG_H_
#include <base/output.h>
#include <base/lock.h>
#include <base/mutex.h>
#include <trace/timestamp.h>
namespace Genode {
@ -46,7 +46,7 @@ class Genode::Log
private:
Lock _lock { };
Mutex _mutex { };
Output &_output;
@ -62,9 +62,9 @@ class Genode::Log
{
/*
* This function is being inlined. Hence, we try to keep it as
* small as possible. For this reason, the lock operations are
* small as possible. For this reason, the mutex operations are
* performed by the '_acquire' and '_release' functions instead of
* using a lock guard.
* using a mutex guard.
*/
_acquire(type);
Output::out_args(_output, args...);
@ -107,7 +107,7 @@ class Genode::Trace_output
{
private:
Lock _lock { };
Mutex _mutex { };
Output &_output;

View File

@ -19,6 +19,7 @@
#include <util/avl_tree.h>
#include <util/noncopyable.h>
#include <base/capability.h>
#include <base/mutex.h>
#include <base/weak_ptr.h>
namespace Genode { template <typename> class Object_pool; }
@ -95,13 +96,13 @@ class Genode::Object_pool : Interface, Noncopyable
private:
Avl_tree<Entry> _tree { };
Lock _lock { };
Mutex _mutex { };
protected:
bool empty()
{
Lock::Guard lock_guard(_lock);
Mutex::Guard lock_guard(_mutex);
return _tree.first() == nullptr;
}
@ -109,13 +110,13 @@ class Genode::Object_pool : Interface, Noncopyable
void insert(OBJ_TYPE *obj)
{
Lock::Guard lock_guard(_lock);
Mutex::Guard lock_guard(_mutex);
_tree.insert(obj);
}
void remove(OBJ_TYPE *obj)
{
Lock::Guard lock_guard(_lock);
Mutex::Guard lock_guard(_mutex);
_tree.remove(obj);
}
@ -131,7 +132,7 @@ class Genode::Object_pool : Interface, Noncopyable
Weak_ptr ptr;
{
Lock::Guard lock_guard(_lock);
Mutex::Guard lock_guard(_mutex);
Entry * entry = _tree.first() ?
_tree.first()->find_by_obj_id(capid) : nullptr;
@ -164,7 +165,7 @@ class Genode::Object_pool : Interface, Noncopyable
OBJ_TYPE * obj;
{
Lock::Guard lock_guard(_lock);
Mutex::Guard lock_guard(_mutex);
if (!((obj = (OBJ_TYPE*) _tree.first()))) return;

View File

@ -20,7 +20,7 @@
#include <base/thread.h>
#include <base/ipc.h>
#include <base/object_pool.h>
#include <base/lock.h>
#include <base/blockade.h>
#include <base/log.h>
#include <base/trace/events.h>
#include <pd_session/pd_session.h>
@ -345,9 +345,9 @@ class Genode::Rpc_entrypoint : Thread, public Object_pool<Rpc_object_base>
protected:
Native_capability _caller { };
Lock _cap_valid { }; /* thread startup synchronization */
Lock _delay_start { }; /* delay start of request dispatching */
Lock _delay_exit { }; /* delay destructor until server settled */
Blockade _cap_valid { }; /* thread startup synchronization */
Blockade _delay_start { }; /* delay start of request dispatching */
Blockade _delay_exit { }; /* delay destructor until server settled */
Pd_session &_pd_session; /* for creating capabilities */
Exit_handler _exit_handler { };
Capability<Exit> _exit_cap { };

View File

@ -15,7 +15,8 @@
#ifndef _INCLUDE__BASE__SEMAPHORE_H_
#define _INCLUDE__BASE__SEMAPHORE_H_
#include <base/lock.h>
#include <base/blockade.h>
#include <base/mutex.h>
#include <util/fifo.h>
namespace Genode { class Semaphore; }
@ -25,16 +26,10 @@ class Genode::Semaphore
{
protected:
int _cnt;
Lock _meta_lock { };
int _cnt;
Mutex _meta_lock { };
struct Element : Fifo<Element>::Element
{
Lock lock { Lock::LOCKED };
void block() { lock.lock(); }
void wake_up() { lock.unlock(); }
};
struct Element : Fifo<Element>::Element { Blockade blockade { }; };
Fifo<Element> _queue { };
@ -50,7 +45,7 @@ class Genode::Semaphore
~Semaphore()
{
/* synchronize destruction with unfinished 'up()' */
try { _meta_lock.lock(); } catch (...) { }
try { _meta_lock.acquire(); } catch (...) { }
}
/**
@ -64,7 +59,7 @@ class Genode::Semaphore
Element * element = nullptr;
{
Lock::Guard lock_guard(_meta_lock);
Mutex::Guard guard(_meta_lock);
if (++_cnt > 0)
return;
@ -78,7 +73,7 @@ class Genode::Semaphore
}
/* do not hold the lock while unblocking a waiting thread */
if (element) element->wake_up();
if (element) element->blockade.wakeup();
}
/**
@ -86,7 +81,7 @@ class Genode::Semaphore
*/
void down()
{
_meta_lock.lock();
_meta_lock.acquire();
if (--_cnt < 0) {
@ -96,17 +91,17 @@ class Genode::Semaphore
*/
Element queue_element;
_queue.enqueue(queue_element);
_meta_lock.unlock();
_meta_lock.release();
/*
* The thread is going to block on a local lock now,
* waiting to get woken up by another thread
* calling 'up()'
*/
queue_element.block();
queue_element.blockade.block();
} else {
_meta_lock.unlock();
_meta_lock.release();
}
}
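
For context, a hedged usage sketch of the reworked semaphore, with 'producer' and 'consumer' as illustrative names and the constructor assumed to take the initial counter value:

#include <base/semaphore.h>

Genode::Semaphore _items { 0 };   /* counts available items */

void producer()
{
	/* ... enqueue one item ... */
	_items.up();     /* wakes one consumer blocked on its element's blockade */
}

void consumer()
{
	_items.down();   /* blocks on a queued Element's blockade while no item is available */
	/* ... dequeue one item ... */
}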

View File

@ -208,11 +208,11 @@ class Genode::Signal_context : Interface, Noncopyable
*/
Signal_receiver *_receiver { nullptr };
Lock _lock { }; /* protect '_curr_signal' */
Mutex _mutex { }; /* protect '_curr_signal' */
Signal::Data _curr_signal { }; /* most-currently received signal */
bool _pending { false }; /* current signal is valid */
unsigned int _ref_cnt { 0 }; /* number of references to context */
Lock _destroy_lock { }; /* prevent destruction while the
Mutex _destroy_mutex { }; /* prevent destruction while the
context is in use */
/**
@ -298,7 +298,7 @@ class Genode::Signal_receiver : Noncopyable
if (!context) return;
do {
Lock::Guard lock_guard(context->_lock);
Mutex::Guard mutex_guard(context->_mutex);
try {
functor(*context);
} catch (Break_for_each) { return; }
@ -322,14 +322,14 @@ class Genode::Signal_receiver : Noncopyable
/**
* List of associated contexts
*/
Lock _contexts_lock { };
Context_ring _contexts { };
Mutex _contexts_mutex { };
Context_ring _contexts { };
/**
* Helper to dissolve given context
*
* This method prevents duplicated code in '~Signal_receiver'
* and 'dissolve'. Note that '_contexts_lock' must be held when
* and 'dissolve'. Note that '_contexts_mutex' must be held when
* calling this method.
*/
void _unsynchronized_dissolve(Signal_context *context);

View File

@ -1,12 +1,12 @@
/*
* \brief Lock-guarded allocator interface
* \brief Mutex-guarded allocator interface
* \author Norman Feske
* \author Stefan Kalkowski
* \date 2008-08-05
*/
/*
* Copyright (C) 2008-2017 Genode Labs GmbH
* Copyright (C) 2008-2020 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
@ -24,7 +24,7 @@ namespace Genode {
/**
* Lock-guarded allocator
* Mutex-guarded allocator
*
* This class wraps the complete 'Allocator' interface while
* preventing concurrent calls to the wrapped allocator implementation.
@ -36,17 +36,17 @@ class Genode::Synced_allocator : public Allocator
{
private:
Lock _lock { };
ALLOC _alloc;
Synced_interface<ALLOC, Lock> _synced_object;
Mutex _mutex { };
ALLOC _alloc;
Synced_interface<ALLOC, Mutex> _synced_object;
public:
using Guard = typename Synced_interface<ALLOC, Lock>::Guard;
using Guard = typename Synced_interface<ALLOC, Mutex>::Guard;
template <typename... ARGS>
Synced_allocator(ARGS &&... args)
: _alloc(args...), _synced_object(_lock, &_alloc) { }
: _alloc(args...), _synced_object(_mutex, &_alloc) { }
Guard operator () () { return _synced_object(); }
Guard operator () () const { return _synced_object(); }

View File

@ -15,11 +15,11 @@
#define _INCLUDE__BASE__SYNCED_INTERFACE_H_
/* Genode includes */
#include <base/lock.h>
#include <base/mutex.h>
namespace Genode {
template <typename, typename LOCK = Genode::Lock> class Synced_interface;
template <typename, typename LOCK = Genode::Mutex> class Synced_interface;
}
@ -48,7 +48,7 @@ class Genode::Synced_interface
Guard(LOCK &lock, IF *interface)
: _lock(lock), _interface(interface)
{
_lock.lock();
_lock.acquire();
}
friend class Synced_interface;
@ -57,7 +57,7 @@ class Genode::Synced_interface
public:
~Guard() { _lock.unlock(); }
~Guard() { _lock.release(); }
Guard(Guard const &other)
: _lock(other._lock), _interface(other._interface) { }
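
The guard semantics match the synced-interface test further below: 'operator ()' acquires the mutex and returns a Guard whose 'operator ->' forwards to the wrapped object, so the mutex is held for the duration of the call. An illustrative use, mirroring that test:

#include <base/mutex.h>
#include <base/synced_interface.h>

struct Adder
{
	int add(int a, int b) { return a + b; }
};

Genode::Mutex mutex { };
Adder         adder { };
Genode::Synced_interface<Adder, Genode::Mutex> synced_adder { mutex, &adder };

/* acquires 'mutex', calls 'add', releases 'mutex' when the Guard dies */
int const result = synced_adder()->add(13, 14);   /* result == 27 */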

View File

@ -14,7 +14,8 @@
#ifndef _INCLUDE__BASE__WEAK_PTR_H_
#define _INCLUDE__BASE__WEAK_PTR_H_
#include <base/lock.h>
#include <base/blockade.h>
#include <base/mutex.h>
#include <base/log.h>
#include <util/list.h>
@ -42,15 +43,15 @@ class Genode::Weak_ptr_base : public Genode::List<Weak_ptr_base>::Element
friend class Weak_object_base;
friend class Locked_ptr_base;
Lock mutable _lock { };
Mutex mutable _mutex { };
Weak_object_base *_obj { nullptr };
/*
* This lock is used to synchronize destruction of a weak pointer
* and its corresponding weak object that happen simultanously
* This blockade is used to synchronize destruction of a weak pointer
* and its corresponding weak object that happen simultaneously
*/
Lock mutable _destruct_lock { Lock::LOCKED };
Blockade mutable _destruct { };
inline void _adopt(Weak_object_base *obj);
inline void _disassociate();
@ -109,8 +110,8 @@ class Genode::Weak_object_base
/**
* List of weak pointers currently pointing to the object
*/
Lock _list_lock { };
List<Weak_ptr_base> _list { };
Mutex _list_mutex { };
List<Weak_ptr_base> _list { };
/**
* Buffers dequeued weak pointer that get invalidated currently
@ -118,9 +119,9 @@ class Genode::Weak_object_base
Weak_ptr_base *_ptr_in_destruction = nullptr;
/**
* Lock to synchronize access to object
* Mutex to synchronize access to object
*/
Lock _lock { };
Mutex _mutex { };
protected:
@ -152,12 +153,12 @@ class Genode::Weak_object_base
if (!ptr) return;
{
Lock::Guard guard(_list_lock);
Mutex::Guard guard(_list_mutex);
/*
* If the weak pointer that tries to disassociate is currently
* removed to invalidate it by the weak object's destructor,
* signal that fact to the pointer, so it can free it's lock,
* signal that fact to the pointer, so it can free its mutex,
* and block until invalidation is finished.
*/
if (_ptr_in_destruction == ptr)
@ -182,11 +183,11 @@ class Genode::Weak_object_base
/*
* To prevent deadlocks we always have to maintain
* the order of lock access, therefore we first
* dequeue one weak pointer and free the list lock again
* the order of mutex access, therefore we first
* dequeue one weak pointer and free the list mutex again
*/
{
Lock::Guard guard(_list_lock);
Mutex::Guard guard(_list_mutex);
_ptr_in_destruction = _list.first();
/* if the list is empty we're done */
@ -195,22 +196,22 @@ class Genode::Weak_object_base
}
{
Lock::Guard guard(_ptr_in_destruction->_lock);
Mutex::Guard guard(_ptr_in_destruction->_mutex);
_ptr_in_destruction->_obj = nullptr;
/*
* unblock a weak pointer that tried to disassociate
* in the meantime
*/
_ptr_in_destruction->_destruct_lock.unlock();
_ptr_in_destruction->_destruct.wakeup();
}
}
/*
* synchronize with locked pointers that already aquired
* the lock before the corresponding weak pointer got invalidated
* synchronize with locked pointers that already acquired
* the mutex before the corresponding weak pointer got invalidated
*/
_lock.lock();
_mutex.acquire();
}
/**
@ -364,7 +365,7 @@ void Genode::Weak_ptr_base::_adopt(Genode::Weak_object_base *obj)
if (_obj)
{
Lock::Guard guard(_obj->_list_lock);
Mutex::Guard guard(_obj->_list_mutex);
_obj->_list.insert(this);
}
}
@ -374,11 +375,11 @@ void Genode::Weak_ptr_base::_disassociate()
{
/* defer destruction of object */
try {
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
if (_obj) _obj->disassociate(this);
} catch(Weak_object_base::In_destruction&) {
_destruct_lock.lock();
_destruct.block();
}
}
@ -398,7 +399,7 @@ Genode::Weak_ptr_base::operator = (Weak_ptr_base const &other)
_disassociate();
{
Lock::Guard guard(other._lock);
Mutex::Guard guard(other._mutex);
_adopt(other._obj);
}
return *this;
@ -410,7 +411,7 @@ bool Genode::Weak_ptr_base::operator == (Weak_ptr_base const &other) const
if (&other == this)
return true;
Lock::Guard guard_this(_lock), guard_other(other._lock);
Mutex::Guard guard_this(_mutex), guard_other(other._mutex);
return (_obj == other._obj);
}
@ -433,19 +434,19 @@ Genode::Weak_ptr<T> Genode::Weak_object_base::_weak_ptr()
Genode::Locked_ptr_base::Locked_ptr_base(Weak_ptr_base &weak_ptr)
: curr(nullptr)
{
Lock::Guard guard(weak_ptr._lock);
Mutex::Guard guard(weak_ptr._mutex);
if (!weak_ptr.obj()) return;
curr = weak_ptr.obj();
curr->_lock.lock();
curr->_mutex.acquire();
}
Genode::Locked_ptr_base::~Locked_ptr_base()
{
if (curr)
curr->_lock.unlock();
curr->_mutex.release();
}
#endif /* _INCLUDE__BASE__WEAK_PTR_H_ */
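
For users of the weak-pointer API the protocol is unchanged, only the underlying primitives differ. A hedged sketch of the typical access pattern, with 'Object' and 'access' as illustrative names:

#include <base/weak_ptr.h>

struct Object : Genode::Weak_object<Object>
{
	void method() { }
};

void access(Genode::Weak_ptr<Object> &weak_ptr)
{
	/* the constructor acquires the object's '_mutex', deferring destruction */
	Genode::Locked_ptr<Object> locked_ptr(weak_ptr);

	if (locked_ptr.valid())
		locked_ptr->method();   /* safe while the mutex is held */

	/* the destructor releases the mutex again */
}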

View File

@ -21,9 +21,9 @@
/* Genode includes */
#include <util/noncopyable.h>
#include <base/lock.h>
#include <base/log.h>
#include <base/duration.h>
#include <base/log.h>
#include <base/mutex.h>
namespace Genode {
@ -169,11 +169,11 @@ class Genode::Timeout : private Noncopyable
bool is_pending_at(uint64_t time, bool time_period) const;
};
Lock _dispatch_lock { };
Raw _raw { };
int _active { 0 };
Alarm *_next { nullptr };
Alarm_timeout_scheduler *_scheduler { nullptr };
Mutex _dispatch_mutex { };
Raw _raw { };
int _active { 0 };
Alarm *_next { nullptr };
Alarm_timeout_scheduler *_scheduler { nullptr };
void _alarm_assign(Time period,
Time deadline,
@ -239,7 +239,7 @@ class Genode::Alarm_timeout_scheduler : private Noncopyable,
using Alarm = Timeout::Alarm;
Time_source &_time_source;
Lock _lock { };
Mutex _mutex { };
Alarm *_active_head { nullptr };
Alarm *_pending_head { nullptr };
Alarm::Time _now { 0UL };

View File

@ -146,7 +146,7 @@ class Timer::Connection : public Genode::Connection<Session>,
using Timeout_handler = Genode::Time_source::Timeout_handler;
using Timestamp = Genode::Trace::Timestamp;
using Duration = Genode::Duration;
using Lock = Genode::Lock;
using Mutex = Genode::Mutex;
using Microseconds = Genode::Microseconds;
using Milliseconds = Genode::Milliseconds;
using Entrypoint = Genode::Entrypoint;
@ -172,7 +172,7 @@ class Timer::Connection : public Genode::Connection<Session>,
enum Mode { LEGACY, MODERN };
Mode _mode { LEGACY };
Genode::Lock _lock { };
Mutex _mutex { };
Genode::Signal_receiver _sig_rec { };
Genode::Signal_context _default_sigh_ctx { };
@ -204,7 +204,7 @@ class Timer::Connection : public Genode::Connection<Session>,
Genode::Io_signal_handler<Connection> _signal_handler;
Timeout_handler *_handler { nullptr };
Lock _real_time_lock { Lock::UNLOCKED };
Mutex _real_time_mutex { };
uint64_t _us { elapsed_us() };
Timestamp _ts { _timestamp() };
Duration _real_time { Microseconds(_us) };
@ -313,7 +313,7 @@ class Timer::Connection : public Genode::Connection<Session>,
return;
/* serialize sleep calls issued by different threads */
Genode::Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
/* temporarily install to the default signal handler */
if (_custom_sigh_cap.valid())

View File

@ -4,9 +4,9 @@
<timeout meaning="failed" sec="20" />
<log meaning="succeeded">
[init -> test-synced_interface] --- Synced interface test ---
[init -> test-synced_interface] lock
[init -> test-synced_interface] acquire
[init -> test-synced_interface] adding 13 + 14
[init -> test-synced_interface] unlock
[init -> test-synced_interface] release
[init -> test-synced_interface] result is 27
[init -> test-synced_interface] --- Synced interface test finished ---
</log>

View File

@ -15,8 +15,8 @@
#ifndef _CORE__INCLUDE__CORE_MEM_ALLOC_H_
#define _CORE__INCLUDE__CORE_MEM_ALLOC_H_
#include <base/lock.h>
#include <base/allocator_avl.h>
#include <base/mutex.h>
#include <synced_range_allocator.h>
#include <util.h>
@ -201,10 +201,10 @@ class Genode::Core_mem_allocator : public Genode::Core_mem_translator
protected:
/**
* Lock used for synchronization of all operations on the
* Mutex used for synchronization of all operations on the
* embedded allocators.
*/
Lock _lock { };
Mutex _mutex { };
/**
* Synchronized allocator of physical memory ranges
@ -227,7 +227,7 @@ class Genode::Core_mem_allocator : public Genode::Core_mem_translator
*
* This allocator is internally used within this class for
* allocating meta data for the other allocators. It is not
* synchronized to avoid nested locking. The lock-guarded
* synchronized to avoid nested locking. The mutex-guarded
* access to this allocator from the outer world is
* provided via the 'Allocator' interface implemented by
* 'Core_mem_allocator'. The allocator works at byte
@ -241,8 +241,8 @@ class Genode::Core_mem_allocator : public Genode::Core_mem_translator
* Constructor
*/
Core_mem_allocator()
: _phys_alloc(_lock, &_mem_alloc),
_virt_alloc(_lock, &_mem_alloc),
: _phys_alloc(_mutex, &_mem_alloc),
_virt_alloc(_mutex, &_mem_alloc),
_mem_alloc(_phys_alloc, _virt_alloc) { }
/**
@ -283,13 +283,13 @@ class Genode::Core_mem_allocator : public Genode::Core_mem_translator
Alloc_return alloc_aligned(size_t size, void **out_addr, int align,
addr_t from = 0, addr_t to = ~0UL) override
{
Lock::Guard lock_guard(_lock);
Mutex::Guard lock_guard(_mutex);
return _mem_alloc.alloc_aligned(size, out_addr, align, from, to);
}
void free(void *addr) override
{
Lock::Guard lock_guard(_lock);
Mutex::Guard lock_guard(_mutex);
return _mem_alloc.free(addr);
}
@ -307,7 +307,7 @@ class Genode::Core_mem_allocator : public Genode::Core_mem_translator
void free(void *addr, size_t size) override
{
Lock::Guard lock_guard(_lock);
Mutex::Guard lock_guard(_mutex);
return _mem_alloc.free(addr, size);
}

View File

@ -1,5 +1,5 @@
/*
* \brief Lock-guarded allocator interface
* \brief Mutex-guarded allocator interface
* \author Norman Feske
* \author Stefan Kalkowski
* \date 2008-08-05
@ -25,7 +25,7 @@ namespace Genode {
/**
* Lock-guarded range allocator
* Mutex-guarded range allocator
*
* This class wraps the complete 'Range_allocator' interface while
* preventing concurrent calls to the wrapped allocator implementation.
@ -39,23 +39,23 @@ class Genode::Synced_range_allocator : public Range_allocator
friend class Mapped_mem_allocator;
Lock _default_lock { };
Lock &_lock;
ALLOC _alloc;
Synced_interface<ALLOC, Lock> _synced_object;
Mutex _default_mutex { };
Mutex &_mutex;
ALLOC _alloc;
Synced_interface<ALLOC, Mutex> _synced_object;
public:
using Guard = typename Synced_interface<ALLOC, Lock>::Guard;
using Guard = typename Synced_interface<ALLOC, Mutex>::Guard;
template <typename... ARGS>
Synced_range_allocator(Lock &lock, ARGS &&... args)
: _lock(lock), _alloc(args...), _synced_object(_lock, &_alloc) { }
Synced_range_allocator(Mutex &mutex, ARGS &&... args)
: _mutex(mutex), _alloc(args...), _synced_object(_mutex, &_alloc) { }
template <typename... ARGS>
Synced_range_allocator(ARGS &&... args)
: _lock(_default_lock), _alloc(args...),
_synced_object(_lock, &_alloc) { }
: _mutex(_default_mutex), _alloc(args...),
_synced_object(_mutex, &_alloc) { }
Guard operator () () { return _synced_object(); }
Guard operator () () const { return _synced_object(); }

View File

@ -23,8 +23,8 @@
/* base includes */
#include <util/avl_tree.h>
#include <util/bit_allocator.h>
#include <base/lock.h>
#include <base/log.h>
#include <base/mutex.h>
#include <util/construct_at.h>
/* base-internal includes */
@ -114,7 +114,7 @@ class Genode::Capability_space_tpl
Tree_managed_data _caps_data[NUM_CAPS];
Bit_allocator<NUM_CAPS> _alloc { };
Avl_tree<Tree_managed_data> _tree { };
Lock mutable _lock { };
Mutex mutable _mutex { };
/**
* Calculate index into _caps_data for capability data object
@ -127,7 +127,7 @@ class Genode::Capability_space_tpl
Data *_lookup(Rpc_obj_key key) const
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
if (!_tree.first())
return nullptr;
@ -146,7 +146,7 @@ class Genode::Capability_space_tpl
template <typename... ARGS>
Native_capability::Data &create_capability(ARGS... args)
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
addr_t const index = _alloc.alloc();
@ -160,7 +160,7 @@ class Genode::Capability_space_tpl
void dec_ref(Data &data)
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
if (data.dec_ref() == 0) {
@ -174,7 +174,7 @@ class Genode::Capability_space_tpl
void inc_ref(Data &data)
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
if (data.inc_ref() == 255)
throw Native_capability::Reference_count_overflow();

View File

@ -46,9 +46,9 @@ class Genode::Expanding_parent_client : public Parent_client
State _state = { UNDEFINED };
/**
* Lock used to serialize resource requests
* Mutex used to serialize resource requests
*/
Lock _lock { };
Mutex _mutex { };
struct Io_signal_context : Signal_context
{
@ -156,7 +156,7 @@ class Genode::Expanding_parent_client : public Parent_client
void resource_avail_sigh(Signal_context_capability sigh) override
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
/*
* If the signal handler gets de-installed, let the next call of
@ -177,7 +177,7 @@ class Genode::Expanding_parent_client : public Parent_client
void resource_request(Resource_args const &args) override
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
/*
* Issue request but don't block if a custom signal handler is

View File

@ -46,7 +46,7 @@ class Genode::Stack_allocator
}
} _alloc { };
Lock _threads_lock { };
Mutex _threads_mutex { };
public:

View File

@ -94,7 +94,7 @@ bool Alarm::Raw::is_pending_at(uint64_t time, bool time_period) const
Alarm *Alarm_scheduler::_get_pending_alarm()
{
Lock::Guard lock_guard(_lock);
Mutex::Guard guard(_mutex);
if (!_head || !_head->_raw.is_pending_at(_now, _now_period)) {
return nullptr; }
@ -104,10 +104,10 @@ Alarm *Alarm_scheduler::_get_pending_alarm()
_head = _head->_next;
/*
* Acquire dispatch lock to defer destruction until the call of 'on_alarm'
* Acquire dispatch mutex to defer destruction until the call of 'on_alarm'
* is finished
*/
pending_alarm->_dispatch_lock.lock();
pending_alarm->_dispatch_mutex.acquire();
/* reset alarm object */
pending_alarm->_next = nullptr;
@ -178,12 +178,12 @@ void Alarm_scheduler::handle(Alarm::Time curr_time)
curr->_raw.deadline = deadline;
/* synchronize enqueue operation */
Lock::Guard lock_guard(_lock);
Mutex::Guard guard(_mutex);
_unsynchronized_enqueue(curr);
}
/* release alarm, resume concurrent destructor operation */
curr->_dispatch_lock.unlock();
curr->_dispatch_mutex.release();
}
}
@ -206,7 +206,7 @@ void Alarm_scheduler::_setup_alarm(Alarm &alarm, Alarm::Time period, Alarm::Time
void Alarm_scheduler::schedule_absolute(Alarm *alarm, Alarm::Time timeout)
{
Lock::Guard alarm_list_lock_guard(_lock);
Mutex::Guard alarm_list_guard(_mutex);
_setup_alarm(*alarm, 0, timeout);
}
@ -214,7 +214,7 @@ void Alarm_scheduler::schedule_absolute(Alarm *alarm, Alarm::Time timeout)
void Alarm_scheduler::schedule(Alarm *alarm, Alarm::Time period)
{
Lock::Guard alarm_list_lock_guard(_lock);
Mutex::Guard alarm_list_guard(_mutex);
/*
* Refuse to schedule a periodic timeout of 0 because it would trigger
@ -236,17 +236,17 @@ void Alarm_scheduler::discard(Alarm *alarm)
{
/*
* Make sure that nobody is inside the '_get_pending_alarm' when
* grabbing the '_dispatch_lock'. This is important when this function
* is called from the 'Alarm' destructor. Without the '_dispatch_lock',
* we could take the lock and proceed with destruction just before
* '_get_pending_alarm' tries to grab the lock. When the destructor is
* grabbing the '_dispatch_mutex'. This is important when this function
* is called from the 'Alarm' destructor. Without the '_dispatch_mutex',
* we could take the mutex and proceed with destruction just before
* '_get_pending_alarm' tries to grab the mutex. When the destructor is
* finished, '_get_pending_alarm' would proceed with operating on a
* dangling pointer.
*/
Lock::Guard alarm_list_lock_guard(_lock);
Mutex::Guard alarm_list_guard(_mutex);
if (alarm) {
Lock::Guard alarm_lock_guard(alarm->_dispatch_lock);
Mutex::Guard alarm_guard(alarm->_dispatch_mutex);
_unsynchronized_dequeue(alarm);
}
}
@ -254,7 +254,7 @@ void Alarm_scheduler::discard(Alarm *alarm)
bool Alarm_scheduler::next_deadline(Alarm::Time *deadline)
{
Lock::Guard alarm_list_lock_guard(_lock);
Mutex::Guard alarm_list_guard(_mutex);
if (!_head) return false;
@ -270,7 +270,7 @@ bool Alarm_scheduler::next_deadline(Alarm::Time *deadline)
Alarm_scheduler::~Alarm_scheduler()
{
Lock::Guard lock_guard(_lock);
Mutex::Guard guard(_mutex);
while (_head) {

View File

@ -35,7 +35,7 @@ static Service &parent_service()
void Child::yield(Resource_args const &args)
{
Lock::Guard guard(_yield_request_lock);
Mutex::Guard guard(_yield_request_mutex);
/* buffer yield request arguments to be picked up by the child */
_yield_request_args = args;
@ -685,7 +685,7 @@ void Child::yield_sigh(Signal_context_capability sigh) { _yield_sigh = sigh; }
Parent::Resource_args Child::yield_request()
{
Lock::Guard guard(_yield_request_lock);
Mutex::Guard guard(_yield_request_mutex);
return _yield_request_args;
}

View File

@ -48,9 +48,9 @@ namespace {
Genode::Parent &_parent = *env_deprecated()->parent();
/**
* Lock for serializing 'session' and 'close'
* Mutex for serializing 'session' and 'close'
*/
Genode::Lock _lock { };
Genode::Mutex _mutex { };
/**
* Utility used to block for a single signal
@ -121,7 +121,7 @@ namespace {
Parent::Session_args const &args,
Affinity const &affinity) override
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
/*
* Since we account for the backing store for session meta data on
@ -189,7 +189,7 @@ namespace {
void upgrade(Parent::Client::Id id, Parent::Upgrade_args const &args) override
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
if (_parent.upgrade(id, args) == Parent::UPGRADE_PENDING)
_block_for_session();
@ -197,7 +197,7 @@ namespace {
void close(Parent::Client::Id id) override
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
if (_parent.close(id) == Parent::CLOSE_PENDING)
_block_for_session();

View File

@ -86,7 +86,7 @@ void Entrypoint::_defer_signal(Signal &sig)
{
Signal_context *context = sig.context();
Lock::Guard guard(_deferred_signals_mutex);
Mutex::Guard guard(_deferred_signals_mutex);
_deferred_signals.remove(context->deferred_le());
_deferred_signals.insert(context->deferred_le());
}
@ -97,7 +97,7 @@ void Entrypoint::_process_deferred_signals()
for (;;) {
Signal_context *context = nullptr;
{
Lock::Guard guard(_deferred_signals_mutex);
Mutex::Guard guard(_deferred_signals_mutex);
if (!_deferred_signals.first()) return;
context = _deferred_signals.first()->object();
@ -118,7 +118,6 @@ void Entrypoint::_process_incoming_signals()
do {
{
/* see documentation in 'wait_and_dispatch_one_io_signal()' */
Mutex::Guard guard { _block_for_signal_mutex };
_signal_proxy_delivers_signal = true;
@ -281,7 +280,7 @@ void Genode::Entrypoint::dissolve(Signal_dispatcher_base &dispatcher)
/* also remove context from deferred signal list */
{
Lock::Guard guard(_deferred_signals_mutex);
Mutex::Guard guard(_deferred_signals_mutex);
_deferred_signals.remove(dispatcher.deferred_le());
}
}

View File

@ -19,7 +19,7 @@ using namespace Genode;
void Log::_acquire(Type type)
{
_lock.lock();
_mutex.acquire();
/*
* Mark warnings and errors via distinct colors.
@ -39,7 +39,7 @@ void Log::_release()
*/
_output.out_string("\033[0m\n");
_lock.unlock();
_mutex.release();
}
@ -63,7 +63,7 @@ void Raw::_release()
void Trace_output::_acquire()
{
_lock.lock();
_mutex.acquire();
}
@ -74,5 +74,5 @@ void Trace_output::_release()
*/
_output.out_string("\n");
_lock.unlock();
_mutex.release();
}

View File

@ -51,15 +51,15 @@ namespace {
enum { MAX = 32 };
Lock mutable _lock { };
Service _services[MAX] { };
unsigned _cnt = 0;
Mutex mutable _mutex { };
Service _services[MAX] { };
unsigned _cnt = 0;
public:
void insert(Service const &service)
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
if (_cnt == MAX) {
error("maximum number of services announced");
@ -76,10 +76,10 @@ namespace {
void apply(Service::Name const &name, FUNC const &fn)
{
/*
* Protect '_services' but execute 'fn' with the lock released.
* Protect '_services' but execute 'fn' with the mutex released.
*
* If we called 'fn' with the lock held, the following scenario
* may result in a deadlock:
* If we called 'fn' with the mutex held, the following
* scenario may result in a deadlock:
*
* A component provides two services, e.g., "Framebuffer" and
* "Input" (fb_sdl or nit_fb). In-between the two 'announce'
@ -91,19 +91,19 @@ namespace {
* call blocks until 'Component::construct' returns. However,
* before returning, the function announces the second service,
* eventually arriving at 'Service_registry::insert', which
* tries to acquire the same lock as the blocking 'apply' call.
* tries to acquire the same mutex as the blocking 'apply' call.
*/
_lock.lock();
_mutex.acquire();
for (unsigned i = 0; i < _cnt; i++) {
if (name != _services[i].name)
continue;
_lock.unlock();
_mutex.release();
fn(_services[i]);
return;
}
_lock.unlock();
_mutex.release();
}
};

View File

@ -26,7 +26,7 @@ void Rpc_entrypoint::_entry(Native_context& native_context)
Ipc_server srv(native_context);
_cap = srv;
_cap_valid.unlock();
_cap_valid.wakeup();
/*
* Now, the capability of the server activation is initialized
@ -35,7 +35,7 @@ void Rpc_entrypoint::_entry(Native_context& native_context)
* is completely initialized. Thus, we wait until the activation
* gets explicitly unblocked by calling 'Rpc_entrypoint::activate()'.
*/
_delay_start.lock();
_delay_start.block();
Rpc_exception_code exc = Rpc_exception_code(Rpc_exception_code::INVALID_OBJECT);
@ -65,5 +65,5 @@ void Rpc_entrypoint::_entry(Native_context& native_context)
ipc_reply(_caller, Rpc_exception_code(Rpc_exception_code::SUCCESS), snd_buf);
/* defer the destruction of 'Ipc_server' until '~Rpc_entrypoint' is ready */
_delay_exit.lock();
_delay_exit.block();
}

View File

@ -44,7 +44,7 @@ void Rpc_entrypoint::_dissolve(Rpc_object_base *obj)
void Rpc_entrypoint::_block_until_cap_valid()
{
_cap_valid.lock();
_cap_valid.block();
}
@ -59,7 +59,7 @@ void Rpc_entrypoint::reply_signal_info(Untyped_capability reply_cap,
void Rpc_entrypoint::activate()
{
_delay_start.unlock();
_delay_start.wakeup();
}
@ -75,8 +75,6 @@ Rpc_entrypoint::Rpc_entrypoint(Pd_session *pd_session, size_t stack_size,
:
Thread(Cpu_session::Weight::DEFAULT_WEIGHT, name, _native_stack_size(stack_size), location),
_cap(Untyped_capability()),
_cap_valid(Lock::LOCKED), _delay_start(Lock::LOCKED),
_delay_exit(Lock::LOCKED),
_pd_session(*pd_session)
{
Thread::start();
@ -93,9 +91,9 @@ Rpc_entrypoint::~Rpc_entrypoint()
{
/*
* We have to make sure the server loop is running which is only the case
* if the Rpc_entrypoint was actived before we execute the RPC call.
* if the Rpc_entrypoint was activated before we execute the RPC call.
*/
_delay_start.unlock();
_delay_start.wakeup();
/* leave server loop */
_exit_cap.call<Exit::Rpc_exit>();
@ -111,7 +109,7 @@ Rpc_entrypoint::~Rpc_entrypoint()
* entrypoint thread to leave the scope. Thereby, the 'Ipc_server' object
* will get destructed.
*/
_delay_exit.unlock();
_delay_exit.wakeup();
join();
}

View File

@ -28,7 +28,7 @@
using namespace Genode;
class Signal_handler_thread : Thread, Lock
class Signal_handler_thread : Thread, Blockade
{
private:
@ -44,7 +44,7 @@ class Signal_handler_thread : Thread, Lock
void entry() override
{
_signal_source.construct(env_deprecated()->pd_session()->alloc_signal_source());
unlock();
wakeup();
Signal_receiver::dispatch_signals(&(*_signal_source));
}
@ -56,7 +56,7 @@ class Signal_handler_thread : Thread, Lock
* Constructor
*/
Signal_handler_thread(Env &env)
: Thread(env, "signal handler", STACK_SIZE), Lock(Lock::LOCKED)
: Thread(env, "signal handler", STACK_SIZE)
{
start();
@ -65,7 +65,7 @@ class Signal_handler_thread : Thread, Lock
* with the use of signals. Otherwise, signals may get lost until
* the construction finished.
*/
lock();
block();
}
~Signal_handler_thread()
@ -136,34 +136,34 @@ namespace Genode {
* scalability problem, we might introduce a more sophisticated
* associative data structure.
*/
Lock mutable _lock { };
Mutex mutable _mutex { };
List<List_element<Signal_context> > _list { };
public:
void insert(List_element<Signal_context> *le)
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
_list.insert(le);
}
void remove(List_element<Signal_context> *le)
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
_list.remove(le);
}
bool test_and_lock(Signal_context *context) const
{
Lock::Guard guard(_lock);
Mutex::Guard guard(_mutex);
/* search list for context */
List_element<Signal_context> const *le = _list.first();
for ( ; le; le = le->next()) {
if (context == le->object()) {
/* lock object */
context->_lock.lock();
/* acquire the object */
context->_mutex.acquire();
return true;
}
}
@ -197,7 +197,7 @@ Signal_context_capability Signal_receiver::manage(Signal_context *context)
context->_receiver = this;
Lock::Guard contexts_lock_guard(_contexts_lock);
Mutex::Guard contexts_guard(_contexts_mutex);
/* insert context into context list */
_contexts.insert_as_tail(context);
@ -237,7 +237,7 @@ void Signal_receiver::block_for_signal()
Signal Signal_receiver::pending_signal()
{
Lock::Guard contexts_lock_guard(_contexts_lock);
Mutex::Guard contexts_guard(_contexts_mutex);
Signal::Data result;
_contexts.for_each_locked([&] (Signal_context &context) {
@ -252,7 +252,7 @@ Signal Signal_receiver::pending_signal()
throw Context_ring::Break_for_each();
});
if (result.context) {
Lock::Guard lock_guard(result.context->_lock);
Mutex::Guard context_guard(result.context->_mutex);
if (result.num == 0)
warning("returning signal with num == 0");
@ -324,8 +324,8 @@ void Signal_receiver::dispatch_signals(Signal_source *signal_source)
warning("signal context ", context, " with no receiver in signal dispatcher");
}
/* free context lock that was taken by 'test_and_lock' */
context->_lock.unlock();
/* free context mutex that was taken by 'test_and_lock' */
context->_mutex.release();
}
}
@ -333,8 +333,8 @@ void Signal_receiver::dispatch_signals(Signal_source *signal_source)
void Signal_receiver::_platform_begin_dissolve(Signal_context *context)
{
/*
* Because the 'remove' operation takes the registry lock, the context
* must not be locked when calling this method. See the comment in
* Because the 'remove' operation takes the registry mutex, the context
* must not be acquired when calling this method. See the comment in
* 'Signal_receiver::dissolve'.
*/
signal_context_registry()->remove(&context->_registry_le);

View File

@ -58,10 +58,10 @@ Signal::~Signal() { _dec_ref_and_unlock(); }
void Signal::_dec_ref_and_unlock()
{
if (_data.context) {
Lock::Guard lock_guard(_data.context->_lock);
Mutex::Guard context_guard(_data.context->_mutex);
_data.context->_ref_cnt--;
if (_data.context->_ref_cnt == 0)
_data.context->_destroy_lock.unlock();
_data.context->_destroy_mutex.release();
}
}
@ -69,7 +69,7 @@ void Signal::_dec_ref_and_unlock()
void Signal::_inc_ref()
{
if (_data.context) {
Lock::Guard lock_guard(_data.context->_lock);
Mutex::Guard context_guard(_data.context->_mutex);
_data.context->_ref_cnt++;
}
}
@ -98,7 +98,7 @@ Signal::Signal(Signal::Data data) : _data(data)
* is in its clear state).
*/
if (_data.context->_ref_cnt == 1) {
_data.context->_destroy_lock.lock();
_data.context->_destroy_mutex.acquire();
} else {
/* print warning only once to avoid flooding the log */
@ -161,7 +161,7 @@ Signal Signal_receiver::wait_for_signal()
Signal_receiver::~Signal_receiver()
{
Lock::Guard contexts_lock_guard(_contexts_lock);
Mutex::Guard contexts_guard(_contexts_mutex);
/* disassociate contexts from the receiver */
while (Signal_context *context = _contexts.head()) {
@ -197,24 +197,24 @@ void Signal_receiver::dissolve(Signal_context *context)
/*
* We must adhere to the following lock-taking order:
*
* 1. Taking the lock for the list of contexts ('_contexts_lock')
* 1. Taking the lock for the list of contexts ('_contexts_mutex')
* 2. Taking the context-registry lock (this happens inside
* '_platform_begin_dissolve' on platforms that use such a
* registry)
* 3. Taking the lock for an individual signal context
*/
Lock::Guard contexts_lock_guard(_contexts_lock);
Mutex::Guard contexts_guard(_contexts_mutex);
_platform_begin_dissolve(context);
Lock::Guard context_lock_guard(context->_lock);
Mutex::Guard context_guard(context->_mutex);
_unsynchronized_dissolve(context);
}
_platform_finish_dissolve(context);
Lock::Guard context_destroy_lock_guard(context->_destroy_lock);
Mutex::Guard context_destroy_guard(context->_destroy_mutex);
}

View File

@ -6,17 +6,17 @@
*/
/*
* Copyright (C) 2006-2017 Genode Labs GmbH
* Copyright (C) 2006-2020 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#include <base/blockade.h>
#include <base/sleep.h>
#include <base/lock.h>
void Genode::sleep_forever()
{
Lock sleep;
while (true) sleep.lock();
Blockade sleep;
while (true) sleep.block();
}

View File

@ -52,7 +52,7 @@ Stack_allocator::alloc(Thread *, bool main_thread)
return base_to_stack(stack_area_virtual_base());
try {
Lock::Guard _lock_guard(_threads_lock);
Mutex::Guard guard(_threads_mutex);
return base_to_stack(idx_to_base(_alloc.alloc()));
} catch(Bit_allocator<MAX_THREADS>::Out_of_indices) {
return 0;
@ -64,7 +64,7 @@ void Stack_allocator::free(Stack *stack)
{
addr_t const base = addr_to_base(stack);
Lock::Guard _lock_guard(_threads_lock);
Mutex::Guard guard(_threads_mutex);
_alloc.free(base_to_idx(base));
}

View File

@ -91,9 +91,9 @@ static List<Tls_tree> &_tls_tree_list()
}
static Lock &_emutls_lock()
static Mutex &_emutls_mutex()
{
static Lock instance;
static Mutex instance;
return instance;
}
@ -103,7 +103,7 @@ static Lock &_emutls_lock()
*/
void Genode::cxx_free_tls(void *thread)
{
Lock::Guard lock_guard(_emutls_lock());
Mutex::Guard lock_guard(_emutls_mutex());
for (Tls_tree *tls_tree = _tls_tree_list().first();
tls_tree; tls_tree = tls_tree->next()) {
@ -130,7 +130,7 @@ void Genode::cxx_free_tls(void *thread)
*/
extern "C" void *__emutls_get_address(void *obj)
{
Lock::Guard lock_guard(_emutls_lock());
Mutex::Guard lock_guard(_emutls_mutex());
__emutls_object *emutls_object = reinterpret_cast<__emutls_object*>(obj);

View File

@ -38,7 +38,7 @@ extern "C" int dl_iterate_phdr(int (*callback) (Phdr_info *info, size_t size, vo
int err = 0;
Phdr_info info;
Lock::Guard guard(lock());
Mutex::Guard guard(mutex());
for (Object *e = obj_list_head();e; e = e->next_obj()) {

View File

@ -20,6 +20,7 @@
#include <file.h>
#include <util.h>
#include <config.h>
#include <base/mutex.h>
/*
* Mark functions that are used during the linker's self-relocation phase as
@ -125,9 +126,9 @@ namespace Linker {
Dependency *binary_root_dep();
/**
* Global ELF access lock
* Global ELF access mutex
*/
Lock &lock();
Mutex &mutex();
}

View File

@ -66,10 +66,10 @@ Linker::Region_map::Constructible_region_map &Linker::Region_map::r()
}
Genode::Lock &Linker::lock()
Genode::Mutex &Linker::mutex()
{
static Lock _lock;
return _lock;
static Mutex _mutex;
return _mutex;
}
@ -296,7 +296,7 @@ struct Linker::Ld : private Dependency, Elf_object
Elf::Addr Ld::jmp_slot(Dependency const &dep, Elf::Size index)
{
Lock::Guard guard(lock());
Mutex::Guard guard(mutex());
if (verbose_relocation)
log("LD: SLOT ", &dep.obj(), " ", Hex(index));

View File

@ -29,9 +29,9 @@ static Linker::Root_object const &to_root(void *h)
* Needed during shared object creation and destruction, since global lists are
* manipulated
*/
static Genode::Lock & shared_object_lock()
static Genode::Mutex & shared_object_lock()
{
static Genode::Lock _lock;
static Genode::Mutex _lock;
return _lock;
}
@ -61,7 +61,7 @@ Genode::Shared_object::Shared_object(Env &env, Allocator &md_alloc,
log("LD: open '", file ? file : "binary", "'");
try {
Lock::Guard guard(shared_object_lock());
Mutex::Guard guard(shared_object_lock());
_handle = new (md_alloc)
Root_object(env, md_alloc, file ? file : binary_name(),
@ -96,7 +96,7 @@ void *Genode::Shared_object::_lookup(const char *name) const
log("LD: shared object lookup '", name, "'");
try {
Lock::Guard guard(Linker::lock());
Mutex::Guard guard(Linker::mutex());
Root_object const &root = to_root(_handle);
@ -121,7 +121,7 @@ Genode::Shared_object::~Shared_object()
if (verbose_shared)
log("LD: close shared object");
Lock::Guard guard(shared_object_lock());
Mutex::Guard guard(shared_object_lock());
destroy(_md_alloc, &const_cast<Root_object &>(to_root(_handle)));
}

View File

@ -62,9 +62,9 @@ static struct atexit
} _atexit;
static Genode::Lock &atexit_lock()
static Genode::Mutex &atexit_mutex()
{
static Genode::Lock _atexit_lock;
static Genode::Mutex _atexit_lock;
return _atexit_lock;
}
@ -77,7 +77,7 @@ static void atexit_enable()
static int atexit_register(struct atexit_fn *fn)
{
Genode::Lock::Guard atexit_lock_guard(atexit_lock());
Genode::Mutex::Guard atexit_lock_guard(atexit_mutex());
if (!_atexit.enabled)
return 0;
@ -144,7 +144,7 @@ void genode___cxa_finalize(void *dso)
struct atexit_fn fn;
int n = 0;
atexit_lock().lock();
atexit_mutex().acquire();
for (n = _atexit.index; --n >= 0;) {
if (_atexit.fns[n].fn_type == ATEXIT_FN_EMPTY)
continue; /* already been called */
@ -157,7 +157,7 @@ void genode___cxa_finalize(void *dso)
* has already been called.
*/
_atexit.fns[n].fn_type = ATEXIT_FN_EMPTY;
atexit_lock().unlock();
atexit_mutex().release();
/* call the function of correct type */
if (fn.fn_type == ATEXIT_FN_CXA)
@ -165,9 +165,9 @@ void genode___cxa_finalize(void *dso)
else if (fn.fn_type == ATEXIT_FN_STD)
fn.fn_ptr.std_func();
atexit_lock().lock();
atexit_mutex().acquire();
}
atexit_lock().unlock();
atexit_mutex().release();
}

View File

@ -120,7 +120,7 @@ Alarm_timeout_scheduler::Alarm_timeout_scheduler(Time_source &time_source,
Alarm_timeout_scheduler::~Alarm_timeout_scheduler()
{
Lock::Guard lock_guard(_lock);
Mutex::Guard mutex_guard(_mutex);
while (_active_head) {
Alarm *next = _active_head->_next;
_active_head->_alarm_reset();
@ -234,7 +234,7 @@ void Alarm_timeout_scheduler::_alarm_unsynchronized_dequeue(Alarm *alarm)
Timeout::Alarm *Alarm_timeout_scheduler::_alarm_get_pending_alarm()
{
Lock::Guard lock_guard(_lock);
Mutex::Guard mutex_guard(_mutex);
if (!_active_head || !_active_head->_raw.is_pending_at(_now, _now_period)) {
return nullptr; }
@ -244,10 +244,10 @@ Timeout::Alarm *Alarm_timeout_scheduler::_alarm_get_pending_alarm()
_active_head = _active_head->_next;
/*
* Acquire dispatch lock to defer destruction until the call of '_on_alarm'
* Acquire dispatch mutex to defer destruction until the call of '_on_alarm'
* is finished
*/
pending_alarm->_dispatch_lock.lock();
pending_alarm->_dispatch_mutex.acquire();
/* reset alarm object */
pending_alarm->_next = nullptr;
@ -333,12 +333,12 @@ void Alarm_timeout_scheduler::_alarm_handle(Alarm::Time curr_time)
curr->_raw.deadline = deadline;
/* synchronize enqueue operation */
Lock::Guard lock_guard(_lock);
Mutex::Guard mutex_guard(_mutex);
_alarm_unsynchronized_enqueue(curr);
}
/* release alarm, resume concurrent destructor operation */
curr->_dispatch_lock.unlock();
curr->_dispatch_mutex.release();
}
}
@ -362,7 +362,7 @@ void Alarm_timeout_scheduler::_alarm_setup_alarm(Alarm &alarm, Alarm::Time perio
void Alarm_timeout_scheduler::_alarm_schedule_absolute(Alarm *alarm, Alarm::Time duration)
{
Lock::Guard alarm_list_lock_guard(_lock);
Mutex::Guard alarm_list_guard(_mutex);
_alarm_setup_alarm(*alarm, 0, duration);
}
@ -370,7 +370,7 @@ void Alarm_timeout_scheduler::_alarm_schedule_absolute(Alarm *alarm, Alarm::Time
void Alarm_timeout_scheduler::_alarm_schedule(Alarm *alarm, Alarm::Time period)
{
Lock::Guard alarm_list_lock_guard(_lock);
Mutex::Guard alarm_list_guard(_mutex);
/*
* Refuse to schedule a periodic timeout of 0 because it would trigger
@ -392,17 +392,17 @@ void Alarm_timeout_scheduler::_alarm_discard(Alarm *alarm)
{
/*
* Make sure that nobody is inside the '_alarm_get_pending_alarm' when
* grabbing the '_dispatch_lock'. This is important when this function
* is called from the 'Alarm' destructor. Without the '_dispatch_lock',
* we could take the lock and proceed with destruction just before
* '_alarm_get_pending_alarm' tries to grab the lock. When the destructor is
* finished, '_alarm_get_pending_alarm' would proceed with operating on a
* dangling pointer.
* grabbing the '_dispatch_mutex'. This is important when this function
* is called from the 'Alarm' destructor. Without the '_dispatch_mutex',
* we could take the mutex and proceed with destruction just before
* '_alarm_get_pending_alarm' tries to grab the mutex. When the destructor
* is finished, '_alarm_get_pending_alarm' would proceed with operating on
* a dangling pointer.
*/
Lock::Guard alarm_list_lock_guard(_lock);
Mutex::Guard alarm_list_guard(_mutex);
if (alarm) {
Lock::Guard alarm_lock_guard(alarm->_dispatch_lock);
Mutex::Guard alarm_guard(alarm->_dispatch_mutex);
_alarm_unsynchronized_dequeue(alarm);
}
}
@ -410,7 +410,7 @@ void Alarm_timeout_scheduler::_alarm_discard(Alarm *alarm)
bool Alarm_timeout_scheduler::_alarm_next_deadline(Alarm::Time *deadline)
{
Lock::Guard alarm_list_lock_guard(_lock);
Mutex::Guard alarm_list_guard(_mutex);
if (!_active_head) return false;

View File

@ -21,7 +21,7 @@ using namespace Genode::Trace;
void Timer::Connection::_update_real_time()
{
Lock_guard<Lock> lock_guard(_real_time_lock);
Mutex::Guard guard(_real_time_mutex);
/*
@ -145,8 +145,8 @@ Duration Timer::Connection::curr_time()
{
_enable_modern_mode();
Reconstructible<Lock_guard<Lock> > lock_guard(_real_time_lock);
Duration interpolated_time(_real_time);
Reconstructible<Mutex::Guard> mutex_guard(_real_time_mutex);
Duration interpolated_time(_real_time);
/*
* Interpolate with timestamps only if the factor value
@ -158,12 +158,12 @@ Duration Timer::Connection::curr_time()
*/
if (_interpolation_quality == MAX_INTERPOLATION_QUALITY)
{
/* buffer interpolation related members and free the lock */
/* buffer interpolation related members and free the mutex */
Timestamp const ts = _ts;
uint64_t const us_to_ts_factor = _us_to_ts_factor;
unsigned const us_to_ts_factor_shift = _us_to_ts_factor_shift;
lock_guard.destruct();
mutex_guard.destruct();
/* interpolate time difference since the last real time update */
Timestamp const ts_diff = _timestamp() - ts;
@ -177,7 +177,7 @@ Duration Timer::Connection::curr_time()
/* use remote timer instead of timestamps */
interpolated_time.add(Microseconds(elapsed_us() - _us));
lock_guard.destruct();
mutex_guard.destruct();
}
return _update_interpolated_time(interpolated_time);
}

View File

@ -156,11 +156,11 @@ namespace Affinity_test {
{
Genode::Affinity::Location const location;
Genode::uint64_t volatile cnt;
Genode::Lock barrier;
Genode::Blockade barrier { };
void entry() override
{
barrier.unlock();
barrier.wakeup();
Genode::log("Affinity: thread started on CPU ",
location, " spinning...");
@ -170,7 +170,7 @@ namespace Affinity_test {
Spinning_thread(Genode::Env &env, Location location)
: Genode::Thread(env, Name("spinning_thread"), STACK_SIZE, location,
Weight(), env.cpu()),
location(location), cnt(0ULL), barrier(Genode::Lock::LOCKED) {
location(location), cnt(0ULL) {
start(); }
};
@ -192,7 +192,7 @@ namespace Affinity_test {
/* wait until all threads are up and running */
for (unsigned i = 0; i < cpus.total(); i++)
threads[i]->barrier.lock();
threads[i]->barrier.block();
log("Affinity: Threads started on a different CPU each.");
log("Affinity: You may inspect them using the kernel debugger - if you have one.");
@ -254,13 +254,13 @@ namespace Tlb_shootdown_test {
unsigned cpu_idx;
volatile unsigned * values;
Genode::Lock barrier;
Genode::Blockade barrier { };
void entry() override
{
Genode::log("TLB: thread started on CPU ", cpu_idx);
values[cpu_idx] = 1;
barrier.unlock();
barrier.wakeup();
for (; values[cpu_idx] == 1;) ;
@ -271,7 +271,7 @@ namespace Tlb_shootdown_test {
volatile unsigned * values)
: Genode::Thread(env, Name("tlb_thread"), STACK_SIZE, location,
Weight(), env.cpu()),
cpu_idx(idx), values(values), barrier(Genode::Lock::LOCKED) {
cpu_idx(idx), values(values) {
start(); }
/*
@ -302,7 +302,7 @@ namespace Tlb_shootdown_test {
ram_ds->local_addr<volatile unsigned>());
/* wait until all threads are up and running */
for (unsigned i = 1; i < cpus.total(); i++) threads[i]->barrier.lock();
for (unsigned i = 1; i < cpus.total(); i++) threads[i]->barrier.block();
log("TLB: all threads are up and running...");
destroy(heap, ram_ds);

View File

@ -28,18 +28,18 @@ struct Adder
};
struct Pseudo_lock
struct Pseudo_mutex
{
void lock() { log("lock"); }
void unlock() { log("unlock"); }
void acquire() { log("acquire"); }
void release() { log("release"); }
};
struct Main
{
Pseudo_lock lock { };
Adder adder { };
Synced_interface<Adder, Pseudo_lock> synced_adder { lock, &adder };
Pseudo_mutex mutex { };
Adder adder { };
Synced_interface<Adder, Pseudo_mutex> synced_adder { mutex, &adder };
Main(Env &)
{

View File

@ -349,11 +349,11 @@ static void test_create_as_many_threads(Env &env)
struct Lock_helper : Thread
{
Lock &lock;
bool &lock_is_free;
bool unlock;
Blockade &lock;
bool &lock_is_free;
bool unlock;
Lock_helper(Env &env, const char * name, Cpu_session &cpu, Lock &lock,
Lock_helper(Env &env, const char * name, Cpu_session &cpu, Blockade &lock,
bool &lock_is_free, bool unlock = false)
:
Thread(env, name, STACK_SIZE, Thread::Location(), Thread::Weight(),
@ -366,9 +366,9 @@ struct Lock_helper : Thread
log(" thread '", name(), "' started");
if (unlock)
lock.unlock();
lock.wakeup();
lock.lock();
lock.block();
if (!lock_is_free) {
log(" thread '", name(), "' got lock but somebody else is within"
@@ -378,13 +378,13 @@ struct Lock_helper : Thread
log(" thread '", name(), "' done");
lock.unlock();
lock.wakeup();
}
};
static void test_locks(Genode::Env &env)
{
Lock lock (Lock::LOCKED);
Blockade lock;
bool lock_is_free = true;
@@ -403,7 +403,7 @@ static void test_locks(Genode::Env &env)
l3.start();
l4.start();
lock.lock();
lock.block();
log(" thread '", Thread::myself()->name(), "' - I'm the lock holder - "
"take lock again");
@@ -421,11 +421,11 @@ static void test_locks(Genode::Env &env)
for (unsigned volatile i = 0; i < 8000000; ++i) memory_barrier();
log(" spinning done");
lock.lock();
lock.block();
log(" I'm the lock holder - still alive");
lock_is_free = true;
lock.unlock();
lock.wakeup();
/* check that really all threads come back ! */
l1.join();
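Note that, in contrast to the former Lock, which had to be constructed with Lock::LOCKED for this use case, a default-constructed Blockade already blocks its first block() caller, hence the plain 'Blockade lock;' above. The core of the hand-over pattern, reduced to its two operations:

Genode::Blockade barrier { };   /* starts out blocked, no LOCKED argument needed */

/* thread A: sleeps until thread B signals */
barrier.block();

/* thread B: releases a blocked (or the next blocking) thread A */
barrier.wakeup();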
@@ -444,13 +444,13 @@ static void test_locks(Genode::Env &env)
struct Cxa_helper : Thread
{
Lock &in_cxa;
Lock &sync_startup;
int test;
bool sync;
Blockade &in_cxa;
Blockade &sync_startup;
int test;
bool sync;
Cxa_helper(Env &env, const char * name, Cpu_session &cpu, Lock &cxa,
Lock &startup, int test, bool sync = false)
Cxa_helper(Env &env, const char * name, Cpu_session &cpu, Blockade &cxa,
Blockade &startup, int test, bool sync = false)
:
Thread(env, name, STACK_SIZE, Thread::Location(), Thread::Weight(),
cpu),
@@ -462,14 +462,14 @@ struct Cxa_helper : Thread
log(" thread '", name(), "' started");
if (sync)
sync_startup.unlock();
sync_startup.wakeup();
struct Contention {
Contention(Name name, Lock &in_cxa, Lock &sync_startup)
Contention(Name name, Blockade &in_cxa, Blockade &sync_startup)
{
log(" thread '", name, "' in static constructor");
sync_startup.unlock();
in_cxa.lock();
sync_startup.wakeup();
in_cxa.block();
}
};
@@ -501,16 +501,16 @@ static void test_cxa_guards(Env &env)
{
enum { TEST_1ST = 1 };
Lock in_cxa (Lock::LOCKED);
Lock sync_startup (Lock::LOCKED);
Blockade in_cxa;
Blockade sync_startup;
/* start low priority thread */
Cxa_helper cxa_l(env, "cxa_low", cpu_l, in_cxa, sync_startup, TEST_1ST);
cxa_l.start();
/* wait until low priority thread is inside static variable */
sync_startup.lock();
sync_startup.unlock();
sync_startup.block();
sync_startup.wakeup();
/* start high priority threads */
Cxa_helper cxa_h1(env, "cxa_high_1", env.cpu(), in_cxa, sync_startup,
@@ -537,10 +537,10 @@ static void test_cxa_guards(Env &env)
* if the middle priority thread manages to sync with current
* (high priority) entrypoint thread
*/
sync_startup.lock();
sync_startup.block();
/* let's see whether we get all our threads out of the static variable */
in_cxa.unlock();
in_cxa.wakeup();
/* eureka ! */
cxa_h1.join(); cxa_h2.join(); cxa_h3.join(); cxa_h4.join();
@@ -551,12 +551,12 @@ static void test_cxa_guards(Env &env)
{
enum { TEST_2ND = 2, TEST_3RD = 3, TEST_4TH = 4 };
Lock in_cxa_2 (Lock::LOCKED);
Lock sync_startup_2 (Lock::LOCKED);
Lock in_cxa_3 (Lock::LOCKED);
Lock sync_startup_3 (Lock::LOCKED);
Lock in_cxa_4 (Lock::LOCKED);
Lock sync_startup_4 (Lock::LOCKED);
Blockade in_cxa_2;
Blockade sync_startup_2;
Blockade in_cxa_3;
Blockade sync_startup_3;
Blockade in_cxa_4;
Blockade sync_startup_4;
/* start low priority threads */
Cxa_helper cxa_l_2(env, "cxa_low_2", cpu_l, in_cxa_2, sync_startup_2,
@@ -570,12 +570,12 @@ static void test_cxa_guards(Env &env)
cxa_l_4.start();
/* wait until low priority threads are inside static variables */
sync_startup_2.lock();
sync_startup_2.unlock();
sync_startup_3.lock();
sync_startup_3.unlock();
sync_startup_4.lock();
sync_startup_4.unlock();
sync_startup_2.block();
sync_startup_2.wakeup();
sync_startup_3.block();
sync_startup_3.wakeup();
sync_startup_4.block();
sync_startup_4.wakeup();
/* start high priority threads */
Cxa_helper cxa_h1_2(env, "cxa_high_1_2", env.cpu(), in_cxa_2,
@@ -628,14 +628,14 @@ static void test_cxa_guards(Env &env)
* variables, if the middle priority threads manage to sync with
* current (high priority) entrypoint thread
*/
sync_startup_2.lock();
sync_startup_3.lock();
sync_startup_4.lock();
sync_startup_2.block();
sync_startup_3.block();
sync_startup_4.block();
/* let's see whether we get all our threads out of the static variable */
in_cxa_4.unlock();
in_cxa_3.unlock();
in_cxa_2.unlock();
in_cxa_4.wakeup();
in_cxa_3.wakeup();
in_cxa_2.wakeup();
cxa_h1_2.join(); cxa_h2_2.join(); cxa_h3_2.join(); cxa_h4_2.join();
cxa_m_2.join(); cxa_l_2.join();
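Both cxa-guard tests exercise the __cxa_guard_acquire/__cxa_guard_release calls that the compiler emits around function-local statics: the first thread to enter runs the constructor while all concurrently arriving threads block on the guard. A minimal sketch of the construct under test, relying only on standard C++ semantics:

struct Slow_init
{
	Slow_init() { /* long-running constructor, like Contention above */ }
};

static Slow_init &instance()
{
	/*
	 * The C++ runtime guards this initialization: concurrent callers
	 * block here until the first caller has finished the constructor.
	 */
	static Slow_init object { };
	return object;
}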

View File

@@ -68,14 +68,14 @@ static l4_timeout_s mus_to_timeout(uint64_t mus)
Microseconds Timer::Time_source::max_timeout() const
{
Genode::Lock::Guard lock_guard(_lock);
Genode::Mutex::Guard mutex_guard(_mutex);
return Microseconds(1000 * 1000 * 100);
}
Duration Timer::Time_source::curr_time()
{
Genode::Lock::Guard lock_guard(_lock);
Genode::Mutex::Guard mutex_guard(_mutex);
static Genode::Attached_rom_dataspace kip_ds(_env, "l4v2_kip");
static Fiasco::l4_kernel_info_t * const kip =
kip_ds.local_addr<Fiasco::l4_kernel_info_t>();

View File

@@ -21,7 +21,7 @@ using namespace Genode;
void Timer::Time_source::schedule_timeout(Microseconds duration,
Timeout_handler &handler)
{
Genode::Lock::Guard lock_guard(_lock);
Mutex::Guard mutex_guard(_mutex);
Threaded_time_source::handler(handler);
_next_timeout_us = duration.value;
}
@@ -31,9 +31,9 @@ void Timer::Time_source::_wait_for_irq()
{
enum { SLEEP_GRANULARITY_US = 1000 };
uint64_t last_time_us = curr_time().trunc_to_plain_us().value;
_lock.lock();
_mutex.acquire();
while (_next_timeout_us > 0) {
_lock.unlock();
_mutex.release();
try { _usleep(SLEEP_GRANULARITY_US); }
catch (Blocking_canceled) { }
@@ -42,11 +42,11 @@ void Timer::Time_source::_wait_for_irq()
uint64_t sleep_duration_us = curr_time_us - last_time_us;
last_time_us = curr_time_us;
_lock.lock();
_mutex.acquire();
if (_next_timeout_us >= sleep_duration_us)
_next_timeout_us -= sleep_duration_us;
else
break;
}
_lock.unlock();
_mutex.release();
}
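The loop above follows a common discipline: the mutex is dropped around the blocking _usleep() so that schedule_timeout() can update _next_timeout_us in the meantime, and is reacquired before the shared counter is inspected again. The pattern in the abstract, with shared_work_remaining and blocking_operation as placeholder names:

mutex.acquire();
while (shared_work_remaining) {   /* shared state, read under the mutex */
	mutex.release();              /* never sleep while holding the mutex */
	blocking_operation();         /* e.g. the _usleep() above */
	mutex.acquire();              /* reacquire before touching shared state */
}
mutex.release();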

View File

@@ -31,9 +31,9 @@ class Timer::Time_source : public Threaded_time_source
Genode::Env &_env;
Genode::Lock mutable _lock { };
uint64_t _curr_time_us = 0;
uint64_t _next_timeout_us = max_timeout().value;
Genode::Mutex mutable _mutex { };
uint64_t _curr_time_us = 0;
uint64_t _next_timeout_us = max_timeout().value;
void _usleep(uint64_t us);
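The mutex is declared mutable so that logically const accessors such as max_timeout() const can still take a guard on it. A minimal sketch of that idiom:

class Cache
{
	private:

		Genode::Mutex mutable _mutex { };

		unsigned _value = 0;

	public:

		/* const interface, yet internally synchronized */
		unsigned value() const
		{
			Genode::Mutex::Guard guard(_mutex);
			return _value;
		}
};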

View File

@@ -99,7 +99,7 @@ class Timed_semaphore : public Semaphore
*/
bool _abort(Element &element)
{
Genode::Lock::Guard lock_guard(Semaphore::_meta_lock);
Genode::Mutex::Guard mutex_guard(Semaphore::_meta_lock);
/* potentially, the queue is empty */
if (++Semaphore::_cnt <= 0) {
@@ -119,7 +119,7 @@ class Timed_semaphore : public Semaphore
* Wakeup the thread.
*/
if (&element == e) {
e->wake_up();
e->blockade.wakeup();
return true;
}
@@ -198,14 +198,14 @@ class Timed_semaphore : public Semaphore
*/
Alarm::Time down(Alarm::Time t)
{
Semaphore::_meta_lock.lock();
Semaphore::_meta_lock.acquire();
if (--Semaphore::_cnt < 0) {
/* If t==0 we shall not block */
if (t == 0) {
++_cnt;
Semaphore::_meta_lock.unlock();
Semaphore::_meta_lock.release();
throw Nonblocking_exception();
}
@@ -215,7 +215,7 @@ class Timed_semaphore : public Semaphore
*/
Element queue_element;
Semaphore::_queue.enqueue(queue_element);
Semaphore::_meta_lock.unlock();
Semaphore::_meta_lock.release();
/* Create the timeout */
Alarm::Time const curr_time = _timeout_ep.time();
@@ -227,7 +227,7 @@ class Timed_semaphore : public Semaphore
* waiting for getting waked from another thread
* calling 'up()'
* */
queue_element.block();
queue_element.blockade.block();
/* Deactivate timeout */
_timeout_ep.discard(timeout);
@@ -243,7 +243,7 @@ class Timed_semaphore : public Semaphore
return _timeout_ep.time() - timeout.start();
} else {
Semaphore::_meta_lock.unlock();
Semaphore::_meta_lock.release();
}
return 0;
}
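From the caller's perspective, the timed down() differs from a plain Semaphore only in its failure modes. A hedged usage sketch (time units and exception types as assumed from this header):

Timed_semaphore sem { };

try {
	sem.down(1000);   /* wait at most 1000 time units for an up() */
}
catch (Timeout_exception)     { /* no up() arrived in time */ }
catch (Nonblocking_exception) { /* thrown only by down(0) when it would block */ }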

View File

@@ -178,7 +178,7 @@ class Vcpu : public StaticReceiver<Vcpu>
Vcpu(Genode::Entrypoint &ep,
Genode::Vm_connection &vm_con,
Genode::Allocator &alloc, Genode::Env &env,
Genode::Lock &vcpu_lock, VCpu *unsynchronized_vcpu,
Genode::Mutex &vcpu_mutex, VCpu *unsynchronized_vcpu,
Seoul::Guest_memory &guest_memory, Synced_motherboard &motherboard,
bool vmx, bool svm, bool map_small, bool rdtsc)
:
@@ -197,7 +197,7 @@ class Vcpu : public StaticReceiver<Vcpu>
_guest_memory(guest_memory),
_motherboard(motherboard),
_vcpu(vcpu_lock, unsynchronized_vcpu)
_vcpu(vcpu_mutex, unsynchronized_vcpu)
{
if (!_svm && !_vmx)
Logging::panic("no SVM/VMX available, sorry");
@@ -764,10 +764,10 @@ class Machine : public StaticReceiver<Machine>
Genode::Heap &_heap;
Genode::Vm_connection &_vm_con;
Clock _clock;
Genode::Lock _motherboard_lock;
Genode::Mutex _motherboard_mutex { };
Motherboard _unsynchronized_motherboard;
Synced_motherboard _motherboard;
Genode::Lock _timeouts_lock { };
Genode::Mutex _timeouts_mutex { };
TimeoutList<32, void> _unsynchronized_timeouts { };
Synced_timeout_list _timeouts;
Seoul::Guest_memory &_guest_memory;
@@ -912,7 +912,7 @@ class Machine : public StaticReceiver<Machine>
_vcpus_active.set(_vcpus_up, 1);
Vcpu * vcpu = new Vcpu(*ep, _vm_con, _heap, _env,
_motherboard_lock, msg.vcpu,
_motherboard_mutex, msg.vcpu,
_guest_memory, _motherboard,
has_vmx, has_svm, _map_small,
_rdtsc_exit);
@@ -967,11 +967,11 @@ class Machine : public StaticReceiver<Machine>
_unsynchronized_motherboard.bus_console.send(msgcon);
}
_motherboard_lock.unlock();
_motherboard_mutex.release();
_vcpus[vcpu_id]->block();
_motherboard_lock.lock();
_motherboard_mutex.acquire();
if (!_vcpus_active.get(0, 64)) {
MessageConsole msgcon(MessageConsole::Type::TYPE_RESET);
@@ -1186,16 +1186,17 @@ class Machine : public StaticReceiver<Machine>
:
_env(env), _heap(heap), _vm_con(vm_con),
_clock(Attached_rom_dataspace(env, "platform_info").xml().sub_node("hardware").sub_node("tsc").attribute_value("freq_khz", 0ULL) * 1000ULL),
_motherboard_lock(Genode::Lock::LOCKED),
_unsynchronized_motherboard(&_clock, nullptr),
_motherboard(_motherboard_lock, &_unsynchronized_motherboard),
_timeouts(_timeouts_lock, &_unsynchronized_timeouts),
_motherboard(_motherboard_mutex, &_unsynchronized_motherboard),
_timeouts(_timeouts_mutex, &_unsynchronized_timeouts),
_guest_memory(guest_memory),
_boot_modules(boot_modules),
_map_small(map_small),
_rdtsc_exit(rdtsc_exit),
_same_cpu(vmm_vcpu_same_cpu)
{
_motherboard_mutex.acquire();
_timeouts()->init();
/* register host operations, called back by the VMM */
@@ -1338,7 +1339,7 @@ class Machine : public StaticReceiver<Machine>
Logging::printf("INIT done\n");
_motherboard_lock.unlock();
_motherboard_mutex.release();
}
Synced_motherboard &motherboard() { return _motherboard; }