hw: helping on IPC

On base-hw, each thread owns exactly one scheduling context for its
whole lifetime. However, introducing helping on IPC, a thread might get
executed on scheduling contexts that it doesn't own. Figuratively
speaking, the IPC-helping relation spans trees between threads. These
trees are identical to those of the IPC relation between threads. The
root of such a tree is executed on all scheduling contexts in the tree.
All other threads in the tree are not executed on any scheduling context
as long as they remain in this position. Consequently, the ready-state
of all scheduling contexts in an IPC-helping tree always equals the
state of the root context.

fix #1102
This commit is contained in:
Martin Stein 2014-12-01 15:10:33 +01:00 committed by Christian Helmuth
parent 6370b6880a
commit d704563453
7 changed files with 131 additions and 35 deletions

View File

@ -109,20 +109,25 @@ class Kernel::Cpu_job : public Cpu_share
void _interrupt(unsigned const id);
/**
* Insert context into the scheduling of this CPU
* Activate our own CPU-share
*/
void _schedule();
void _activate_own_share();
/**
* Remove context from the scheduling of this CPU
* Deactivate our own CPU-share
*/
void _unschedule();
void _deactivate_own_share();
/**
* Yield the currently scheduled CPU share of this context
*/
void _yield();
/**
* Return whether we are allowed to help job 'j' with our CPU-share
*/
bool _helping_possible(Cpu_job * const j) { return j->_cpu == _cpu; }
public:
/**
@ -135,6 +140,11 @@ class Kernel::Cpu_job : public Cpu_share
*/
virtual void proceed(unsigned const id) = 0;
/**
* Return which job currently uses our CPU-share
*/
virtual Cpu_job * helping_sink() = 0;
/**
* Construct a job with scheduling priority 'p' and time quota 'q'
*/
@ -151,6 +161,11 @@ class Kernel::Cpu_job : public Cpu_share
*/
void affinity(Cpu * const cpu);
/**
* Return whether our CPU-share is currently active
*/
bool own_share_active() { return Cpu_share::ready(); }
/***************
** Accessors **
***************/
@ -179,9 +194,11 @@ class Kernel::Cpu_idle : public Genode::Cpu::User_context, public Cpu_job
*/
Cpu_idle(Cpu * const cpu);
/**
* Handle exception that occurred during execution on CPU 'cpu'
/*
* Cpu_job interface
*/
void exception(unsigned const cpu)
{
switch (cpu_exception) {
@ -191,10 +208,8 @@ class Kernel::Cpu_idle : public Genode::Cpu::User_context, public Cpu_job
default: assert(0); }
}
/**
* Continue execution on CPU 'cpu_id'
*/
void proceed(unsigned const cpu_id);
Cpu_job * helping_sink() { return this; }
};
class Kernel::Cpu : public Genode::Cpu
@ -211,7 +226,8 @@ class Kernel::Cpu : public Genode::Cpu
unsigned _quota() const { return _timer->ms_to_tics(cpu_quota_ms); }
unsigned _fill() const { return _timer->ms_to_tics(cpu_fill_ms); }
Job * _head() const { return static_cast<Job *>(_scheduler.head()); }
Job * _scheduled_job() const {
return static_cast<Job *>(_scheduler.head())->helping_sink(); }
public:
@ -250,7 +266,7 @@ class Kernel::Cpu : public Genode::Cpu
void exception()
{
/* update old job */
Job * const old_job = _head();
Job * const old_job = _scheduled_job();
old_job->exception(_id);
/* update scheduler */
@ -260,7 +276,7 @@ class Kernel::Cpu : public Genode::Cpu
_scheduler.update(quota);
/* get new job */
Job * const new_job = _head();
Job * const new_job = _scheduled_job();
quota = _scheduler.head_quota();
assert(quota);
_timer->start_one_shot(quota, _id);

View File

@ -99,6 +99,12 @@ class Kernel::Cpu_share : public Cpu_claim, public Cpu_fill
*/
Cpu_share(signed const p, unsigned const q)
: _prio(p), _quota(q), _claim(q), _ready(0) { }
/*
* Accessors
*/
bool ready() const { return _ready; }
};
class Kernel::Cpu_scheduler

View File

@ -61,6 +61,7 @@ class Kernel::Ipc_node
Message_buf _inbuf;
Message_buf _outbuf;
Ipc_node * _outbuf_dst;
bool _outbuf_dst_help;
State _state;
/**
@ -179,6 +180,15 @@ class Kernel::Ipc_node
}
}
/**
* Return whether we are the source of a helping relationship
*/
bool _helps_outbuf_dst()
{
return (_state == PREPARE_AND_AWAIT_REPLY ||
_state == AWAIT_REPLY) && _outbuf_dst_help;
}
/**
* IPC node returned from waiting due to reply receipt
*/
@ -227,23 +237,28 @@ class Kernel::Ipc_node
* \param buf_base base of receive buffer and request message
* \param buf_size size of receive buffer
* \param msg_size size of request message
* \param help whether the request implies a helping relationship
*/
void send_request(Ipc_node * const dst, void * const buf_base,
size_t const buf_size, size_t const msg_size)
size_t const buf_size, size_t const msg_size,
bool help)
{
/* assertions */
assert(_state == INACTIVE || _state == PREPARE_REPLY);
/* prepare transmission of request message */
_outbuf.base = buf_base;
_outbuf.size = msg_size;
_outbuf.src = this;
_outbuf_dst = dst;
_outbuf.base = buf_base;
_outbuf.size = msg_size;
_outbuf.src = this;
_outbuf_dst = dst;
_outbuf_dst_help = 0;
/* prepare reception of reply message */
/*
* Prepare reception of reply message but don't clear
* '_inbuf.origin' because we might also prepare a reply.
*/
_inbuf.base = buf_base;
_inbuf.size = buf_size;
/* don't clear '_inbuf.origin' because we might prepare a reply */
/* update state */
if (_state != PREPARE_REPLY) { _state = AWAIT_REPLY; }
@ -251,6 +266,29 @@ class Kernel::Ipc_node
/* announce request */
dst->_announce_request(&_outbuf);
/* set help relation after announcement to simplify scheduling */
_outbuf_dst_help = help;
}
/**
* Return root destination of the helping-relation tree we are in
*/
Ipc_node * helping_sink() {
return _helps_outbuf_dst() ? _outbuf_dst->helping_sink() : this; }
/**
* Call function 'f' of type 'void (Ipc_node *)' for each helper
*/
template <typename F> void for_each_helper(F f)
{
/* if we have a helper in the receive buffer, call 'f' for it */
if (_state == PREPARE_REPLY || _state == PREPARE_AND_AWAIT_REPLY) {
if (_inbuf.src->_outbuf_dst_help) { f(_inbuf.src); } }
/* call 'f' for each helper in our request queue */
_request_queue.for_each([f] (Message_buf * const b) {
if (b->src->_outbuf_dst_help) { f(b->src); } });
}
/**

View File

@ -44,8 +44,8 @@ class Kernel::Thread
:
public Cpu::User_context,
public Object<Thread, MAX_THREADS, Thread_ids, thread_ids, thread_pool>,
public Cpu_job, public Cpu_domain_update, public Ipc_node,
public Signal_context_killer, public Signal_handler, public Thread_base
public Cpu_domain_update, public Ipc_node, public Signal_context_killer,
public Signal_handler, public Thread_base, public Cpu_job
{
friend class Thread_event;
@ -112,6 +112,16 @@ class Kernel::Thread
*/
void _become_inactive(State const s);
/**
* Activate our CPU-share and those of our helpers
*/
void _activate_used_shares();
/**
* Deactivate our CPU-share and those of our helpers
*/
void _deactivate_used_shares();
/**
* Pause execution
*/
@ -294,6 +304,7 @@ class Kernel::Thread
void exception(unsigned const cpu);
void proceed(unsigned const cpu);
Cpu_job * helping_sink();
/***************

View File

@ -68,8 +68,8 @@ class Kernel::Vm : public Object<Vm, MAX_VMS, Vm_ids, vm_ids, vm_pool>,
** Vm_session **
****************/
void run() { Cpu_job::_schedule(); }
void pause() { Cpu_job::_unschedule(); }
void run() { Cpu_job::_activate_own_share(); }
void pause() { Cpu_job::_deactivate_own_share(); }
/*************
@ -86,12 +86,13 @@ class Kernel::Vm : public Object<Vm, MAX_VMS, Vm_ids, vm_ids, vm_pool>,
case Genode::Cpu_state::DATA_ABORT:
_state->dfar = Cpu::Dfar::read();
default:
Cpu_job::_unschedule();
Cpu_job::_deactivate_own_share();
_context->submit(1);
}
}
void proceed(unsigned const cpu) { mtc()->continue_vm(_state, cpu); }
Cpu_job * helping_sink() { return this; }
};
#endif /* _KERNEL__VM_H_ */

View File

@ -65,12 +65,16 @@ namespace Kernel
** Cpu_job **
*************/
Cpu_job::~Cpu_job() { if (_cpu) { _cpu->scheduler()->remove(this); } }
Cpu_job::~Cpu_job()
{
if (!_cpu) { return; }
_cpu->scheduler()->remove(this);
}
void Cpu_job::_schedule() { _cpu->schedule(this); }
void Cpu_job::_activate_own_share() { _cpu->schedule(this); }
void Cpu_job::_unschedule()
void Cpu_job::_deactivate_own_share()
{
assert(_cpu->id() == Cpu::executing_id());
_cpu->scheduler()->unready(this);

View File

@ -74,7 +74,8 @@ void Thread::_send_request_succeeded()
{
assert(_state == AWAITS_IPC);
user_arg_0(0);
_become_active();
_state = ACTIVE;
if (!Cpu_job::own_share_active()) { _activate_used_shares(); }
}
@ -82,7 +83,8 @@ void Thread::_send_request_failed()
{
assert(_state == AWAITS_IPC);
user_arg_0(-1);
_become_active();
_state = ACTIVE;
if (!Cpu_job::own_share_active()) { _activate_used_shares(); }
}
@ -129,18 +131,30 @@ void Thread::_pause()
_become_inactive(AWAITS_RESUME);
}
void Thread::_deactivate_used_shares()
{
Cpu_job::_deactivate_own_share();
Ipc_node::for_each_helper([&] (Ipc_node * const h) {
static_cast<Thread *>(h)->_deactivate_used_shares(); });
}
void Thread::_activate_used_shares()
{
Cpu_job::_activate_own_share();
Ipc_node::for_each_helper([&] (Ipc_node * const h) {
static_cast<Thread *>(h)->_activate_used_shares(); });
}
void Thread::_become_active()
{
if (_state == ACTIVE) { return; }
Cpu_job::_schedule();
if (_state != ACTIVE) { _activate_used_shares(); }
_state = ACTIVE;
}
void Thread::_become_inactive(State const s)
{
if (_state == ACTIVE) { Cpu_job::_unschedule(); }
if (_state == ACTIVE) { _deactivate_used_shares(); }
_state = s;
}
@ -148,7 +162,7 @@ void Thread::_become_inactive(State const s)
Thread::Thread(unsigned const priority, unsigned const quota,
char const * const label)
:
Cpu_job(priority, quota), Thread_base(this), _state(AWAITS_START), _pd(0),
Thread_base(this), Cpu_job(priority, quota), _state(AWAITS_START), _pd(0),
_utcb_phys(0), _signal_receiver(0), _label(label)
{ cpu_exception = RESET; }
@ -181,6 +195,10 @@ void Thread::init(Cpu * const cpu, Pd * const pd,
void Thread::_stop() { _become_inactive(STOPPED); }
Cpu_job * Thread::helping_sink() {
return static_cast<Thread *>(Ipc_node::helping_sink()); }
void Thread::exception(unsigned const cpu)
{
switch (cpu_exception) {
@ -411,11 +429,13 @@ void Thread::_call_send_request_msg()
_become_inactive(AWAITS_IPC);
return;
}
bool const help = Cpu_job::_helping_possible(dst);
void * buf_base;
size_t buf_size, msg_size;
_utcb_phys->message()->request_info(buf_base, buf_size, msg_size);
Ipc_node::send_request(dst, buf_base, buf_size, msg_size);
_become_inactive(AWAITS_IPC);
_state = AWAITS_IPC;
Ipc_node::send_request(dst, buf_base, buf_size, msg_size, help);
if (!help || !dst->own_share_active()) { _deactivate_used_shares(); }
}