hw: separate function declaration/implementation

Move kernel object functions from its headers to compilation units,
thereby reducing the kernel's text section, cache, and TLB footprint.

Fix #1492
devel
Stefan Kalkowski 8 years ago committed by Christian Helmuth
parent bc3ba380ae
commit e61a3db30d

@ -45,9 +45,12 @@ SRC_CC += thread_start.cc
SRC_CC += rm_session_support.cc
SRC_CC += pager.cc
SRC_CC += _main.cc
SRC_CC += kernel/cpu_scheduler.cc
SRC_CC += kernel/double_list.cc
SRC_CC += kernel/kernel.cc
SRC_CC += kernel/thread.cc
SRC_CC += kernel/signal_receiver.cc
SRC_CC += kernel/ipc_node.cc
SRC_CC += kernel/irq.cc
SRC_CC += kernel/pd.cc
SRC_CC += kernel/cpu.cc

@ -16,14 +16,12 @@
#define _KERNEL__CPU_H_
/* core includes */
#include <translation_table.h>
#include <timer.h>
#include <cpu.h>
#include <kernel/cpu_scheduler.h>
#include <kernel/irq.h>
/* base includes */
#include <unmanaged_singleton.h>
namespace Genode { class Translation_table; }
namespace Kernel
{
@ -97,7 +95,7 @@ class Kernel::Cpu_domain_update : public Double_list_item
/**
* Domain-update back-end
*/
void _domain_update() { Genode::Cpu::flush_tlb_by_pid(_domain_id); }
void _domain_update();
/**
* Perform the domain update on the executing CPU
@ -106,13 +104,7 @@ class Kernel::Cpu_domain_update : public Double_list_item
protected:
/**
* Constructor
*/
Cpu_domain_update()
{
for (unsigned i = 0; i < NR_OF_CPUS; i++) { _pending[i] = false; }
}
Cpu_domain_update();
/**
* Do an update of domain 'id' on all CPUs and return if this blocks
@ -177,8 +169,7 @@ class Kernel::Cpu_job : public Cpu_share
/**
* Construct a job with scheduling priority 'p' and time quota 'q'
*/
Cpu_job(Cpu_priority const p, unsigned const q)
: Cpu_share(p, q), _cpu(0) { }
Cpu_job(Cpu_priority const p, unsigned const q);
/**
* Destructor
@ -219,7 +210,7 @@ class Kernel::Cpu_idle : public Genode::Cpu::User_context, public Cpu_job
/**
* Main function of all idle threads
*/
static void _main() { while (1) { Genode::Cpu::wait_for_interrupt(); } }
static void _main();
public:
@ -289,11 +280,7 @@ class Kernel::Cpu : public Genode::Cpu,
/**
* Construct object for CPU 'id' with scheduling timer 'timer'
*/
Cpu(unsigned const id, Timer * const timer)
: _id(id), _idle(this), _timer(timer),
_scheduler(&_idle, _quota(), _fill()),
_ipi_irq(*this),
_timer_irq(_timer->interrupt_id(_id), *this) { }
Cpu(unsigned const id, Timer * const timer);
/**
* Raise the IPI of the CPU
@ -306,13 +293,7 @@ class Kernel::Cpu : public Genode::Cpu,
* \param irq_id id of the interrupt that occurred
* \returns true if the interrupt belongs to this CPU, otherwise false
*/
bool interrupt(unsigned const irq_id)
{
Irq * const irq = object(irq_id);
if (!irq) return false;
irq->occurred();
return true;
}
bool interrupt(unsigned const irq_id);
/**
* Schedule 'job' at this CPU
@ -322,32 +303,8 @@ class Kernel::Cpu : public Genode::Cpu,
/**
* Handle recent exception of the CPU and proceed its user execution
*/
void exception()
{
/* update old job */
Job * const old_job = scheduled_job();
old_job->exception(_id);
/* update scheduler */
unsigned const old_time = _scheduler.head_quota();
unsigned const new_time = _timer->value(_id);
unsigned quota = old_time > new_time ? old_time - new_time : 1;
_scheduler.update(quota);
/* get new job */
Job * const new_job = scheduled_job();
quota = _scheduler.head_quota();
assert(quota);
_timer->start_one_shot(quota, _id);
/* switch between lazy state of old and new job */
Cpu_lazy_state * const old_state = old_job->lazy_state();
Cpu_lazy_state * const new_state = new_job->lazy_state();
prepare_proceeding(old_state, new_state);
/* resume new job */
new_job->proceed(_id);
}
void exception();
/***************
** Accessors **
@ -372,24 +329,12 @@ class Kernel::Cpu_pool
public:
/**
* Construct pool and thereby objects for all available CPUs
*/
Cpu_pool()
{
for (unsigned id = 0; id < NR_OF_CPUS; id++) {
new (_cpus[id]) Cpu(id, &_timer); }
}
Cpu_pool();
/**
* Return object of CPU 'id'
*/
Cpu * cpu(unsigned const id) const
{
assert(id < NR_OF_CPUS);
char * const p = const_cast<char *>(_cpus[id]);
return reinterpret_cast<Cpu *>(p);
}
Cpu * cpu(unsigned const id) const;
/**
* Return object of primary CPU

@ -16,7 +16,6 @@
/* core includes */
#include <util.h>
#include <assert.h>
#include <kernel/configuration.h>
#include <kernel/double_list.h>
@ -137,109 +136,33 @@ class Kernel::Cpu_scheduler
template <typename T>
static Share * _share(T * const t) { return static_cast<Share *>(t); }
static void _reset(Claim * const c) {
_share(c)->_claim = _share(c)->_quota; }
static void _reset(Claim * const c);
void _reset_claims(unsigned const p)
{
_rcl[p].for_each([&] (Claim * const c) { _reset(c); });
_ucl[p].for_each([&] (Claim * const c) { _reset(c); });
}
void _next_round()
{
_residual = _quota;
_for_each_prio([&] (unsigned const p) { _reset_claims(p); });
}
void _consumed(unsigned const q)
{
if (_residual > q) { _residual -= q; }
else { _next_round(); }
}
void _set_head(Share * const s, unsigned const q, bool const c)
{
_head_quota = q;
_head_claims = c;
_head = s;
}
void _next_fill()
{
_head->_fill = _fill;
_fills.head_to_tail();
}
void _head_claimed(unsigned const r)
{
if (!_head->_quota) { return; }
_head->_claim = r > _head->_quota ? _head->_quota : r;
if (_head->_claim || !_head->_ready) { return; }
_rcl[_head->_prio].to_tail(_head);
}
void _head_filled(unsigned const r)
{
if (_fills.head() != _head) { return; }
if (r) { _head->_fill = r; }
else { _next_fill(); }
}
bool _claim_for_head()
{
for (signed p = Prio::max; p > Prio::min - 1; p--) {
Share * const s = _share(_rcl[p].head());
if (!s) { continue; }
if (!s->_claim) { continue; }
_set_head(s, s->_claim, 1);
return 1;
}
return 0;
}
bool _fill_for_head()
{
Share * const s = _share(_fills.head());
if (!s) { return 0; }
_set_head(s, s->_fill, 0);
return 1;
}
unsigned _trim_consumption(unsigned & q)
{
q = Genode::min(Genode::min(q, _head_quota), _residual);
if (!_head_yields) { return _head_quota - q; }
_head_yields = 0;
return 0;
}
void _reset_claims(unsigned const p);
void _next_round();
void _consumed(unsigned const q);
void _set_head(Share * const s, unsigned const q, bool const c);
void _next_fill();
void _head_claimed(unsigned const r);
void _head_filled(unsigned const r);
bool _claim_for_head();
bool _fill_for_head();
unsigned _trim_consumption(unsigned & q);
/**
* Fill 's' becomes a claim due to a quota donation
*/
void _quota_introduction(Share * const s)
{
if (s->_ready) { _rcl[s->_prio].insert_tail(s); }
else { _ucl[s->_prio].insert_tail(s); }
}
void _quota_introduction(Share * const s);
/**
* Claim 's' loses its state as claim due to quota revocation
*/
void _quota_revokation(Share * const s)
{
if (s->_ready) { _rcl[s->_prio].remove(s); }
else { _ucl[s->_prio].remove(s); }
}
void _quota_revokation(Share * const s);
/**
* The quota of claim 's' changes to 'q'
*/
void _quota_adaption(Share * const s, unsigned const q)
{
if (q) { if (s->_claim > q) { s->_claim = q; } }
else { _quota_revokation(s); }
}
void _quota_adaption(Share * const s, unsigned const q);
public:
@ -251,104 +174,47 @@ class Kernel::Cpu_scheduler
* \param q total amount of time quota that can be claimed by shares
* \param f time-slice length of the fill round-robin
*/
Cpu_scheduler(Share * const i, unsigned const q, unsigned const f)
: _idle(i), _head_yields(0), _quota(q), _residual(q), _fill(f)
{ _set_head(i, f, 0); }
Cpu_scheduler(Share * const i, unsigned const q, unsigned const f);
/**
* Update head according to the consumption of quota 'q'
*/
void update(unsigned q)
{
unsigned const r = _trim_consumption(q);
if (_head_claims) { _head_claimed(r); }
else { _head_filled(r); }
_consumed(q);
if (_claim_for_head()) { return; }
if (_fill_for_head()) { return; }
_set_head(_idle, _fill, 0);
}
void update(unsigned q);
/**
* Set 's1' ready and return whether this outdates current head
*/
bool ready_check(Share * const s1)
{
ready(s1);
Share * s2 = _head;
if (!s1->_claim) { return s2 == _idle; }
if (!_head_claims) { return 1; }
if (s1->_prio != s2->_prio) { return s1->_prio > s2->_prio; }
for (; s2 && s2 != s1; s2 = _share(Claim_list::next(s2))) ;
return !s2;
}
bool ready_check(Share * const s1);
/**
* Set share 's' ready
*/
void ready(Share * const s)
{
assert(!s->_ready && s != _idle);
s->_ready = 1;
s->_fill = _fill;
_fills.insert_tail(s);
if (!s->_quota) { return; }
_ucl[s->_prio].remove(s);
if (s->_claim) { _rcl[s->_prio].insert_head(s); }
else { _rcl[s->_prio].insert_tail(s); }
}
void ready(Share * const s);
/**
* Set share 's' unready
*/
void unready(Share * const s)
{
assert(s->_ready && s != _idle);
s->_ready = 0;
_fills.remove(s);
if (!s->_quota) { return; }
_rcl[s->_prio].remove(s);
_ucl[s->_prio].insert_tail(s);
}
void unready(Share * const s);
/**
* Current head loses its current claim/fill for this round
*/
void yield() { _head_yields = 1; }
void yield();
/**
* Remove share 's' from scheduler
*/
void remove(Share * const s)
{
assert(s != _idle && s != _head);
if (s->_ready) { _fills.remove(s); }
if (!s->_quota) { return; }
if (s->_ready) { _rcl[s->_prio].remove(s); }
else { _ucl[s->_prio].remove(s); }
}
void remove(Share * const s);
/**
* Insert share 's' into scheduler
*/
void insert(Share * const s)
{
assert(!s->_ready);
if (!s->_quota) { return; }
s->_claim = s->_quota;
_ucl[s->_prio].insert_head(s);
}
void insert(Share * const s);
/**
* Set quota of share 's' to 'q'
*/
void quota(Share * const s, unsigned const q)
{
assert(s != _idle);
if (s->_quota) { _quota_adaption(s, q); }
else if (q) { _quota_introduction(s); }
s->_quota = q;
}
void quota(Share * const s, unsigned const q);
/*
* Accessors

@ -51,86 +51,40 @@ class Kernel::Double_list
Item * _head;
Item * _tail;
void _connect_neighbors(Item * const i)
{
i->_prev->_next = i->_next;
i->_next->_prev = i->_prev;
}
void _to_tail(Item * const i)
{
if (i == _tail) { return; }
_connect_neighbors(i);
i->_prev = _tail;
i->_next = 0;
_tail->_next = i;
_tail = i;
}
void _connect_neighbors(Item * const i);
void _to_tail(Item * const i);
public:
/**
* Construct empty list
*/
Double_list(): _head(0), _tail(0) { }
Double_list();
/**
* Move item 'i' from its current list position to the tail
*/
void to_tail(Item * const i)
{
if (i == _head) { head_to_tail(); }
else { _to_tail(i); }
}
void to_tail(Item * const i);
/**
* Insert item 'i' as new tail into list
*/
void insert_tail(Item * const i)
{
if (_tail) { _tail->_next = i; }
else { _head = i; }
i->_prev = _tail;
i->_next = 0;
_tail = i;
}
void insert_tail(Item * const i);
/**
* Insert item 'i' as new head into list
*/
void insert_head(Item * const i)
{
if (_head) { _head->_prev = i; }
else { _tail = i; }
i->_next = _head;
i->_prev = 0;
_head = i;
}
void insert_head(Item * const i);
/**
* Remove item 'i' from list
*/
void remove(Item * const i)
{
if (i == _tail) { _tail = i->_prev; }
else { i->_next->_prev = i->_prev; }
if (i == _head) { _head = i->_next; }
else { i->_prev->_next = i->_next; }
}
void remove(Item * const i);
/**
* Move head item of list to tail position
*/
void head_to_tail()
{
if (!_head || _head == _tail) { return; }
_head->_prev = _tail;
_tail->_next = _head;
_head = _head->_next;
_head->_prev = 0;
_tail = _tail->_next;
_tail->_next = 0;
}
void head_to_tail();
/**
* Call function 'f' of type 'void (Item *)' for each item in the list

@ -16,7 +16,7 @@
/* core includes */
#include <kernel/fifo.h>
#include <assert.h>
#include <kernel/interface.h>
namespace Kernel
{
@ -67,21 +67,7 @@ class Kernel::Ipc_node
/**
* Buffer next request from request queue in 'r' to handle it
*/
void _receive_request(Message_buf * const r)
{
/* FIXME: invalid requests should be discarded */
if (r->size > _inbuf.size) {
PWRN("oversized request");
r->size = _inbuf.size;
}
/* fetch message */
Genode::memcpy(_inbuf.base, r->base, r->size);
_inbuf.size = r->size;
_inbuf.src = r->src;
/* update state */
_state = PREPARE_REPLY;
}
void _receive_request(Message_buf * const r);
/**
* Receive a given reply if one is expected
@ -89,105 +75,42 @@ class Kernel::Ipc_node
* \param base base of the reply payload
* \param size size of the reply payload
*/
void _receive_reply(void * const base, size_t const size)
{
/* FIXME: when discard awaited replies userland must get a hint */
if (size > _inbuf.size) {
PDBG("discard invalid IPC reply");
return;
}
/* receive reply */
Genode::memcpy(_inbuf.base, base, size);
_inbuf.size = size;
/* update state */
if (_state != PREPARE_AND_AWAIT_REPLY) { _state = INACTIVE; }
else { _state = PREPARE_REPLY; }
_send_request_succeeded();
}
void _receive_reply(void * const base, size_t const size);
/**
* Insert 'r' into request queue, buffer it if we were waiting for it
*/
void _announce_request(Message_buf * const r)
{
/* directly receive request if we've awaited it */
if (_state == AWAIT_REQUEST) {
_receive_request(r);
_await_request_succeeded();
return;
}
/* cannot receive yet, so queue request */
_request_queue.enqueue(r);
}
void _announce_request(Message_buf * const r);
/**
* Cancel all requests in request queue
*/
void _cancel_request_queue()
{
while (1) {
Message_buf * const r = _request_queue.dequeue();
if (!r) { return; }
r->src->_outbuf_request_cancelled();
}
}
void _cancel_request_queue();
/**
* Cancel request in outgoing buffer
*/
void _cancel_outbuf_request()
{
if (_outbuf_dst) {
_outbuf_dst->_announced_request_cancelled(&_outbuf);
_outbuf_dst = 0;
}
}
void _cancel_outbuf_request();
/**
* Cancel request in incoming buffer
*/
void _cancel_inbuf_request()
{
if (_inbuf.src) {
_inbuf.src->_outbuf_request_cancelled();
_inbuf.src = 0;
}
}
void _cancel_inbuf_request();
/**
* A request 'r' in inbuf or request queue was cancelled by sender
*/
void _announced_request_cancelled(Message_buf * const r)
{
if (_inbuf.src == r->src) {
_inbuf.src = 0;
return;
}
_request_queue.remove(r);
}
void _announced_request_cancelled(Message_buf * const r);
/**
* The request in the outbuf was cancelled by receiver
*/
void _outbuf_request_cancelled()
{
if (_outbuf_dst) {
_outbuf_dst = 0;
if (!_inbuf.src) { _state = INACTIVE; }
else { _state = PREPARE_REPLY; }
_send_request_failed();
}
}
void _outbuf_request_cancelled();
/**
* Return whether we are the source of a helping relationship
*/
bool _helps_outbuf_dst()
{
return (_state == PREPARE_AND_AWAIT_REPLY ||
_state == AWAIT_REPLY) && _outbuf_dst_help;
}
bool _helps_outbuf_dst();
/**
* IPC node returned from waiting due to reply receipt
@ -221,14 +144,8 @@ class Kernel::Ipc_node
public:
/**
* Constructor
*/
Ipc_node() : _state(INACTIVE)
{
_inbuf.src = 0;
_outbuf_dst = 0;
}
Ipc_node();
~Ipc_node();
/**
* Send a request and wait for the according reply
@ -241,41 +158,12 @@ class Kernel::Ipc_node
*/
void send_request(Ipc_node * const dst, void * const buf_base,
size_t const buf_size, size_t const msg_size,
bool help)
{
/* assertions */
assert(_state == INACTIVE || _state == PREPARE_REPLY);
/* prepare transmission of request message */
_outbuf.base = buf_base;
_outbuf.size = msg_size;
_outbuf.src = this;
_outbuf_dst = dst;
_outbuf_dst_help = 0;
/*
* Prepare reception of reply message but don't clear
* '_inbuf.origin' because we might also prepare a reply.
*/
_inbuf.base = buf_base;
_inbuf.size = buf_size;
/* update state */
if (_state != PREPARE_REPLY) { _state = AWAIT_REPLY; }
else { _state = PREPARE_AND_AWAIT_REPLY; }
/* announce request */
dst->_announce_request(&_outbuf);
/* set help relation after announcement to simplify scheduling */
_outbuf_dst_help = help;
}
bool help);
/**
* Return root destination of the helping-relation tree we are in
*/
Ipc_node * helping_sink() {
return _helps_outbuf_dst() ? _outbuf_dst->helping_sink() : this; }
Ipc_node * helping_sink();
/**
* Call function 'f' of type 'void (Ipc_node *)' for each helper
@ -300,25 +188,7 @@ class Kernel::Ipc_node
* \return whether a request could be received already
*/
bool await_request(void * const buf_base,
size_t const buf_size)
{
/* assertions */
assert(_state == INACTIVE);
/* prepare receipt of request */
_inbuf.base = buf_base;
_inbuf.size = buf_size;
_inbuf.src = 0;
/* if anybody already announced a request receive it */
if (!_request_queue.empty()) {
_receive_request(_request_queue.dequeue());
return true;
}
/* no request announced, so wait */
_state = AWAIT_REQUEST;
return false;
}
size_t const buf_size);
/**
* Reply to last request if there's any
@ -327,51 +197,12 @@ class Kernel::Ipc_node
* \param msg_size size of reply message
*/
void send_reply(void * const msg_base,
size_t const msg_size)
{
/* reply to the last request if we have to */
if (_state == PREPARE_REPLY) {
if (_inbuf.src) {
_inbuf.src->_receive_reply(msg_base, msg_size);
_inbuf.src = 0;
}
_state = INACTIVE;
}
}
/**
* Destructor
*/
~Ipc_node()
{
_cancel_request_queue();
_cancel_inbuf_request();
_cancel_outbuf_request();
}
size_t const msg_size);
/**
* If IPC node waits, cancel '_outbuf' to stop waiting
*/
void cancel_waiting()
{
switch (_state) {
case AWAIT_REPLY:
_cancel_outbuf_request();
_state = INACTIVE;
_send_request_failed();
return;
case AWAIT_REQUEST:
_state = INACTIVE;
_await_request_failed();
return;
case PREPARE_AND_AWAIT_REPLY:
_cancel_outbuf_request();
_state = PREPARE_REPLY;
_send_request_failed();
return;
default: return;
}
}
void cancel_waiting();
};
#endif /* _KERNEL__IPC_NODE_H_ */

@ -0,0 +1,63 @@
/*
* \brief Kernel lock
* \author Martin Stein
* \author Stefan Kalkowski
* \date 2012-11-30
*/
/*
* Copyright (C) 2012-2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _KERNEL__LOCK_H_
#define _KERNEL__LOCK_H_
/* Genode includes */
#include <base/lock_guard.h>
#include <cpu/atomic.h>
#include <cpu/memory_barrier.h>
namespace Kernel
{
/**
* Lock that enables synchronization inside the kernel
*/
class Lock;
Lock & data_lock();
}
class Kernel::Lock
{
private:
/* 0 = free, 1 = taken; volatile because it is polled in the spin loop below */
int volatile _locked;
public:
Lock() : _locked(0) { }
/**
* Request the lock
*
* Spins until the atomic compare-and-swap 0 -> 1 succeeds.
*/
void lock() { while (!Genode::cmpxchg(&_locked, 0, 1)); }
/**
* Free the lock
*/
void unlock()
{
/* make prior writes of the critical section visible before releasing */
Genode::memory_barrier();
_locked = 0;
}
/**
* Provide guard semantic for this type of lock
*/
typedef Genode::Lock_guard<Kernel::Lock> Guard;
};
#endif /* _KERNEL__LOCK_H_ */

@ -15,62 +15,10 @@
#ifndef _KERNEL__PD_H_
#define _KERNEL__PD_H_
/* Genode includes */
#include <cpu/atomic.h>
#include <cpu/memory_barrier.h>
/* core includes */
#include <kernel/early_translations.h>
#include <kernel/object.h>
#include <kernel/cpu.h>
#include <assert.h>
#include <page_slab.h>
/* structure of the mode transition */
extern int _mt_begin;
extern int _mt_end;
extern int _mt_user_entry_pic;
extern Genode::addr_t _mt_client_context_ptr;
extern Genode::addr_t _mt_master_context_begin;
extern Genode::addr_t _mt_master_context_end;
namespace Kernel
{
/**
* Lock that enables synchronization inside the kernel
*/
class Lock;
}
class Kernel::Lock
{
private:
int volatile _locked;
public:
Lock() : _locked(0) { }
/**
* Request the lock
*/
void lock() { while (!Genode::cmpxchg(&_locked, 0, 1)); }
/**
* Free the lock
*/
void unlock()
{
Genode::memory_barrier();
_locked = 0;
}
/**
* Provide guard semantic for this type of lock
*/
typedef Genode::Lock_guard<Kernel::Lock> Guard;
};
namespace Kernel
{
@ -99,8 +47,6 @@ namespace Kernel
typedef Object_pool<Pd> Pd_pool;
Pd_pool * pd_pool();
Lock & data_lock();
}
class Kernel::Mode_transition_control
@ -122,27 +68,17 @@ class Kernel::Mode_transition_control
/**
* Return size of the mode transition
*/
static size_t _size() { return (addr_t)&_mt_end - (addr_t)&_mt_begin; }
static size_t _size();
/**
* Return size of master-context space in the mode transition
*/
static size_t _master_context_size()
{
addr_t const begin = (addr_t)&_mt_master_context_begin;
addr_t const end = (addr_t)&_mt_master_context_end;
return end - begin;
}
static size_t _master_context_size();
/**
* Return virtual address of the user entry-code
*/
static addr_t _virt_user_entry()
{
addr_t const phys = (addr_t)&_mt_user_entry_pic;
addr_t const phys_base = (addr_t)&_mt_begin;
return VIRT_BASE + (phys - phys_base);
}
static addr_t _virt_user_entry();
public:
@ -167,15 +103,7 @@ class Kernel::Mode_transition_control
* \param ram RAM donation for mapping (first try without)
*/
void map(Genode::Translation_table * tt,
Genode::Page_slab * alloc)
{
try {
addr_t const phys_base = (addr_t)&_mt_begin;
tt->insert_translation(VIRT_BASE, phys_base, SIZE,
Page_flags::mode_transition(), alloc);
} catch(...) {
PERR("Inserting exception vector in page table failed!"); }
}
Genode::Page_slab * alloc);
/**
* Continue execution of client context
@ -188,21 +116,7 @@ class Kernel::Mode_transition_control
void switch_to(Cpu::Context * const context,
unsigned const cpu,
addr_t const entry_raw,
addr_t const context_ptr_base)
{
/* override client-context pointer of the executing CPU */
size_t const context_ptr_offset = cpu * sizeof(context);
addr_t const context_ptr = context_ptr_base + context_ptr_offset;
*(void * *)context_ptr = context;
/* unlock kernel data */
data_lock().unlock();
/* call assembly code that applies the virtual-machine context */
typedef void (* Entry)();
Entry __attribute__((noreturn)) const entry = (Entry)entry_raw;
entry();
}
addr_t const context_ptr_base);
/**
* Continue execution of user context
@ -211,11 +125,7 @@ class Kernel::Mode_transition_control
* \param cpu kernel name of targeted CPU
*/
void switch_to_user(Cpu::Context * const context,
unsigned const cpu)
{
switch_to(context, cpu, _virt_user_entry(),
(addr_t)&_mt_client_context_ptr);
}
unsigned const cpu);
} __attribute__((aligned(Mode_transition_control::ALIGN)));

@ -15,7 +15,6 @@
#define _KERNEL__SIGNAL_RECEIVER_H_
/* Genode includes */
#include <util/fifo.h>
#include <base/signal.h>
/* core include */
@ -92,11 +91,6 @@ class Kernel::Signal_handler
Fifo_element _handlers_fe;
Signal_receiver * _receiver;
/**
* Backend for for destructor and cancel_waiting
*/
void _cancel_waiting();
/**
* Let the handler block for signal receipt
*
@ -122,24 +116,13 @@ class Kernel::Signal_handler
public:
/**
* Constructor
*/
Signal_handler()
:
_handlers_fe(this),
_receiver(0)
{ }
/**
* Destructor
*/
virtual ~Signal_handler() { _cancel_waiting(); }
Signal_handler();
virtual ~Signal_handler();
/**
* Stop waiting for a signal receiver
*/
void cancel_waiting() { _cancel_waiting(); }
void cancel_waiting();
};
class Kernel::Signal_context_killer
@ -150,11 +133,6 @@ class Kernel::Signal_context_killer
Signal_context * _context;
/**
* Backend for destructor and cancel_waiting
*/
void _cancel_waiting();
/**
* Notice that the kill operation is pending
*/
@ -180,20 +158,13 @@ class Kernel::Signal_context_killer
public:
/**
* Constructor
*/
Signal_context_killer() : _context(0) { }
/**
* Destructor
*/
virtual ~Signal_context_killer() { _cancel_waiting(); }
Signal_context_killer();
virtual ~Signal_context_killer();
/**
* Stop waiting for a signal context
*/
void cancel_waiting() { _cancel_waiting(); }
void cancel_waiting();
};
class Kernel::Signal_context
@ -239,16 +210,12 @@ class Kernel::Signal_context
/**
* Called by receiver when all submits have been delivered
*/
void _delivered()
{
_submits = 0;
_ack = 0;
}
void _delivered();
/**
* Notice that the killer of the context has cancelled waiting
*/
void _killer_cancelled() { _killer = 0; }
void _killer_cancelled();
public:
@ -272,11 +239,7 @@ class Kernel::Signal_context
*
* \param h handler that shall be attached or 0 to detach handler
*/
void ack_handler(Signal_ack_handler * const h)
{
_ack_handler = h ? h : &_default_ack_handler;
_ack_handler->_signal_context = this;
}
void ack_handler(Signal_ack_handler * const h);
/**
* Submit the signal
@ -286,32 +249,12 @@ class Kernel::Signal_context
* \retval 0 succeeded
* \retval -1 failed
*/
int submit(unsigned const n)
{
if (_killed || _submits >= (unsigned)~0 - n) { return -1; }
_submits += n;
if (_ack) { _deliverable(); }
return 0;
}
int submit(unsigned const n);
/**
* Acknowledge delivery of signal
*/
void ack()
{
_ack_handler->_signal_acknowledged();
if (_ack) { return; }
if (!_killed) {
_ack = 1;
_deliverable();
return;
}
if (_killer) {
_killer->_context = 0;
_killer->_signal_context_kill_done();
_killer = 0;
}
}
void ack();
/**
* Destruct context or prepare to do it as soon as delivery is done
@ -321,25 +264,7 @@ class Kernel::Signal_context
* \retval 0 succeeded
* \retval -1 failed
*/
int kill(Signal_context_killer * const k)
{
/* check if in a kill operation or already killed */
if (_killed) {
if (_ack) { return 0; }
return -1;
}
/* kill directly if there is no unacknowledged delivery */
if (_ack) {
_killed = 1;
return 0;
}
/* wait for delivery acknowledgement */
_killer = k;
_killed = 1;
_killer->_context = this;
_killer->_signal_context_kill_pending();
return 0;
}
int kill(Signal_context_killer * const k);
};
class Kernel::Signal_receiver
@ -361,79 +286,33 @@ class Kernel::Signal_receiver
/**
* Recognize that context 'c' has submits to deliver
*/
void _add_deliverable(Signal_context * const c)
{
if (!c->_deliver_fe.is_enqueued()) {
_deliver.enqueue(&c->_deliver_fe);
}
_listen();
}
void _add_deliverable(Signal_context * const c);
/**
* Deliver as much submits as possible
*/
void _listen()
{
while (1)
{
/* check for deliverable signals and waiting handlers */
if (_deliver.empty() || _handlers.empty()) { return; }
/* create a signal data-object */
typedef Genode::Signal_context * Signal_imprint;
auto const context = _deliver.dequeue()->object();
auto const imprint =
reinterpret_cast<Signal_imprint>(context->_imprint);
Signal::Data data(imprint, context->_submits);
/* communicate signal data to handler */
auto const handler = _handlers.dequeue()->object();
handler->_receiver = 0;
handler->_receive_signal(&data, sizeof(data));
context->_delivered();
}
}
void _listen();
/**
* Notice that a context of the receiver has been destructed
*
* \param c destructed context
*/
void _context_destructed(Signal_context * const c)
{
_contexts.remove(&c->_contexts_fe);
if (!c->_deliver_fe.is_enqueued()) { return; }
_deliver.remove(&c->_deliver_fe);
}
void _context_destructed(Signal_context * const c);
/**
* Notice that handler 'h' has cancelled waiting
*/
void _handler_cancelled(Signal_handler * const h)
{
_handlers.remove(&h->_handlers_fe);
}
void _handler_cancelled(Signal_handler * const h);
/**
* Assign context 'c' to the receiver
*/
void _add_context(Signal_context * const c)
{
_contexts.enqueue(&c->_contexts_fe);
}
void _add_context(Signal_context * const c);
public:
/**
* Destructor
*/
~Signal_receiver()
{
/* destruct all attached contexts */
while (Signal_context * c = _contexts.dequeue()->object()) {
c->~Signal_context();
}
}
~Signal_receiver();
/**
* Let a handler 'h' wait for signals of the receiver
@ -441,20 +320,12 @@ class Kernel::Signal_receiver
* \retval 0 succeeded
* \retval -1 failed
*/
int add_handler(Signal_handler * const h)
{
if (h->_receiver) { return -1; }
_handlers.enqueue(&h->_handlers_fe);
h->_receiver = this;
h->_await_signal(this);
_listen();
return 0;
}
int add_handler(Signal_handler * const h);
/**
* Return whether any of the contexts of this receiver is deliverable
*/
bool deliverable() { return !_deliver.empty(); }
bool deliverable();
};
#endif /* _KERNEL__SIGNAL_RECEIVER_H_ */

@ -106,8 +106,6 @@ namespace Genode
using Level_1_stage_2_translation_table =
Level_x_translation_table<Level_2_stage_2_translation_table,
STAGE2, SIZE_LOG2_256GB>;
using Translation_table = Level_1_stage_1_translation_table;
}
@ -520,4 +518,7 @@ class Genode::Level_x_translation_table :
this->_range_op(vo, 0, size, Remove_func(slab)); }
};
namespace Genode {
class Translation_table : public Level_1_stage_1_translation_table { }; }
#endif /* _ARM_V7__LONG_TRANSLATION_TABLE_H_ */

@ -66,8 +66,6 @@ namespace Genode
Page_directory<Level_3_translation_table,
SIZE_LOG2_1GB, SIZE_LOG2_512GB>;
using Translation_table = Pml4_table;
/**
* IA-32e common descriptor.
*
@ -683,4 +681,7 @@ class Genode::Pml4_table
}
} __attribute__((aligned(1 << ALIGNM_LOG2)));
namespace Genode {
class Translation_table : public Pml4_table { }; }
#endif /* _TRANSLATION_TABLE_H_ */

@ -64,12 +64,6 @@ namespace Kernel
** Cpu_job **
*************/
Cpu_job::~Cpu_job()
{
if (!_cpu) { return; }
_cpu->scheduler()->remove(this);
}
void Cpu_job::_activate_own_share() { _cpu->schedule(this); }
@ -121,6 +115,17 @@ void Cpu_job::quota(unsigned const q)
}
/* construct a job with scheduling priority 'p' and time quota 'q';
 * '_cpu' starts as 0, i.e., the job is not yet affiliated to a CPU */
Cpu_job::Cpu_job(Cpu_priority const p, unsigned const q)
: Cpu_share(p, q), _cpu(0) { }
Cpu_job::~Cpu_job()
{
/* a job that was never affiliated to a CPU needs no de-scheduling */
if (!_cpu) { return; }
_cpu->scheduler()->remove(this);
}
/**************
** Cpu_idle **
**************/
@ -128,6 +133,9 @@ void Cpu_job::quota(unsigned const q)
void Cpu_idle::proceed(unsigned const cpu) { mtc()->switch_to_user(this, cpu); }
/* idle loop: halt the CPU until the next interrupt arrives, forever */
void Cpu_idle::_main() { while (1) { Genode::Cpu::wait_for_interrupt(); } }
/*********
** Cpu **
*********/
@ -158,6 +166,50 @@ void Cpu::Ipi::trigger(unsigned const cpu_id)
/* register this inter-processor interrupt under id Pic::IPI in pool 'p' */
Cpu::Ipi::Ipi(Irq::Pool &p) : Irq(Pic::IPI, p) { }
bool Cpu::interrupt(unsigned const irq_id)
{
Irq * const irq = object(irq_id);
if (!irq) return false;
irq->occurred();
return true;
}
void Cpu::exception()
{
/* update old job */
Job * const old_job = scheduled_job();
old_job->exception(_id);
/* update scheduler */
unsigned const old_time = _scheduler