hw: clean up interface of Kernel::Thread

ref #874
This commit is contained in:
Martin Stein 2013-10-16 20:53:59 +02:00 committed by Norman Feske
parent ffb26eb501
commit 6b9376bb01
4 changed files with 507 additions and 515 deletions

View File

@ -19,6 +19,7 @@
#include <root/root.h>
/* Core includes */
#include <tlb.h>
#include <platform.h>
#include <platform_thread.h>
#include <address_space.h>

View File

@ -106,25 +106,15 @@ namespace Kernel
static char idle_stack[DEFAULT_STACK_SIZE]
__attribute__((aligned(Cpu::DATA_ACCESS_ALIGNM)));
static Thread idle((Platform_thread *)0);
static bool initial = 1;
if (initial)
{
/* initialize idle thread */
static bool init = 0;
if (!init) {
enum { STACK_SIZE = sizeof(idle_stack)/sizeof(idle_stack[0]) };
void * const ip = (void *)&idle_main;
void * const sp = (void *)&idle_stack[STACK_SIZE];
/*
* Idle doesn't use its UTCB pointer, thus
* utcb_phys = utcb_virt = 0 is safe.
* Base-hw doesn't support multiple cores, thus
* cpu_no = 0 is ok. We don't use 'start' to avoid
* recursive call of 'cpu_scheduler()'.
*/
idle.prepare_to_start(ip, sp, 0, core_id(), 0, 0, 0);
initial = 0;
idle.init(ip, sp, 0, core_id(), 0, 0, 0, 0);
init = 1;
}
/* create scheduler with a permanent idle thread */
/* create CPU scheduler with a permanent idle thread */
static Cpu_scheduler cpu_sched(&idle);
return &cpu_sched;
}
@ -220,15 +210,17 @@ extern "C" void kernel()
*/
/* create the core main thread */
static Native_utcb cm_utcb;
static char cm_stack[DEFAULT_STACK_SIZE]
__attribute__((aligned(Cpu::DATA_ACCESS_ALIGNM)));
static Thread core_main((Platform_thread *)0);
_main_utcb = &cm_utcb;
enum { CM_STACK_SIZE = sizeof(cm_stack)/sizeof(cm_stack[0]) + 1 };
core_main.start((void *)CORE_MAIN,
(void *)&cm_stack[CM_STACK_SIZE - 1],
0, core_id(), &cm_utcb, &cm_utcb, 1);
{
static Native_utcb utcb;
static char stack[DEFAULT_STACK_SIZE]
__attribute__((aligned(Cpu::DATA_ACCESS_ALIGNM)));
enum { STACK_SIZE = sizeof(stack)/sizeof(stack[0]) + 1 };
void * const ip = (void *)CORE_MAIN;
void * const sp = (void *)&stack[STACK_SIZE - 1];
_main_utcb = &utcb;
static Thread t((Platform_thread *)0);
t.init(ip, sp, 0, core_id(), &utcb, &utcb, 1, 1);
}
/* kernel initialization finished */
reset_lap_time();

View File

@ -17,44 +17,248 @@
/* core includes */
#include <kernel/kernel.h>
#include <kernel/thread.h>
#include <kernel/pd.h>
#include <kernel/vm.h>
#include <platform_thread.h>
#include <platform_pd.h>
using namespace Kernel;
namespace Kernel
typedef Genode::Thread_state Thread_state;
bool Thread::_core() const
{
typedef Genode::Thread_state Thread_state;
return pd_id() == core_id();
}
char const * Kernel::Thread::label()
Kernel::Pd * Thread::_pd() const
{
if (!platform_thread()) {
if (!phys_utcb()) { return "idle"; }
return "core";
return Pd::pool()->object(pd_id());
}
void Thread::_signal_context_kill_pending()
{
/*
 * Block this thread until a pending signal-context kill has completed:
 * leave the SCHEDULED state and withdraw from CPU scheduling.
 */
assert(_state == SCHEDULED);
_state = AWAITS_SIGNAL_CONTEXT_KILL;
cpu_scheduler()->remove(this);
}
void Thread::_signal_context_kill_done()
{
/*
 * The signal-context kill this thread was blocked on has completed:
 * hand back result 0 in user argument 0 and resume scheduling.
 */
assert(_state == AWAITS_SIGNAL_CONTEXT_KILL);
user_arg_0(0);
_schedule();
}
void Thread::_signal_receiver_kill_pending()
{
/*
 * Block this thread until a pending signal-receiver kill has completed:
 * leave the SCHEDULED state and withdraw from CPU scheduling.
 */
assert(_state == SCHEDULED);
_state = AWAITS_SIGNAL_RECEIVER_KILL;
cpu_scheduler()->remove(this);
}
void Thread::_signal_receiver_kill_done()
{
/*
 * The signal-receiver kill this thread was blocked on has completed:
 * hand back result 0 in user argument 0 and resume scheduling.
 */
assert(_state == AWAITS_SIGNAL_RECEIVER_KILL);
user_arg_0(0);
_schedule();
}
void Thread::_await_signal(Signal_receiver * const receiver)
{
/*
 * Block this thread until a signal arrives: withdraw from CPU
 * scheduling and remember the receiver the thread is waiting at.
 */
cpu_scheduler()->remove(this);
_state = AWAITS_SIGNAL;
_signal_receiver = receiver;
}
void Thread::_receive_signal(void * const base, size_t const size)
{
/*
 * Deliver a signal payload to this thread: copy it into the thread's
 * physical UTCB (payload must fit) and make the thread schedulable.
 */
assert(_state == AWAITS_SIGNAL && size <= _phys_utcb->size());
Genode::memcpy(_phys_utcb->base(), base, size);
_schedule();
}
void Thread::_received_ipc_request(size_t const s)
{
switch (_state) {
case SCHEDULED:
user_arg_0(s);
return;
default:
PERR("wrong thread state to receive IPC");
_stop();
return;
}
return platform_thread()->name();
}
char const * Kernel::Thread::pd_label()
void Thread::_await_ipc()
{
if (core()) { return "core"; }
if (!pd()) { return "?"; }
return pd()->platform_pd()->label();
switch (_state) {
case SCHEDULED:
cpu_scheduler()->remove(this);
_state = AWAITS_IPC;
case AWAITS_PAGER:
return;
default:
PERR("wrong thread state to await IPC");
_stop();
return;
}
}
void Thread::_await_ipc_succeeded(bool const reply, size_t const s)
{
/*
 * An awaited IPC transfer has succeeded. How to proceed depends on
 * what kind of IPC this thread was blocked on.
 *
 * \param reply  wether the received message is a reply
 * \param s      size of the received message
 */
switch (_state) {
case AWAITS_IPC:
/* FIXME: return error codes on all IPC transfers */
if (reply) {
/* publish reply size via the UTCB, report success, resume */
_phys_utcb->ipc_msg_size(s);
user_arg_0(0);
_schedule();
} else {
/* a request: hand the message size to the user, resume */
user_arg_0(s);
_schedule();
}
return;
case AWAITS_PAGER_IPC:
/* pagefault got resolved while user awaited it, simply resume */
_schedule();
return;
case AWAITS_PAGER:
/* pagefault got resolved, but wait for an explicit resume */
_state = AWAITS_RESUME;
return;
default:
PERR("wrong thread state to receive IPC");
_stop();
return;
}
}
void Thread::_await_ipc_failed(bool const reply)
{
/*
 * An awaited IPC transfer has failed. Depending on the state this
 * thread was blocked in, either report the error or stop the thread.
 *
 * \param reply  wether the failed transfer was a reply
 */
switch (_state) {
case AWAITS_IPC:
/* FIXME: return error codes on all IPC transfers */
if (reply) {
/* report failure of the reply via user argument 0, resume */
user_arg_0(-1);
_schedule();
} else {
PERR("failed to receive IPC");
_stop();
}
return;
case SCHEDULED:
PERR("failed to receive IPC");
_stop();
return;
case AWAITS_PAGER_IPC:
PERR("failed to get pagefault resolved");
_stop();
return;
case AWAITS_PAGER:
PERR("failed to get pagefault resolved");
_stop();
return;
default:
PERR("wrong thread state to cancel IPC");
_stop();
return;
}
}
void Thread::_received_irq()
{
/* the awaited IRQ occurred, make the thread schedulable again */
assert(_state == AWAITS_IRQ);
_schedule();
}
void Thread::_awaits_irq()
{
/* block this thread until its IRQ occurs */
cpu_scheduler()->remove(this);
_state = AWAITS_IRQ;
}
int Thread::_resume()
{
/*
 * Try to escape from a blocking state, if in any, and resume execution
 *
 * \retval -1  failed
 * \retval  0  succeeded, execution was paused
 * \retval  1  succeeded, execution was not paused
 */
switch (_state) {
case AWAITS_RESUME:
_schedule();
return 0;
case AWAITS_PAGER:
/* let the thread block until the pagefault reply arrives */
_state = AWAITS_PAGER_IPC;
return 0;
case AWAITS_PAGER_IPC:
/* cancel the pending pagefault IPC */
Ipc_node::cancel_waiting();
return 0;
case SCHEDULED:
return 1;
case AWAITS_IPC:
Ipc_node::cancel_waiting();
return 0;
case AWAITS_IRQ:
Irq_receiver::cancel_waiting();
return 0;
case AWAITS_SIGNAL:
Signal_handler::cancel_waiting();
return 0;
case AWAITS_SIGNAL_CONTEXT_KILL:
Signal_context_killer::cancel_waiting();
return 0;
case AWAITS_SIGNAL_RECEIVER_KILL:
Signal_receiver_killer::cancel_waiting();
return 0;
case AWAITS_START:
case STOPPED:;
}
/* AWAITS_START and STOPPED threads cannot be resumed */
PERR("failed to resume thread");
return -1;
}
void Thread::_pause()
{
/*
 * Pause execution: withdraw the thread from CPU scheduling until it
 * gets resumed explicitly. Only sensible for active or paused threads.
 */
assert(_state == AWAITS_RESUME || _state == SCHEDULED);
cpu_scheduler()->remove(this);
_state = AWAITS_RESUME;
}
void Thread::_schedule()
{
/* unconditionally hand the thread to the CPU scheduler */
cpu_scheduler()->insert(this);
_state = SCHEDULED;
}
Thread::Thread(Platform_thread * const platform_thread)
:
_platform_thread(platform_thread), _state(AWAITS_START),
_pager(0), _pd_id(0), _phys_utcb(0), _virt_utcb(0),
_signal_receiver(0)
{
/* threads without a userland backend get the maximum priority */
priority = _platform_thread ? _platform_thread->priority()
                            : Kernel::Priority::MAX;
}
void
Kernel::Thread::prepare_to_start(void * const ip,
void * const sp,
unsigned const cpu_id,
unsigned const pd_id,
Native_utcb * const utcb_phys,
Native_utcb * const utcb_virt,
bool const main)
Thread::init(void * const ip, void * const sp, unsigned const cpu_id,
unsigned const pd_id, Native_utcb * const utcb_phys,
Native_utcb * const utcb_virt, bool const main,
bool const start)
{
assert(_state == AWAITS_START)
@ -66,34 +270,109 @@ Kernel::Thread::prepare_to_start(void * const ip,
_virt_utcb = utcb_virt;
_pd_id = pd_id;
/* join a protection domain */
/* join protection domain */
Pd * const pd = Pd::pool()->object(_pd_id);
assert(pd);
addr_t const tlb = pd->tlb()->base();
/* initialize CPU context */
User_context * const c = static_cast<User_context *>(this);
bool const core = (_pd_id == core_id());
if (!main) { c->init_thread(ip, sp, tlb, pd_id); }
else if (!core) { c->init_main_thread(ip, utcb_virt, tlb, pd_id); }
else if (!_core()) { c->init_main_thread(ip, utcb_virt, tlb, pd_id); }
else { c->init_core_main_thread(ip, sp, tlb, pd_id); }
/* print log message */
if (START_VERBOSE) {
PINF("in program %u '%s' start thread %u '%s'",
this->pd_id(), pd_label(), id(), label());
this->pd_id(), pd_label(), id(), label());
}
/* start execution */
if (start) { _schedule(); }
}
void Thread::_stop()
{
/*
 * Suspend the thread unrecoverably: withdraw it from CPU scheduling
 * if it is currently scheduled and mark it STOPPED.
 */
if (_state == SCHEDULED) { cpu_scheduler()->remove(this); }
_state = STOPPED;
}
void Thread::handle_exception()
{
/* dispatch according to the CPU exception that occurred */
switch (cpu_exception) {
case SUPERVISOR_CALL:
_syscall();
return;
case PREFETCH_ABORT:
case DATA_ABORT:
/* both abort types are treated as MMU exceptions */
_mmu_exception();
return;
case INTERRUPT_REQUEST:
case FAST_INTERRUPT_REQUEST:
/* normal and fast IRQs share one handler */
handle_interrupt();
return;
default:
PERR("unknown exception");
_stop();
reset_lap_time();
}
}
Kernel::Thread::Thread(Platform_thread * const platform_thread)
:
_platform_thread(platform_thread), _state(AWAITS_START),
_pager(0), _pd_id(0), _phys_utcb(0), _virt_utcb(0),
_signal_receiver(0)
void Thread::_receive_yielded_cpu()
{
if (_platform_thread) { priority = _platform_thread->priority(); }
else { priority = Kernel::Priority::MAX; }
if (_state == AWAITS_RESUME) { _schedule(); }
else { PERR("failed to receive yielded CPU"); }
}
void Thread::proceed()
{
mtc()->continue_user(static_cast<Cpu::Context *>(this));
}
void Thread::_mmu_exception()
{
/*
 * Handle an exception thrown by the memory management unit: block the
 * thread and forward the pagefault to its pager via IPC.
 */
/* pause thread */
cpu_scheduler()->remove(this);
_state = AWAITS_PAGER;
/* check out cause and attributes */
addr_t va = 0;
bool w = 0;
if (!pagefault(va, w)) {
/* thread stays blocked in AWAITS_PAGER on an unknown exception */
PERR("unknown MMU exception");
return;
}
/* inform pager */
_pagefault = Pagefault(id(), (Tlb *)tlb(), ip, va, w);
void * const base = &_pagefault;
size_t const size = sizeof(_pagefault);
Ipc_node::send_request_await_reply(_pager, base, size, base, size);
}
char const * Kernel::Thread::label() const
{
/* threads with a userland backend are named after it */
if (platform_thread()) { return platform_thread()->name(); }
/* of the backend-less threads only the core main thread has a UTCB */
return _phys_utcb ? "core" : "idle";
}
char const * Kernel::Thread::pd_label() const
{
if (_core()) { return "core"; }
if (!_pd()) { return "?"; }
return _pd()->platform_pd()->label();
}
@ -103,7 +382,7 @@ Kernel::Thread::Thread(Platform_thread * const platform_thread)
void Thread::_syscall_new_pd()
{
/* check permissions */
if (pd_id() != core_id()) {
if (!_core()) {
PERR("not entitled to create protection domain");
user_arg_0(0);
return;
@ -123,7 +402,7 @@ void Thread::_syscall_new_pd()
void Thread::_syscall_kill_pd()
{
/* check permissions */
if (pd_id() != core_id()) {
if (!_core()) {
PERR("not entitled to destruct protection domain");
user_arg_0(-1);
return;
@ -153,7 +432,7 @@ void Thread::_syscall_kill_pd()
void Thread::_syscall_new_thread()
{
/* check permissions */
assert(pd_id() == core_id());
assert(_core());
/* dispatch arguments */
Syscall_arg const arg1 = user_arg_1();
@ -173,7 +452,7 @@ void Thread::_syscall_new_thread()
void Thread::_syscall_delete_thread()
{
/* check permissions */
assert(pd_id() == core_id());
assert(_core());
/* get targeted thread */
unsigned thread_id = (unsigned)user_arg_1();
@ -190,7 +469,7 @@ void Thread::_syscall_delete_thread()
void Thread::_syscall_start_thread()
{
/* check permissions */
assert(pd_id() == core_id());
assert(_core());
/* dispatch arguments */
Platform_thread * pt = (Platform_thread *)user_arg_1();
@ -206,7 +485,8 @@ void Thread::_syscall_start_thread()
unsigned const pd_id = pt->pd_id();
Native_utcb * const utcb_p = pt->phys_utcb();
Native_utcb * const utcb_v = pt->virt_utcb();
t->start(ip, sp, cpu_id, pd_id, utcb_p, utcb_v, pt->main_thread());
bool const main = pt->main_thread();
t->init(ip, sp, cpu_id, pd_id, utcb_p, utcb_v, main, 1);
/* return software TLB that the thread is assigned to */
Pd::Pool * const pp = Pd::pool();
@ -225,17 +505,17 @@ void Thread::_syscall_pause_thread()
/* shortcut for a thread to pause itself */
if (!tid) {
pause();
_pause();
user_arg_0(0);
return;
}
/* get targeted thread and check permissions */
Thread * const t = Thread::pool()->object(tid);
assert(t && (pd_id() == core_id() || this == t));
assert(t && (_core() || this == t));
/* pause targeted thread */
t->pause();
t->_pause();
user_arg_0(0);
}
@ -253,13 +533,13 @@ void Thread::_syscall_resume_thread()
return;
}
/* check permissions */
if (pd_id() != core_id() && pd_id() != t->pd_id()) {
if (!_core() && pd_id() != t->pd_id()) {
PERR("not entitled to resume thread");
user_arg_0(-1);
return;
}
/* resume targeted thread */
user_arg_0(t->resume());
user_arg_0(t->_resume());
}
@ -276,14 +556,14 @@ void Thread::_syscall_resume_faulter()
return;
}
/* check permissions */
if (pd_id() != core_id() && pd_id() != t->pd_id()) {
if (!_core() && pd_id() != t->pd_id()) {
PERR("not entitled to resume thread");
user_arg_0(-1);
return;
}
/* writeback translation table and resume faulter */
Cpu::tlb_insertions();
t->resume();
t->_resume();
}
@ -293,7 +573,7 @@ void Thread::_syscall_resume_faulter()
void Thread::_syscall_yield_thread()
{
Thread * const t = Thread::pool()->object(user_arg_1());
if (t) { t->receive_yielded_cpu(); }
if (t) { t->_receive_yielded_cpu(); }
cpu_scheduler()->yield();
}
@ -311,7 +591,7 @@ void Thread::_syscall_current_thread_id()
void Thread::_syscall_get_thread()
{
/* check permissions */
if (pd_id() != core_id()) {
if (!_core()) {
PERR("not entitled to read address of platform thread");
user_arg_0(0);
return;
@ -335,7 +615,7 @@ void Thread::_syscall_get_thread()
*/
void Thread::_syscall_wait_for_request()
{
wait_for_request();
Ipc_node::await_request(_phys_utcb->base(), _phys_utcb->size());
}
@ -344,20 +624,30 @@ void Thread::_syscall_wait_for_request()
*/
void Thread::_syscall_request_and_wait()
{
/* get IPC receiver */
Thread * const t = Thread::pool()->object(user_arg_1());
assert(t);
Thread * const dst = Thread::pool()->object(user_arg_1());
size_t const msg_size = (size_t)user_arg_2();
assert(dst);
/* do IPC */
request_and_wait(t, (size_t)user_arg_2());
Ipc_node::send_request_await_reply(
dst, _phys_utcb->base(), msg_size,
_phys_utcb->ipc_msg_base(),
_phys_utcb->max_ipc_msg_size());
}
/**
* Do specific syscall for this thread, for details see 'syscall.h'
*/
void Thread::_syscall_reply() {
reply((size_t)user_arg_1(), (bool)user_arg_2()); }
void Thread::_syscall_reply()
{
size_t const msg_size = user_arg_1();
bool const await_request = user_arg_2();
Ipc_node::send_reply(_phys_utcb->base(), msg_size);
if (await_request) {
Ipc_node::await_request(_phys_utcb->base(), _phys_utcb->size());
} else { user_arg_0(0); }
}
/**
@ -366,7 +656,7 @@ void Thread::_syscall_reply() {
void Thread::_syscall_set_pager()
{
/* check permissions */
if (pd_id() != core_id()) {
if (!_core()) {
PERR("not entitled to set pager");
return;
}
@ -388,7 +678,7 @@ void Thread::_syscall_set_pager()
*/
void Thread::_syscall_update_pd()
{
assert(pd_id() == core_id());
assert(_core());
Cpu::flush_tlb_by_pid(user_arg_1());
}
@ -398,11 +688,11 @@ void Thread::_syscall_update_pd()
*/
void Thread::_syscall_update_region()
{
assert(pd_id() == core_id());
assert(_core());
/* FIXME we don't handle instruction caches by now */
Cpu::flush_data_cache_by_virt_region((addr_t)user_arg_1(),
(size_t)user_arg_2());
(size_t)user_arg_2());
}
@ -411,7 +701,7 @@ void Thread::_syscall_update_region()
*/
void Thread::_syscall_allocate_irq()
{
assert(pd_id() == core_id());
assert(_core());
unsigned irq = user_arg_1();
user_arg_0(allocate_irq(irq));
}
@ -422,7 +712,7 @@ void Thread::_syscall_allocate_irq()
*/
void Thread::_syscall_free_irq()
{
assert(pd_id() == core_id());
assert(_core());
unsigned irq = user_arg_1();
user_arg_0(free_irq(irq));
}
@ -433,7 +723,7 @@ void Thread::_syscall_free_irq()
*/
void Thread::_syscall_await_irq()
{
assert(pd_id() == core_id());
assert(_core());
await_irq();
}
@ -452,10 +742,10 @@ void Thread::_syscall_print_char()
*/
void Thread::_syscall_read_thread_state()
{
assert(pd_id() == core_id());
assert(_core());
Thread * const t = Thread::pool()->object(user_arg_1());
if (!t) PDBG("Targeted thread unknown");
Thread_state * const ts = (Thread_state *)phys_utcb()->base();
Thread_state * const ts = (Thread_state *)_phys_utcb->base();
t->Cpu::Context::read_cpu_state(ts);
}
@ -465,10 +755,10 @@ void Thread::_syscall_read_thread_state()
*/
void Thread::_syscall_write_thread_state()
{
assert(pd_id() == core_id());
assert(_core());
Thread * const t = Thread::pool()->object(user_arg_1());
if (!t) PDBG("Targeted thread unknown");
Thread_state * const ts = (Thread_state *)phys_utcb()->base();
Thread_state * const ts = (Thread_state *)_phys_utcb->base();
t->Cpu::Context::write_cpu_state(ts);
}
@ -479,7 +769,7 @@ void Thread::_syscall_write_thread_state()
void Thread::_syscall_new_signal_receiver()
{
/* check permissions */
if (pd_id() != core_id()) {
if (!_core()) {
PERR("not entitled to create signal receiver");
user_arg_0(0);
return;
@ -497,7 +787,7 @@ void Thread::_syscall_new_signal_receiver()
void Thread::_syscall_new_signal_context()
{
/* check permissions */
if (pd_id() != core_id()) {
if (!_core()) {
PERR("not entitled to create signal context");
user_arg_0(0);
return;
@ -611,7 +901,7 @@ void Thread::_syscall_ack_signal()
void Thread::_syscall_kill_signal_context()
{
/* check permissions */
if (pd_id() != core_id()) {
if (!_core()) {
PERR("not entitled to kill signal context");
user_arg_0(-1);
return;
@ -640,7 +930,7 @@ void Thread::_syscall_kill_signal_context()
void Thread::_syscall_kill_signal_receiver()
{
/* check permissions */
if (pd_id() != core_id()) {
if (!_core()) {
PERR("not entitled to kill signal receiver");
user_arg_0(-1);
return;
@ -669,7 +959,7 @@ void Thread::_syscall_kill_signal_receiver()
void Thread::_syscall_new_vm()
{
/* check permissions */
assert(pd_id() == core_id());
assert(_core());
/* dispatch arguments */
void * const allocator = (void * const)user_arg_1();
@ -693,7 +983,7 @@ void Thread::_syscall_new_vm()
void Thread::_syscall_run_vm()
{
/* check permissions */
assert(pd_id() == core_id());
assert(_core());
/* get targeted vm via its id */
Vm * const vm = Vm::pool()->object(user_arg_1());
@ -710,7 +1000,7 @@ void Thread::_syscall_run_vm()
void Thread::_syscall_pause_vm()
{
/* check permissions */
assert(pd_id() == core_id());
assert(_core());
/* get targeted vm via its id */
Vm * const vm = Vm::pool()->object(user_arg_1());
@ -728,43 +1018,43 @@ void Thread::_syscall()
{
switch (user_arg_0())
{
case NEW_THREAD : _syscall_new_thread(); return;
case DELETE_THREAD : _syscall_delete_thread(); return;
case START_THREAD : _syscall_start_thread(); return;
case PAUSE_THREAD : _syscall_pause_thread(); return;
case RESUME_THREAD : _syscall_resume_thread(); return;
case RESUME_FAULTER : _syscall_resume_faulter(); return;
case GET_THREAD : _syscall_get_thread(); return;
case CURRENT_THREAD_ID : _syscall_current_thread_id(); return;
case YIELD_THREAD : _syscall_yield_thread(); return;
case READ_THREAD_STATE : _syscall_read_thread_state(); return;
case WRITE_THREAD_STATE : _syscall_write_thread_state(); return;
case REQUEST_AND_WAIT : _syscall_request_and_wait(); return;
case REPLY : _syscall_reply(); return;
case WAIT_FOR_REQUEST : _syscall_wait_for_request(); return;
case SET_PAGER : _syscall_set_pager(); return;
case UPDATE_PD : _syscall_update_pd(); return;
case UPDATE_REGION : _syscall_update_region(); return;
case NEW_PD : _syscall_new_pd(); return;
case ALLOCATE_IRQ : _syscall_allocate_irq(); return;
case AWAIT_IRQ : _syscall_await_irq(); return;
case FREE_IRQ : _syscall_free_irq(); return;
case PRINT_CHAR : _syscall_print_char(); return;
case NEW_SIGNAL_RECEIVER : _syscall_new_signal_receiver(); return;
case NEW_SIGNAL_CONTEXT : _syscall_new_signal_context(); return;
case KILL_SIGNAL_CONTEXT : _syscall_kill_signal_context(); return;
case KILL_SIGNAL_RECEIVER : _syscall_kill_signal_receiver(); return;
case AWAIT_SIGNAL : _syscall_await_signal(); return;
case SUBMIT_SIGNAL : _syscall_submit_signal(); return;
case SIGNAL_PENDING : _syscall_signal_pending(); return;
case ACK_SIGNAL : _syscall_ack_signal(); return;
case NEW_VM : _syscall_new_vm(); return;
case RUN_VM : _syscall_run_vm(); return;
case PAUSE_VM : _syscall_pause_vm(); return;
case KILL_PD : _syscall_kill_pd(); return;
case NEW_THREAD: _syscall_new_thread(); return;
case DELETE_THREAD: _syscall_delete_thread(); return;
case START_THREAD: _syscall_start_thread(); return;
case PAUSE_THREAD: _syscall_pause_thread(); return;
case RESUME_THREAD: _syscall_resume_thread(); return;
case RESUME_FAULTER: _syscall_resume_faulter(); return;
case GET_THREAD: _syscall_get_thread(); return;
case CURRENT_THREAD_ID: _syscall_current_thread_id(); return;
case YIELD_THREAD: _syscall_yield_thread(); return;
case READ_THREAD_STATE: _syscall_read_thread_state(); return;
case WRITE_THREAD_STATE: _syscall_write_thread_state(); return;
case REQUEST_AND_WAIT: _syscall_request_and_wait(); return;
case REPLY: _syscall_reply(); return;
case WAIT_FOR_REQUEST: _syscall_wait_for_request(); return;
case SET_PAGER: _syscall_set_pager(); return;
case UPDATE_PD: _syscall_update_pd(); return;
case UPDATE_REGION: _syscall_update_region(); return;
case NEW_PD: _syscall_new_pd(); return;
case ALLOCATE_IRQ: _syscall_allocate_irq(); return;
case AWAIT_IRQ: _syscall_await_irq(); return;
case FREE_IRQ: _syscall_free_irq(); return;
case PRINT_CHAR: _syscall_print_char(); return;
case NEW_SIGNAL_RECEIVER: _syscall_new_signal_receiver(); return;
case NEW_SIGNAL_CONTEXT: _syscall_new_signal_context(); return;
case KILL_SIGNAL_CONTEXT: _syscall_kill_signal_context(); return;
case KILL_SIGNAL_RECEIVER: _syscall_kill_signal_receiver(); return;
case AWAIT_SIGNAL: _syscall_await_signal(); return;
case SUBMIT_SIGNAL: _syscall_submit_signal(); return;
case SIGNAL_PENDING: _syscall_signal_pending(); return;
case ACK_SIGNAL: _syscall_ack_signal(); return;
case NEW_VM: _syscall_new_vm(); return;
case RUN_VM: _syscall_run_vm(); return;
case PAUSE_VM: _syscall_pause_vm(); return;
case KILL_PD: _syscall_kill_pd(); return;
default:
PERR("invalid syscall");
stop();
_stop();
reset_lap_time();
}
}

View File

@ -11,8 +11,8 @@
* under the terms of the GNU General Public License version 2.
*/
#ifndef _CORE__KERNEL__THREAD_H_
#define _CORE__KERNEL__THREAD_H_
#ifndef _KERNEL__THREAD_H_
#define _KERNEL__THREAD_H_
/* core includes */
#include <kernel/configuration.h>
@ -20,7 +20,6 @@
#include <kernel/signal_receiver.h>
#include <kernel/ipc_node.h>
#include <kernel/irq_receiver.h>
#include <kernel/pd.h>
#include <cpu.h>
namespace Genode
@ -31,14 +30,13 @@ namespace Genode
namespace Kernel
{
class Thread;
class Pd;
typedef Genode::Cpu Cpu;
typedef Genode::Pagefault Pagefault;
typedef Genode::Native_utcb Native_utcb;
unsigned core_id();
void handle_interrupt(void);
void reset_lap_time();
void reset_lap_time();
/**
* Kernel backend for userland execution-contexts
@ -92,179 +90,48 @@ class Kernel::Thread
Signal_receiver * _signal_receiver;
/**
* Resume execution
* Notice that another thread yielded the CPU to this thread
*/
void _schedule()
{
cpu_scheduler()->insert(this);
_state = SCHEDULED;
}
void _receive_yielded_cpu();
/**
* Return kernel backend of protection domain the thread is in
*/
Pd * _pd() const;
/***************************
** Signal_context_killer **
***************************/
/**
* Return whether this is a core thread
*/
bool _core() const;
void _signal_context_kill_pending()
{
assert(_state == SCHEDULED);
_state = AWAITS_SIGNAL_CONTEXT_KILL;
cpu_scheduler()->remove(this);
}
/**
* Resume execution rawly
*/
void _schedule();
void _signal_context_kill_done()
{
assert(_state == AWAITS_SIGNAL_CONTEXT_KILL);
user_arg_0(0);
_schedule();
}
/**
* Pause execution
*/
void _pause();
/**
* Suspend unrecoverably from execution
*/
void _stop();
/****************************
** Signal_receiver_killer **
****************************/
/**
* Try to escape from blocking state, if in any, and resume execution
*
* \retval -1 failed
* \retval 0 succeeded, execution was paused
* \retval 1 succeeded, execution was not paused
*/
int _resume();
void _signal_receiver_kill_pending()
{
assert(_state == SCHEDULED);
_state = AWAITS_SIGNAL_RECEIVER_KILL;
cpu_scheduler()->remove(this);
}
void _signal_receiver_kill_done()
{
assert(_state == AWAITS_SIGNAL_RECEIVER_KILL);
user_arg_0(0);
_schedule();
}
/********************
** Signal_handler **
********************/
void _await_signal(Signal_receiver * const receiver)
{
cpu_scheduler()->remove(this);
_state = AWAITS_SIGNAL;
_signal_receiver = receiver;
}
void _receive_signal(void * const base, size_t const size)
{
assert(_state == AWAITS_SIGNAL && size <= phys_utcb()->size());
Genode::memcpy(phys_utcb()->base(), base, size);
_schedule();
}
/**************
** Ipc_node **
**************/
void _received_ipc_request(size_t const s)
{
switch (_state) {
case SCHEDULED:
user_arg_0(s);
return;
default:
PERR("wrong thread state to receive IPC");
stop();
return;
}
}
void _await_ipc()
{
switch (_state) {
case SCHEDULED:
cpu_scheduler()->remove(this);
_state = AWAITS_IPC;
case AWAITS_PAGER:
return;
default:
PERR("wrong thread state to await IPC");
stop();
return;
}
}
void _await_ipc_succeeded(bool const reply, size_t const s)
{
switch (_state) {
case AWAITS_IPC:
/* FIXME: return error codes on all IPC transfers */
if (reply) {
phys_utcb()->ipc_msg_size(s);
user_arg_0(0);
_schedule();
} else {
user_arg_0(s);
_schedule();
}
return;
case AWAITS_PAGER_IPC:
_schedule();
return;
case AWAITS_PAGER:
_state = AWAITS_RESUME;
return;
default:
PERR("wrong thread state to receive IPC");
stop();
return;
}
}
void _await_ipc_failed(bool const reply)
{
switch (_state) {
case AWAITS_IPC:
/* FIXME: return error codes on all IPC transfers */
if (reply) {
user_arg_0(-1);
_schedule();
} else {
PERR("failed to receive IPC");
stop();
}
return;
case SCHEDULED:
PERR("failed to receive IPC");
stop();
return;
case AWAITS_PAGER_IPC:
PERR("failed to get pagefault resolved");
stop();
return;
case AWAITS_PAGER:
PERR("failed to get pagefault resolved");
stop();
return;
default:
PERR("wrong thread state to cancel IPC");
stop();
return;
}
}
/***************
** Irq_owner **
***************/
void _received_irq()
{
assert(_state == AWAITS_IRQ);
_schedule();
}
void _awaits_irq()
{
cpu_scheduler()->remove(this);
_state = AWAITS_IRQ;
}
/**
* Handle an exception thrown by the memory management unit
*/
void _mmu_exception();
/**
* Handle syscall request of this thread
@ -311,44 +178,57 @@ class Kernel::Thread
void _syscall_run_vm();
void _syscall_pause_vm();
/***************************
** Signal_context_killer **
***************************/
void _signal_context_kill_pending();
void _signal_context_kill_done();
/****************************
** Signal_receiver_killer **
****************************/
void _signal_receiver_kill_pending();
void _signal_receiver_kill_done();
/********************
** Signal_handler **
********************/
void _await_signal(Signal_receiver * const receiver);
void _receive_signal(void * const base, size_t const size);
/**************
** Ipc_node **
**************/
void _received_ipc_request(size_t const s);
void _await_ipc();
void _await_ipc_succeeded(bool const reply, size_t const s);
void _await_ipc_failed(bool const reply);
/***************
** Irq_owner **
***************/
void _received_irq();
void _awaits_irq();
public:
/**
* Constructor
*
* \param platform_thread userland backend of execution context
* \param platform_thread corresponding userland object
*/
Thread(Platform_thread * const platform_thread);
/**
* Return whether the thread is a core thread
*/
bool core() { return pd_id() == core_id(); }
/**
* Return kernel backend of protection domain the thread is in
*/
Pd * pd() { return Pd::pool()->object(pd_id()); }
/**
* Return user label of the thread
*/
char const * label();
/**
* Return user label of the protection domain the thread is in
*/
char const * pd_label();
/**
* Suspend the thread unrecoverably
*/
void stop()
{
if (_state == SCHEDULED) { cpu_scheduler()->remove(this); }
_state = STOPPED;
}
/**
* Prepare thread to get scheduled the first time
*
@ -359,190 +239,20 @@ class Kernel::Thread
* \param utcb_phys physical UTCB pointer
* \param utcb_virt virtual UTCB pointer
* \param main whether the thread is the first one in its PD
* \param start whether to start execution
*/
void prepare_to_start(void * const ip,
void * const sp,
unsigned const cpu_id,
unsigned const pd_id,
Native_utcb * const utcb_phys,
Native_utcb * const utcb_virt,
bool const main);
/**
* Start this thread
*
* \param ip initial instruction pointer
* \param sp initial stack pointer
* \param cpu_id target cpu
* \param pd_id target protection-domain
* \param utcb_p physical UTCB pointer
* \param utcb_v virtual UTCB pointer
* \param main whether the thread is the first one in its PD
*/
void start(void * const ip,
void * const sp,
unsigned const cpu_id,
unsigned const pd_id,
Native_utcb * const utcb_p,
Native_utcb * const utcb_v,
bool const main)
{
prepare_to_start(ip, sp, cpu_id, pd_id, utcb_p, utcb_v, main);
_schedule();
}
/**
* Pause this thread
*/
void pause()
{
assert(_state == AWAITS_RESUME || _state == SCHEDULED);
cpu_scheduler()->remove(this);
_state = AWAITS_RESUME;
}
/**
* Resume this thread
*/
int resume()
{
switch (_state) {
case AWAITS_RESUME:
_schedule();
return 0;
case AWAITS_PAGER:
_state = AWAITS_PAGER_IPC;
return 0;
case AWAITS_PAGER_IPC:
Ipc_node::cancel_waiting();
return 0;
case SCHEDULED:
return 1;
case AWAITS_IPC:
Ipc_node::cancel_waiting();
return 0;
case AWAITS_IRQ:
Irq_receiver::cancel_waiting();
return 0;
case AWAITS_SIGNAL:
Signal_handler::cancel_waiting();
return 0;
case AWAITS_SIGNAL_CONTEXT_KILL:
Signal_context_killer::cancel_waiting();
return 0;
case AWAITS_SIGNAL_RECEIVER_KILL:
Signal_receiver_killer::cancel_waiting();
return 0;
case AWAITS_START:
case STOPPED:;
}
PERR("failed to resume thread");
return -1;
}
/**
* Send a request and await the reply
*/
void request_and_wait(Thread * const dest, size_t const size)
{
Ipc_node::send_request_await_reply(
dest, phys_utcb()->base(), size,
phys_utcb()->ipc_msg_base(),
phys_utcb()->max_ipc_msg_size());
}
/**
* Wait for any request
*/
void wait_for_request()
{
Ipc_node::await_request(phys_utcb()->base(), phys_utcb()->size());
}
/**
* Reply to the last request
*/
void reply(size_t const size, bool const await_request)
{
Ipc_node::send_reply(phys_utcb()->base(), size);
if (await_request) {
Ipc_node * const ipc = static_cast<Ipc_node *>(this);
ipc->await_request(phys_utcb()->base(), phys_utcb()->size());
}
else { user_arg_0(0); }
}
/**
* Handle an exception thrown by the MMU
*/
void handle_mmu_exception()
{
/* pause thread */
cpu_scheduler()->remove(this);
_state = AWAITS_PAGER;
/* check out cause and attributes */
addr_t va = 0;
bool w = 0;
if (!pagefault(va, w)) {
PERR("unknown MMU exception");
return;
}
/* inform pager */
_pagefault = Pagefault(id(), (Tlb *)tlb(), ip, va, w);
void * const base = &_pagefault;
size_t const size = sizeof(_pagefault);
Ipc_node::send_request_await_reply(_pager, base, size, base, size);
}
/**
* Get unique thread ID, avoid method ambiguity
*/
unsigned id() const { return Object::id(); }
/**
* Notice that another thread yielded the CPU to us
*/
void receive_yielded_cpu()
{
if (_state == AWAITS_RESUME) { _schedule(); }
else { PERR("failed to receive yielded CPU"); }
}
void init(void * const ip, void * const sp, unsigned const cpu_id,
unsigned const pd_id, Native_utcb * const utcb_phys,
Native_utcb * const utcb_virt, bool const main,
bool const start);
/***********************
** Execution_context **
***********************/
void handle_exception()
{
switch(cpu_exception) {
case SUPERVISOR_CALL:
_syscall();
return;
case PREFETCH_ABORT:
handle_mmu_exception();
return;
case DATA_ABORT:
handle_mmu_exception();
return;
case INTERRUPT_REQUEST:
handle_interrupt();
return;
case FAST_INTERRUPT_REQUEST:
handle_interrupt();
return;
default:
PERR("unknown exception");
stop();
reset_lap_time();
}
}
void proceed()
{
mtc()->continue_user(static_cast<Cpu::Context *>(this));
}
void handle_exception();
void proceed();
/***************
@ -550,12 +260,11 @@ class Kernel::Thread
***************/
Platform_thread * platform_thread() const { return _platform_thread; }
void pager(Thread * const p) { _pager = p; }
unsigned pd_id() const { return _pd_id; }
Native_utcb * phys_utcb() const { return _phys_utcb; }
void pager(Thread * const p) { _pager = p; }
unsigned id() const { return Object::id(); }
char const * label() const;
unsigned pd_id() const { return _pd_id; }
char const * pd_label() const;
};
#endif /* _CORE__KERNEL__THREAD_H_ */
#endif /* _KERNEL__THREAD_H_ */