genode/base-nova/src/core/platform_thread.cc

/*
 * \brief  Thread facility
 * \author Norman Feske
 * \author Sebastian Sumpf
 * \author Alexander Boettcher
 * \date   2009-10-02
 */

/*
 * Copyright (C) 2009-2013 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */

/* Genode includes */
#include <base/printf.h>
#include <base/cap_sel_alloc.h>
#include <base/ipc_pager.h>

/* core includes */
#include <platform_thread.h>
#include <platform_pd.h>
#include <util.h>
#include <nova_util.h>

/* NOVA includes */
#include <nova/syscalls.h>
#include <nova/util.h>

using namespace Genode;


/*********************
 ** Platform thread **
 *********************/

void Platform_thread::affinity(Affinity::Location location)
{
    if (_sel_exc_base != Native_thread::INVALID_INDEX) {
        PERR("Failure - affinity of thread could not be set");
        return;
    }

    _location = location;
}


Affinity::Location Platform_thread::affinity() { return _location; }
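

/*
 * Start the thread at instruction pointer 'ip' with stack pointer 'sp'.
 *
 * For a non-main thread, an EC is created within the already existing
 * protection domain. For the main thread of a new protection domain, the
 * PD itself is created first (including the remapping of the parent
 * portal), followed by the first EC and an SC that lets the thread run.
 */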
int Platform_thread::start(void *ip, void *sp)
{
    using namespace Nova;

    if (!_pager) {
        PERR("pager undefined");
        return -1;
    }

    if (!_pd) {
        PERR("protection domain undefined");
        return -2;
    }

    if (!is_main_thread()) {

        addr_t initial_sp = reinterpret_cast<addr_t>(sp);
        addr_t utcb       = is_vcpu() ? 0 : round_page(initial_sp);

        if (_sel_exc_base == Native_thread::INVALID_INDEX) {
            PERR("exception base not specified");
            return -3;
        }

        /* ip == 0 means that the caller will use the thread as a worker */
        bool thread_global = ip;

        uint8_t res = create_ec(_sel_ec(), _pd->pd_sel(), _location.xpos(),
                                utcb, initial_sp, _sel_exc_base, thread_global);
        if (res != Nova::NOVA_OK) {
            PERR("creation of new thread failed %u", res);
            return -4;
        }

        if (!thread_global) {
            _features |= WORKER;

            /* local/worker threads do not require a startup portal */
            revoke(Obj_crd(_pager->exc_pt_sel_client() + PT_SEL_STARTUP, 0));
        }

        _pager->initial_eip((addr_t)ip);
        _pager->initial_esp(initial_sp);
        _pager->client_set_ec(_sel_ec());

        return 0;
    }

    if (_sel_exc_base != Native_thread::INVALID_INDEX) {
        PERR("thread already started");
        return -5;
    }

    /*
     * For the first thread of a new PD, use the initial stack pointer for
     * reporting the thread's UTCB address.
     */
    addr_t pd_utcb = Native_config::context_area_virtual_base() +
                     Native_config::context_area_virtual_size() -
                     get_page_size();

    _sel_exc_base = _pager->exc_pt_sel_client();

    addr_t pd_core_sel = Platform_pd::pd_core_sel();

    addr_t remap_src[] = { _pd->parent_pt_sel() };
    addr_t remap_dst[] = { PT_SEL_PARENT };
    addr_t pd_sel;

    Obj_crd initial_pts(_sel_exc_base, NUM_INITIAL_PT_LOG2);

    uint8_t res;

    /* remap exception portals for the first thread */
    for (unsigned i = 0; i < sizeof(remap_dst)/sizeof(remap_dst[0]); i++) {
        if (map_local((Utcb *)Thread_base::myself()->utcb(),
                      Obj_crd(remap_src[i], 0),
                      Obj_crd(_sel_exc_base + remap_dst[i], 0)))
            return -6;
    }

    pd_sel = cap_selector_allocator()->alloc();

    /* create task */
    res = create_pd(pd_sel, pd_core_sel, initial_pts);
    if (res != NOVA_OK) {
        PERR("create_pd returned %d", res);
        goto cleanup_pd;
    }

    /* create the first thread of the task */
    enum { THREAD_GLOBAL = true };
    res = create_ec(_sel_ec(), pd_sel, _location.xpos(), pd_utcb, 0, 0,
                    THREAD_GLOBAL);
    if (res != NOVA_OK) {
        PERR("create_ec returned %d", res);
        goto cleanup_pd;
    }

    /*
     * We have to assign the pd here because, after create_sc, the thread
     * starts running immediately.
     */
    _pd->assign_pd(pd_sel);
    _pager->client_set_ec(_sel_ec());
    _pager->initial_eip((addr_t)ip);
    _pager->initial_esp((addr_t)sp);

    /* let the thread run */
    res = create_sc(_sel_sc(), pd_sel, _sel_ec(), Qpd());
    if (res != NOVA_OK) {

        /*
         * Reset the pd cap since the thread did not start running and the
         * pd cap will be revoked during cleanup.
         */
        _pd->assign_pd(Native_thread::INVALID_INDEX);
        _pager->client_set_ec(Native_thread::INVALID_INDEX);
        _pager->initial_eip(0);
        _pager->initial_esp(0);

        PERR("create_sc returned %d", res);
        goto cleanup_ec;
    }

    return 0;

    cleanup_ec:
    /* freeing the cap selector for _sel_ec is done in the destructor */
    revoke(Obj_crd(_sel_ec(), 0));

    cleanup_pd:
    revoke(Obj_crd(pd_sel, 0));
    cap_selector_allocator()->free(pd_sel, 0);

    return -7;
}


/*
 * The kernel's recall mechanism forces a thread into an exception, in which
 * its current state can be obtained and its execution halted. The recall
 * exception, however, is delivered only when the recalled thread next leaves
 * the kernel, i.e., asynchronously. Waiting for that exception inside core's
 * CPU-session service is prone to deadlock, because core's service thread may
 * end up blocking on a thread that itself waits on core. The waiting therefore
 * has to happen in the context of the caller: pause() merely issues the recall
 * and returns the pager's notification semaphore, on which the caller blocks
 * until the pager has received the recall exception along with the state of
 * the paused thread.
 */
Native_capability Platform_thread::pause()
{
    if (!_pager) return Native_capability();

    Native_capability notify_sm = _pager->notify_sm();
    if (!notify_sm.valid()) return notify_sm;

    if (_pager->client_recall() != Nova::NOVA_OK)
        return Native_capability();

    /* if the thread is blocked in its own SM, get it out */
    cancel_blocking();

    /* a local/worker thread may never get canceled if it does not receive an IPC */
    if (is_worker()) return Native_capability();

    return notify_sm;
}
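

/*
 * Caller-side usage sketch (illustrative only): following the rationale
 * above, a client of the CPU session blocks on the semaphore capability
 * returned by the pause operation instead of blocking inside core. The
 * names 'cpu', 'thread_cap', and the 'pause_sync' RPC are assumptions for
 * illustration and not taken from this file; the blocking itself would use
 * the NOVA semaphore-down operation on the received selector.
 *
 *   Native_capability sm = cpu->pause_sync(thread_cap);
 *   if (sm.valid())
 *       Nova::sm_ctrl(sm.local_name(), Nova::SEMAPHORE_DOWN);
 */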


void Platform_thread::resume()
{
    using namespace Nova;

    if (!is_worker()) {
        uint8_t res = create_sc(_sel_sc(), _pd->pd_sel(), _sel_ec(), Qpd());
        if (res == NOVA_OK) return;
    }

    if (!_pager) return;

    /* thread was paused beforehand and blocked in pager - wake up pager */
    _pager->wake_up();
}


Thread_state Platform_thread::state()
{
    if (!_pager) throw Cpu_session::State_access_failed();

    Thread_state s;

    if (_pager->copy_thread_state(&s))
        return s;

    if (is_worker())
        s.sp = _pager->initial_esp();

    return s;
}


void Platform_thread::state(Thread_state s)
{
    /* not permitted for the main thread */
    if (is_main_thread()) throw Cpu_session::State_access_failed();

    /* the exception base can be set only once */
    if (_sel_exc_base != Native_thread::INVALID_INDEX)
        throw Cpu_session::State_access_failed();

    /*
     * s.sel_exc_base  exception base of the thread in the caller's
     *                 protection domain - not in core!
     * s.is_vcpu       if true, the thread will run as a vCPU,
     *                 otherwise as a regular thread
     */
    _sel_exc_base = s.sel_exc_base;

    if (s.is_vcpu) _features |= VCPU;
}


void Platform_thread::cancel_blocking()
{
    if (!_pager) return;

    _pager->client_cancel_blocking();
}


void Platform_thread::single_step(bool on)
{
    if (!_pager) return;

    _pager->single_step(on);
}


unsigned long Platform_thread::pager_object_badge() const
{
    return reinterpret_cast<unsigned long>(_name);
}


Weak_ptr<Address_space> Platform_thread::address_space()
{
    return _pd->Address_space::weak_ptr();
}


Platform_thread::Platform_thread(const char *name, unsigned, int thread_id)
:
    _pd(0), _pager(0), _id_base(cap_selector_allocator()->alloc(1)),
    _sel_exc_base(Native_thread::INVALID_INDEX), _location(boot_cpu(), 0),
    _features(0)
{
    strncpy(_name, name, sizeof(_name));
}
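

/*
 * Note on capability selectors: the constructor above allocates a selector
 * region of order 1 for '_id_base', i.e., presumably two adjacent selectors
 * that back the thread's EC and SC capabilities. The destructor below
 * revokes and frees them as one region of the same order.
 */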
Platform_thread::~Platform_thread()
{
    using namespace Nova;

    /* free EC and SC caps */
    revoke(Obj_crd(_id_base, 1));
    cap_selector_allocator()->free(_id_base, 1);
}