genode/repos/base-nova/src/core/platform_thread.cc


/*
* \brief Thread facility
* \author Norman Feske
* \author Sebastian Sumpf
* \author Alexander Boettcher
* \date 2009-10-02
*/
/*
* Copyright (C) 2009-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* Genode includes */
#include <base/log.h>
/* core includes */
#include <ipc_pager.h>
#include <platform.h>
#include <platform_thread.h>
#include <platform_pd.h>
#include <util.h>
#include <nova_util.h>
/* base-internal includes */
#include <base/internal/stack_area.h>
/* NOVA includes */
#include <nova/syscalls.h>
#include <nova/util.h>


using namespace Genode;
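
/* map the pager's initial exception portals into the target protection domain */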
static uint8_t map_thread_portals(Pager_object &pager,
addr_t const target_exc_base,
Nova::Utcb &utcb)
{
using Nova::Obj_crd;
using Nova::NUM_INITIAL_PT_LOG2;
addr_t const source_pd = platform_specific().core_pd_sel();
addr_t const source_exc_base = pager.exc_pt_sel_client();
addr_t const target_pd = pager.pd_sel();
	/* xxx better to map the portals with solely pt_call, and the sm separately? xxx */
addr_t const rights = Obj_crd::RIGHT_EC_RECALL |
Obj_crd::RIGHT_PT_CTRL | Obj_crd::RIGHT_PT_CALL | Obj_crd::RIGHT_PT_XCPU |
Obj_crd::RIGHT_SM_UP | Obj_crd::RIGHT_SM_DOWN;
Obj_crd const source_initial_caps(source_exc_base, NUM_INITIAL_PT_LOG2,
rights);
Obj_crd const target_initial_caps(target_exc_base, NUM_INITIAL_PT_LOG2,
rights);
return async_map(pager, source_pd, target_pd,
source_initial_caps, target_initial_caps, utcb);
}


/*********************
 ** Platform thread **
 *********************/


void Platform_thread::affinity(Affinity::Location)
{
error("dynamic affinity change not supported on NOVA");
}


Affinity::Location Platform_thread::affinity() const { return _location; }


int Platform_thread::start(void *ip, void *sp)
{
using namespace Nova;
if (!_pager) {
error("pager undefined");
return -1;
}
if (!_pd || (main_thread() && !vcpu() &&
_pd->parent_pt_sel() == Native_thread::INVALID_INDEX)) {
error("protection domain undefined");
return -2;
}
Utcb &utcb = *reinterpret_cast<Utcb *>(Thread::myself()->utcb());
unsigned const kernel_cpu_id = platform_specific().kernel_cpu_id(_location.xpos());
addr_t const source_pd = platform_specific().core_pd_sel();
addr_t const pt_oom = _pager->get_oom_portal();
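	/* install the pager's out-of-memory portal at the thread's portal selector */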
if (!pt_oom || map_local(source_pd, utcb,
Obj_crd(pt_oom, 0), Obj_crd(_sel_pt_oom(), 0))) {
error("setup of out-of-memory notification portal - failed");
return -8;
}
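
	/* set up EC and portals for non-main threads and vCPUs */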
if (!main_thread()) {
addr_t const initial_sp = reinterpret_cast<addr_t>(sp);
addr_t const utcb_addr = vcpu() ? 0 : round_page(initial_sp);
if (_sel_exc_base == Native_thread::INVALID_INDEX) {
error("exception base not specified");
return -3;
}
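
		/* create the execution context (EC) of the new thread */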
uint8_t res = syscall_retry(*_pager,
[&]() {
return create_ec(_sel_ec(), _pd->pd_sel(), kernel_cpu_id,
utcb_addr, initial_sp, _sel_exc_base,
!worker());
});
if (res != Nova::NOVA_OK) {
error("creation of new thread failed ", res);
return -4;
}
if (vcpu()) {
if (!remote_pd())
res = map_pagefault_portal(*_pager, _pager->exc_pt_sel_client(),
_sel_exc_base, _pd->pd_sel(), utcb);
} else
res = map_thread_portals(*_pager, _sel_exc_base, utcb);
if (res != NOVA_OK) {
revoke(Obj_crd(_sel_ec(), 0));
error("creation of new thread/vcpu failed ", res);
return -3;
}
if (worker()) {
/* local/worker threads do not require a startup portal */
revoke(Obj_crd(_pager->exc_pt_sel_client() + PT_SEL_STARTUP, 0));
}
_pager->initial_eip((addr_t)ip);
_pager->initial_esp(initial_sp);
_pager->client_set_ec(_sel_ec());
return 0;
}
if (!vcpu() && _sel_exc_base != Native_thread::INVALID_INDEX) {
error("thread already started");
return -5;
}
addr_t pd_utcb = 0;
if (!vcpu()) {
_sel_exc_base = _pager->exc_pt_sel_client();
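		/* place the main thread's UTCB at the top of the first stack of the stack area */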
pd_utcb = stack_area_virtual_base() + stack_virtual_size() - get_page_size();
addr_t remap_src[] = { _pd->parent_pt_sel() };
addr_t remap_dst[] = { PT_SEL_PARENT };
/* remap exception portals for first thread */
for (unsigned i = 0; i < sizeof(remap_dst)/sizeof(remap_dst[0]); i++) {
if (map_local(source_pd, utcb,
Obj_crd(remap_src[i], 0),
Obj_crd(_sel_exc_base + remap_dst[i], 0)))
return -6;
}
}
/* create first thread in task */
enum { THREAD_GLOBAL = true };
uint8_t res = create_ec(_sel_ec(), _pd->pd_sel(), kernel_cpu_id,
pd_utcb, 0, vcpu() ? _sel_exc_base : 0,
THREAD_GLOBAL);
if (res != NOVA_OK) {
error("create_ec returned ", res);
return -7;
}
_pager->client_set_ec(_sel_ec());
_pager->initial_eip((addr_t)ip);
_pager->initial_esp((addr_t)sp);
if (vcpu())
_features |= REMOTE_PD;
else
res = map_thread_portals(*_pager, 0, utcb);
if (res == NOVA_OK) {
res = syscall_retry(*_pager,
[&]() {
/* let the thread run */
return create_sc(_sel_sc(), _pd->pd_sel(), _sel_ec(),
Qpd(Qpd::DEFAULT_QUANTUM, _priority));
});
}
if (res != NOVA_OK) {
_pager->client_set_ec(Native_thread::INVALID_INDEX);
_pager->initial_eip(0);
_pager->initial_esp(0);
error("create_sc returned ", res);
		/* freeing the cap selector for _sel_ec is done in the destructor */
revoke(Obj_crd(_sel_ec(), 0));
return -8;
}
_features |= SC_CREATED;
return 0;
}


void Platform_thread::pause()
{
if (!_pager)
return;
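	/* recall the thread to halt it, handled by its pager */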
_pager->client_recall(true);
}


void Platform_thread::resume()
{
using namespace Nova;
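	/* threads that are workers or already own an SC just need a wake-up */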
if (worker() || sc_created()) {
if (_pager)
_pager->wake_up();
return;
}
if (!_pd || !_pager) {
error("protection domain undefined - resuming thread failed");
return;
}
uint8_t res = syscall_retry(*_pager,
[&]() {
return create_sc(_sel_sc(), _pd->pd_sel(), _sel_ec(),
Qpd(Qpd::DEFAULT_QUANTUM, _priority));
});
if (res == NOVA_OK)
_features |= SC_CREATED;
else
error("create_sc failed ", res);
}


Thread_state Platform_thread::state()
{
if (!_pager) throw Cpu_thread::State_access_failed();
Thread_state s;
if (_pager->copy_thread_state(&s))
return s;
throw Cpu_thread::State_access_failed();
}


void Platform_thread::state(Thread_state s)
{
if (!_pager) throw Cpu_thread::State_access_failed();
if (!_pager->copy_thread_state(s))
throw Cpu_thread::State_access_failed();
/* the new state is transferred to the kernel by the recall handler */
_pager->client_recall(false);
}


void Platform_thread::cancel_blocking()
{
if (!_pager) return;
_pager->client_cancel_blocking();
}


void Platform_thread::single_step(bool on)
{
if (!_pager) return;
_pager->single_step(on);
}


const char * Platform_thread::pd_name() const {
return _pd ? _pd->name() : "unknown"; }


Trace::Execution_time Platform_thread::execution_time() const
{
unsigned long long time = 0;
	/* for ECs without an SC we simply return 0 */
if (!sc_created())
return { time, time, Nova::Qpd::DEFAULT_QUANTUM, _priority };
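	/* query the consumed time of the scheduling context */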
uint8_t res = Nova::sc_ctrl(_sel_sc(), time);
if (res != Nova::NOVA_OK)
warning("sc_ctrl failed res=", res);
return { time, time, Nova::Qpd::DEFAULT_QUANTUM, _priority };
}


void Platform_thread::pager(Pager_object &pager)
{
_pager = &pager;
_pager->assign_pd(_pd->pd_sel());
}


void Platform_thread::thread_type(Nova_native_cpu::Thread_type thread_type,
Nova_native_cpu::Exception_base exception_base)
{
	/* the thread type and exception base can be set only once */
if (_sel_exc_base != Native_thread::INVALID_INDEX)
return;
if (!main_thread() || (thread_type == Nova_native_cpu::Thread_type::VCPU))
_sel_exc_base = exception_base.exception_base;
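	/* note the thread type in the feature bits */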
if (thread_type == Nova_native_cpu::Thread_type::LOCAL)
_features |= WORKER;
else if (thread_type == Nova_native_cpu::Thread_type::VCPU)
_features |= VCPU;
}


Platform_thread::Platform_thread(size_t, const char *name, unsigned prio,
Affinity::Location affinity, int)
:
_pd(0), _pager(0), _id_base(cap_map().insert(2)),
_sel_exc_base(Native_thread::INVALID_INDEX), _location(affinity),
_features(0),
_priority(scale_priority(prio, name)),
_name(name)
{ }


Platform_thread::~Platform_thread()
{
if (_pager) {
/* reset pager and badge used for debug output */
_pager->reset_badge();
_pager = 0;
}
using namespace Nova;
/* free ec and sc caps */
revoke(Obj_crd(_id_base, 2));
cap_map().remove(_id_base, 2, false);
}