hw: kernel backed capabilities (Fix #1443)

This commit is contained in:
Stefan Kalkowski 2015-05-19 14:18:40 +02:00 committed by Christian Helmuth
parent 4431ab7354
commit e081554731
77 changed files with 2367 additions and 1685 deletions

View File

@ -1,11 +1,12 @@
/*
* \brief IPC message buffers
* \author Martin Stein
* \author Stefan Kalkowski
* \date 2012-01-03
*/
/*
* Copyright (C) 2012-2013 Genode Labs GmbH
* Copyright (C) 2012-2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
@ -14,58 +15,118 @@
#ifndef _INCLUDE__BASE__IPC_MSGBUF_H_
#define _INCLUDE__BASE__IPC_MSGBUF_H_
#include <base/native_capability.h>
#include <util/string.h>
namespace Genode
{
class Native_utcb;
/**
* IPC message buffer layout
*/
class Msgbuf_base
{
protected:
size_t _size; /* buffer size in bytes */
public:
char buf[]; /* begin of actual message buffer */
/*************************************************
** 'buf' must be the last member of this class **
*************************************************/
/**
* Return size of message buffer
*/
inline size_t size() const { return _size; }
/**
* Return address of message buffer
*/
inline void *addr() { return &buf[0]; }
};
class Msgbuf_base;
/**
* Instance of IPC message buffer with specified buffer size
*
* 'Msgbuf_base' must be the last class this class inherits from.
*/
template <unsigned BUF_SIZE>
class Msgbuf : public Msgbuf_base
{
public:
/**************************************************
** 'buf' must be the first member of this class **
**************************************************/
char buf[BUF_SIZE];
/**
* Constructor
*/
Msgbuf() { _size = BUF_SIZE; }
};
template <unsigned BUF_SIZE> class Msgbuf;
}
#endif /* _INCLUDE__BASE__IPC_MSGBUF_H_ */
class Genode::Msgbuf_base
{
public:
enum { MAX_CAP_ARGS = 4 };
private:
friend class Native_utcb;
size_t _size; /* buffer size in bytes */
Native_capability _caps[MAX_CAP_ARGS]; /* capability buffer */
size_t _snd_cap_cnt = 0; /* capability counter */
size_t _rcv_cap_cnt = 0; /* capability counter */
public:
/*************************************************
** 'buf' must be the last member of this class **
*************************************************/
char buf[]; /* begin of actual message buffer */
Msgbuf_base(size_t size) : _size(size) { }
void const * base() const { return &buf; }
/**
* Return size of message buffer
*/
size_t size() const { return _size; }
/**
* Return address of message buffer
*/
void *addr() { return &buf[0]; }
/**
* Reset capability buffer.
*/
void reset()
{
_snd_cap_cnt = 0;
_rcv_cap_cnt = 0;
}
/**
* Return how many capabilities are accepted by this message buffer
*/
size_t cap_rcv_window() { return _rcv_cap_cnt; }
/**
* Set how many capabilities are accepted by this message buffer
*/
void cap_rcv_window(size_t cnt) { _rcv_cap_cnt = cnt; }
/**
* Add capability to buffer
*/
void cap_add(Native_capability const &cap)
{
if (_snd_cap_cnt < MAX_CAP_ARGS)
_caps[_snd_cap_cnt++] = cap;
}
/**
* Return the next capability from the buffer
*/
Native_capability cap_get()
{
return (_rcv_cap_cnt < _snd_cap_cnt)
? _caps[_rcv_cap_cnt++] : Native_capability();
}
};
template <unsigned BUF_SIZE>
class Genode::Msgbuf : public Genode::Msgbuf_base
{
public:
/**************************************************
** 'buf' must be the first member of this class **
**************************************************/
char buf[BUF_SIZE];
/**
* Constructor
*/
Msgbuf() : Msgbuf_base(BUF_SIZE) { }
};
#endif /* _INCLUDE__BASE__IPC_MSGBUF_H_ */
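With this change, the message buffer carries up to MAX_CAP_ARGS capability slots next to the raw payload. A minimal sketch of how the RPC layer drives this interface, assuming the header above is on the include path (the function name and the 256-byte buffer size are merely illustrative):

#include <base/ipc_msgbuf.h>

static void msgbuf_example(Genode::Native_capability session_cap)
{
	using namespace Genode;

	Msgbuf<256> snd_msg;  /* 256 payload bytes plus MAX_CAP_ARGS cap slots */
	Msgbuf<256> rcv_msg;

	snd_msg.cap_add(session_cap);  /* marshal a capability argument */
	rcv_msg.cap_rcv_window(Msgbuf_base::MAX_CAP_ARGS);  /* caps we accept */

	/*
	 * ... the IPC layer copies snd_msg into the UTCB, performs the kernel
	 * call, resets rcv_msg, and fills it back from the UTCB ...
	 */

	Native_capability returned = rcv_msg.cap_get();  /* drain caps in order */
	(void)returned;
}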

View File

@ -0,0 +1,95 @@
/*
* \brief Native capability of base-hw
* \author Stefan Kalkowski
* \date 2015-05-15
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _BASE__NATIVE_CAPABILITY_H_
#define _BASE__NATIVE_CAPABILITY_H_
/* Genode includes */
#include <kernel/interface.h>
#include <base/stdint.h>
namespace Genode { class Native_capability; }
class Genode::Native_capability
{
public:
using Dst = Kernel::capid_t;
private:
Dst _dst;
void _inc() const;
void _dec() const;
public:
struct Raw
{
Dst dst;
/* obsolete in base-hw, but still used in generic code path */
addr_t local_name;
};
/**
* Create an invalid capability
*/
Native_capability() : _dst(Kernel::cap_id_invalid()) { }
/**
* Create a capability out of a kernel's capability id
*/
Native_capability(Kernel::capid_t dst) : _dst(dst) { _inc(); }
/**
* Create a capability from another one
*/
Native_capability(const Native_capability &o) : _dst(o._dst) { _inc(); }
~Native_capability() { _dec(); }
/**
* Return true if this is a valid capability, otherwise false
*/
bool valid() const { return (_dst != Kernel::cap_id_invalid()); }
/*****************
** Accessors **
*****************/
addr_t local_name() const { return _dst; }
Dst dst() const { return _dst; }
/**************************
** Operator overloads **
**************************/
bool operator==(const Native_capability &o) const {
return _dst == o._dst; }
Native_capability& operator=(const Native_capability &o)
{
if (this == &o) return *this;
_dec();
_dst = o._dst;
_inc();
return *this;
}
};
#endif /* _BASE__NATIVE_CAPABILITY_H_ */
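The copy constructor, destructor, and assignment operator above implement local reference counting per capability id; outside of core, dropping the last reference issues 'Kernel::delete_cap()' (see the capability.cc variants later in this commit). A brief sketch of the resulting lifetime semantics ('some_capid' stands for an id received via IPC):

#include <base/native_capability.h>

static void capability_lifetime(Kernel::capid_t some_capid)
{
	using Genode::Native_capability;

	Native_capability a(some_capid);  /* _inc(): local refcount becomes 1 */
	{
		Native_capability b(a);       /* copy: refcount becomes 2 */
	}                                 /* 'b' destroyed: refcount back to 1 */
}                                     /* 'a' destroyed: refcount reaches 0,
                                       * the non-core backend calls
                                       * Kernel::delete_cap() */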

View File

@ -0,0 +1,28 @@
/*
* \brief Native extensions of the Genode environment
* \author Stefan Kalkowski
* \date 2015-05-20
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _BASE__NATIVE_ENV_H_
#define _BASE__NATIVE_ENV_H_
/* Genode includes */
#include <base/stdint.h>
namespace Genode
{
/**
* Upgrade quota of the PD session within my Genode environment
*/
void upgrade_pd_session_quota(Genode::size_t);
};
#endif /* _BASE__NATIVE_ENV_H_ */

View File

@ -6,7 +6,7 @@
*/
/*
* Copyright (C) 2012-2013 Genode Labs GmbH
* Copyright (C) 2012-2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
@ -16,300 +16,170 @@
#define _BASE__NATIVE_TYPES_H_
/* Genode includes */
#include <kernel/interface.h>
#include <base/native_capability.h>
#include <base/stdint.h>
#include <util/string.h>
/* base-hw includes */
#include <kernel/log.h>
#include <base/native_capability.h>
#include <base/ipc_msgbuf.h>
namespace Genode
{
class Platform_thread;
class Native_thread;
typedef unsigned Native_thread_id;
struct Native_thread
{
Platform_thread * platform_thread;
Native_thread_id thread_id;
};
using Native_thread_id = Kernel::capid_t;
typedef int Native_connection_state;
/* FIXME needs to be MMU dependent */
enum { MIN_MAPPING_SIZE_LOG2 = 12 };
/**
* Return kernel thread-name of the caller
*/
Native_thread_id thread_get_my_native_id();
/**
* Return an invalid kernel thread-name
*/
inline Native_thread_id thread_invalid_id() { return 0; }
/**
* Data bunch with variable size that is communicated between threads
*
* \param MAX_SIZE maximum size the object is allowed to take
*/
template <size_t MAX_SIZE>
struct Message_tpl;
/**
* Information that a thread creator hands out to a new thread
*/
class Start_info;
/**
* Info that a core-thread creator hands out to Platform_thread::start
* Coherent address region
*/
class Core_start_info;
struct Native_region;
struct Native_config;
struct Native_pd_args { };
/**
* Get the minimal supported page-size log 2
*/
constexpr size_t get_page_size_log2() { return 12; }
/**
* Get the minimal supported page-size
*/
constexpr size_t get_page_size() { return 1 << get_page_size_log2(); }
/**
* Memory region that is exclusive to every thread and known by the kernel
*/
class Native_utcb;
struct Cap_dst_policy
{
typedef Native_thread_id Dst;
/**
* Validate capability destination
*/
static bool valid(Dst pt) { return pt != 0; }
/**
* Get invalid capability destination
*/
static Dst invalid() { return 0; }
/**
* Copy capability 'src' to a given memory destination 'dst'
*/
static void
copy(void * dst, Native_capability_tpl<Cap_dst_policy> * src);
};
typedef Native_capability_tpl<Cap_dst_policy> Native_capability;
/**
* Coherent address region
*/
struct Native_region
{
addr_t base;
size_t size;
};
struct Native_config
{
/**
* Thread-context area configuration.
*/
static constexpr addr_t context_area_virtual_base() {
return 0xe0000000UL; }
static constexpr addr_t context_area_virtual_size() {
return 0x10000000UL; }
/**
* Size of virtual address region holding the context of one thread
*/
static constexpr addr_t context_virtual_size() { return 0x00100000UL; }
};
struct Native_pd_args { };
}
template <Genode::size_t MAX_SIZE>
class Genode::Message_tpl
struct Genode::Native_thread
{
private:
size_t _data_size;
uint8_t _data[];
/**
* Return size of payload-preceding meta data
*/
size_t _header_size() const { return (addr_t)_data - (addr_t)this; }
/**
* Return maximum payload size
*/
size_t _max_data_size() const { return MAX_SIZE - _header_size(); }
/**
* Return size of header and current payload
*/
size_t _size() const { return _header_size() + _data_size; }
public:
/**
* Get properties of receive buffer
*
* \return buf_base base of receive buffer
* \return buf_size size of receive buffer
*/
void buffer_info(void * & buf_base, size_t & buf_size) const
{
buf_base = (void *)this;
buf_size = MAX_SIZE;
}
/**
* Get properties of request message and receive buffer
*
* \return buf_base base of receive buffer and request message
* \return buf_size size of receive buffer
* \return msg_size size of request message
*/
void request_info(void * & buf_base, size_t & buf_size,
size_t & msg_size) const
{
buf_base = (void *)this;
buf_size = MAX_SIZE;
msg_size = _size();
}
/**
* Get properties of reply message
*
* \return msg_base base of reply message
* \return msg_size size of reply message
*/
void reply_info(void * & msg_base, size_t & msg_size) const
{
msg_base = (void *)this;
msg_size = _size();
}
/**
* Install message that shall be send
*
* \param data base of payload
* \param data_size size of payload
* \param name local name that shall be the first payload word
*/
void prepare_send(void * const data, size_t data_size,
unsigned long const name)
{
/* limit data size */
if (data_size > _max_data_size()) {
Kernel::log() << "oversized message outgoing\n";
data_size = _max_data_size();
}
/* copy data */
*(unsigned long *)_data = name;
void * const data_dst = (void *)((addr_t)_data + sizeof(name));
void * const data_src = (void *)((addr_t)data + sizeof(name));
memcpy(data_dst, data_src, data_size - sizeof(name));
_data_size = data_size;
}
/**
* Read out message that was received
*
* \param buf_base base of read buffer
* \param buf_size size of read buffer
*/
void finish_receive(void * const buf_base, size_t const buf_size)
{
/* limit data size */
if (_data_size > buf_size) {
Kernel::log() << "oversized message incoming\n";
_data_size = buf_size;
}
/* copy data */
memcpy(buf_base, _data, _data_size);
}
Platform_thread * platform_thread;
Native_capability cap;
};
class Genode::Start_info
/**
* Coherent address region
*/
struct Genode::Native_region
{
private:
Native_thread_id _thread_id;
Native_capability _utcb_ds;
public:
/**
* Set-up valid startup message
*
* \param thread_id kernel name of the thread that is started
*/
void init(Native_thread_id const thread_id,
Native_capability const & utcb_ds)
{
_thread_id = thread_id;
_utcb_ds = utcb_ds;
}
/***************
** Accessors **
***************/
Native_thread_id thread_id() const { return _thread_id; }
Native_capability utcb_ds() const { return _utcb_ds; }
addr_t base;
size_t size;
};
class Genode::Core_start_info
struct Genode::Native_config
{
private:
/**
* Thread-context area configuration.
*/
static constexpr addr_t context_area_virtual_base() {
return 0xe0000000UL; }
static constexpr addr_t context_area_virtual_size() {
return 0x10000000UL; }
unsigned _cpu_id;
public:
/**
* Set-up valid core startup-message for starting on 'cpu'
*/
void init(unsigned const cpu) { _cpu_id = cpu; }
/***************
** Accessors **
***************/
unsigned cpu_id() const { return _cpu_id; }
/**
* Size of virtual address region holding the context of one thread
*/
static constexpr addr_t context_virtual_size() { return 0x00100000UL; }
};
class Genode::Native_utcb
{
public:
enum { MAX_CAP_ARGS = Msgbuf_base::MAX_CAP_ARGS};
enum Offsets { PARENT, UTCB_DATASPACE, THREAD_MYSELF };
private:
uint8_t _data[1 << MIN_MAPPING_SIZE_LOG2];
Kernel::capid_t _caps[MAX_CAP_ARGS]; /* capability buffer */
size_t _cap_cnt = 0; /* capability counter */
size_t _size = 0; /* bytes to transfer */
uint8_t _buf[get_page_size() - sizeof(_caps) -
sizeof(_cap_cnt) - sizeof(_size)];
public:
typedef Message_tpl<sizeof(_data)/sizeof(_data[0])> Message;
/***************
** Accessors **
***************/
Message * message() const { return (Message *)_data; }
Start_info * start_info() const { return (Start_info *)_data; }
Core_start_info * core_start_info() const
Native_utcb& operator= (const Native_utcb &o)
{
return (Core_start_info *)_data;
_cap_cnt = 0;
_size = o._size;
memcpy(_buf, o._buf, _size);
return *this;
}
size_t size() const { return sizeof(_data)/sizeof(_data[0]); }
/**
* Set the destination capability id (server object identity)
*/
void destination(Kernel::capid_t id) {
*reinterpret_cast<long*>(_buf) = id; }
void * base() const { return (void *)_data; }
/**
* Return the count of capabilities in the UTCB
*/
size_t cap_cnt() { return _cap_cnt; }
/**
* Set the count of capabilities in the UTCB
*/
void cap_cnt(size_t cnt) { _cap_cnt = cnt; }
/**
* Return the start address of the payload data
*/
void const * base() const { return &_buf; }
/**
* Copy data from the message buffer 'o' to this UTCB
*/
void copy_from(Msgbuf_base &o, size_t size)
{
_size = size;
_cap_cnt = o._snd_cap_cnt;
for (unsigned i = 0; i < _cap_cnt; i++)
_caps[i] = o._caps[i].dst();
memcpy(_buf, o.buf, min(_size, o._size));
}
/**
* Copy data from this UTCB to the message buffer 'o'
*/
void copy_to(Msgbuf_base &o)
{
o._snd_cap_cnt = _cap_cnt;
for (unsigned i = 0; i < _cap_cnt; i++) o._caps[i] = _caps[i];
memcpy(o.buf, _buf, min(_size, o._size));
}
/**
* Return the capability id at index 'i'
*/
Kernel::capid_t cap_get(unsigned i) {
return (i < _cap_cnt) ? _caps[i] : Kernel::cap_id_invalid(); }
/**
* Set the capability id 'cap_id' at the next index
*/
void cap_add(Kernel::capid_t cap_id) {
if (_cap_cnt < MAX_CAP_ARGS) _caps[_cap_cnt++] = cap_id; }
};
namespace Genode
{
static constexpr addr_t VIRT_ADDR_SPACE_START = 0x1000;
@ -319,9 +189,8 @@ namespace Genode
{
return (Native_utcb *)
((VIRT_ADDR_SPACE_START + VIRT_ADDR_SPACE_SIZE - sizeof(Native_utcb))
& ~((1 << MIN_MAPPING_SIZE_LOG2) - 1));
& ~(get_page_size() - 1));
}
}
#endif /* _BASE__NATIVE_TYPES_H_ */
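The UTCB is the hand-over point between user-level message buffers and the kernel: 'copy_from' stages payload bytes and capability ids for sending, while 'copy_to' drains a received message into a 'Msgbuf_base', turning the ids back into 'Native_capability' objects. A condensed sketch of that round trip (in reality the kernel copies between the caller's and the callee's distinct UTCBs; here both steps are shown on one UTCB only for illustration):

#include <base/native_types.h>

static void utcb_round_trip(Genode::Native_utcb &utcb,
                            Genode::Msgbuf_base &snd_msg,
                            Genode::Msgbuf_base &rcv_msg)
{
	/* sender: stage payload bytes and capability ids in the UTCB
	 * (the real code passes the number of marshalled bytes instead of
	 * the full buffer size) */
	utcb.copy_from(snd_msg, snd_msg.size());

	/* ... kernel IPC transfers the UTCB content to the callee ... */

	/* receiver: restore payload and re-create Native_capability objects */
	rcv_msg.reset();
	utcb.copy_to(rcv_msg);
}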

View File

@ -0,0 +1,33 @@
/*
* \brief Connection to CAP service
* \author Stefan Kalkowski
* \author Norman Feske
* \date 2015-05-20
*
* This is a shadow copy of the generic header in base,
* due to higher memory donation requirements in base-hw
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _INCLUDE__CAP_SESSION__CONNECTION_H_
#define _INCLUDE__CAP_SESSION__CONNECTION_H_
#include <cap_session/client.h>
#include <base/connection.h>
namespace Genode { struct Cap_connection; }
struct Genode::Cap_connection : Connection<Cap_session>, Cap_session_client
{
Cap_connection() : Connection<Cap_session>(session("ram_quota=8K")),
Cap_session_client(cap()) { }
};
#endif /* _INCLUDE__CAP_SESSION__CONNECTION_H_ */

View File

@ -17,18 +17,13 @@
/* base-hw includes */
#include <kernel/interface_support.h>
namespace Genode
{
class Native_utcb;
class Platform_pd;
}
namespace Kernel
{
typedef Genode::addr_t addr_t;
typedef Genode::size_t size_t;
typedef Genode::Platform_pd Platform_pd;
typedef Genode::Native_utcb Native_utcb;
using addr_t = Genode::addr_t;
using size_t = Genode::size_t;
using capid_t = Genode::uint16_t;
constexpr capid_t cap_id_invalid() { return 0; }
/**
* Kernel names of the kernel calls
@ -47,6 +42,7 @@ namespace Kernel
constexpr Call_arg call_id_print_char() { return 11; }
constexpr Call_arg call_id_update_data_region() { return 12; }
constexpr Call_arg call_id_update_instr_region() { return 13; }
constexpr Call_arg call_id_delete_cap() { return 14; }
/*****************************************************************
@ -97,11 +93,11 @@ namespace Kernel
/**
* Cancel blocking of a thread of the current domain if possible
*
* \param thread_id kernel name of the targeted thread
* \param thread_id capability id of the targeted thread
*
* \return whether the thread was in a cancelable blocking state beforehand
*/
inline bool resume_local_thread(unsigned const thread_id)
inline bool resume_local_thread(capid_t const thread_id)
{
return call(call_id_resume_local_thread(), thread_id);
}
@ -110,12 +106,12 @@ namespace Kernel
/**
* Let the current thread give up its remaining timeslice
*
* \param thread_id kernel name of the benefited thread
* \param thread_id capability id of the benefited thread
*
* If 'thread_id' is valid, the call additionally resumes the targeted
* thread.
*/
inline void yield_thread(unsigned const thread_id)
inline void yield_thread(capid_t const thread_id)
{
call(call_id_yield_thread(), thread_id);
}
@ -145,38 +141,43 @@ namespace Kernel
/**
* Send request message and await receipt of corresponding reply message
*
* \param thread_id kernel name of targeted thread
* \param thread_id capability id of targeted thread
*
* \retval 0 succeeded
* \retval -1 failed
* \retval -2 failed due to out-of-memory for capability reception
*
* If the call returns successfully, the received message is located at
* the base of the caller's userland thread-context.
*/
inline int send_request_msg(unsigned const thread_id)
inline int send_request_msg(capid_t const thread_id, unsigned rcv_caps)
{
return call(call_id_send_request_msg(), thread_id);
return call(call_id_send_request_msg(), thread_id, rcv_caps);
}
/**
* Await receipt of request message
*
* \param rcv_caps number of capabilities willing to accept
*
* \retval 0 succeeded
* \retval -1 failed
* \retval -1 canceled
* \retval -2 failed due to out-of-memory for capability reception
*
* If the call returns successfully, the received message is located at
* the base of the caller's userland thread-context.
*/
inline int await_request_msg()
inline int await_request_msg(unsigned rcv_caps)
{
return call(call_id_await_request_msg());
return call(call_id_await_request_msg(), rcv_caps);
}
/**
* Reply to lastly received request message
*
* \param rcv_caps number of capabilities to accept when awaiting again
* \param await_request_msg whether the call shall await a request message
*
* \retval 0 await_request_msg == 0 or request-message receipt succeeded
@ -185,9 +186,9 @@ namespace Kernel
* If the call returns successfully and await_request_msg == 1, the received
* message is located at the base of the caller's userland thread-context.
*/
inline int send_reply_msg(bool const await_request_msg)
inline int send_reply_msg(unsigned rcv_caps, bool const await_request_msg)
{
return call(call_id_send_reply_msg(), await_request_msg);
return call(call_id_send_reply_msg(), rcv_caps, await_request_msg);
}
@ -206,7 +207,7 @@ namespace Kernel
/**
* Await any context of a receiver and optionally ack a context before
*
* \param receiver_id kernel name of the targeted signal receiver
* \param receiver_id capability id of the targeted signal receiver
*
* \retval 0 succeeded
* \retval -1 failed
@ -221,7 +222,7 @@ namespace Kernel
* deliver again unless its last delivery has been acknowledged via
* ack_signal.
*/
inline int await_signal(unsigned const receiver_id)
inline int await_signal(capid_t const receiver_id)
{
return call(call_id_await_signal(), receiver_id);
}
@ -230,12 +231,12 @@ namespace Kernel
/**
* Return whether any context of a receiver is pending
*
* \param receiver kernel name of the targeted signal receiver
* \param receiver capability id of the targeted signal receiver
*
* \retval 0 none of the contexts is pending or the receiver doesn't exist
* \retval 1 a context of the signal receiver is pending
*/
inline bool signal_pending(unsigned const receiver)
inline bool signal_pending(capid_t const receiver)
{
return call(call_id_signal_pending(), receiver);
}
@ -244,13 +245,13 @@ namespace Kernel
/**
* Trigger a specific signal context
*
* \param context kernel name of the targeted signal context
* \param context capability id of the targeted signal context
* \param num how often the context shall be triggered by this call
*
* \retval 0 succeeded
* \retval -1 failed
*/
inline int submit_signal(unsigned const context, unsigned const num)
inline int submit_signal(capid_t const context, unsigned const num)
{
return call(call_id_submit_signal(), context, num);
}
@ -259,9 +260,9 @@ namespace Kernel
/**
* Acknowledge the processing of the last delivery of a signal context
*
* \param context kernel name of the targeted signal context
* \param context capability id of the targeted signal context
*/
inline void ack_signal(unsigned const context)
inline void ack_signal(capid_t const context)
{
call(call_id_ack_signal(), context);
}
@ -270,15 +271,25 @@ namespace Kernel
/**
* Halt processing of a signal context synchronously
*
* \param context kernel name of the targeted signal context
* \param context capability id of the targeted signal context
*
* \retval 0 succeeded
* \retval -1 failed
*/
inline int kill_signal_context(unsigned const context)
inline int kill_signal_context(capid_t const context)
{
return call(call_id_kill_signal_context(), context);
}
/**
* Delete a capability id
*
* \param cap capability id to delete
*/
inline void delete_cap(capid_t const cap)
{
call(call_id_delete_cap(), cap);
}
}
#endif /* _KERNEL__INTERFACE_H_ */
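All IPC system calls now take an explicit receive window for capabilities, and a return value of -2 signals that the kernel ran out of memory for capability meta data. A stripped-down server loop against this interface could look as follows (dispatching is elided; the real loop lives in base/ipc.cc and reacts to -2 by upgrading the PD-session quota):

#include <kernel/interface.h>

static void serve()
{
	using namespace Kernel;

	/* block for the first request, accepting up to four capabilities */
	if (await_request_msg(4) < 0)
		return;

	for (;;) {

		/* ... dispatch the request found in the caller's UTCB ... */

		/* reply and atomically wait for the next request */
		if (send_reply_msg(4, true) < 0)
			return;
	}
}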

View File

@ -0,0 +1,42 @@
/*
* \brief Connection to PD service
* \author Stefan Kalkowski
* \author Norman Feske
* \date 2015-05-20
*
* This is a shadow copy of the generic header in base,
* due to higher memory donation requirements in base-hw
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _INCLUDE__PD_SESSION__CONNECTION_H_
#define _INCLUDE__PD_SESSION__CONNECTION_H_
#include <pd_session/client.h>
#include <base/connection.h>
namespace Genode { struct Pd_connection; }
struct Genode::Pd_connection : Connection<Pd_session>, Pd_session_client
{
enum { RAM_QUOTA = 20*1024 };
/**
* Constructor
*
* \param label session label
*/
Pd_connection(char const *label = "", Native_pd_args const *pd_args = 0)
: Connection<Pd_session>(session("ram_quota=%u, label=\"%s\"",
RAM_QUOTA, label)),
Pd_session_client(cap()) { }
};
#endif /* _INCLUDE__PD_SESSION__CONNECTION_H_ */

View File

@ -0,0 +1,35 @@
/*
* \brief Connection to signal service
* \author Stefan Kalkowski
* \author Norman Feske
* \date 2015-05-20
*
* This is a shadow copy of the generic header in base,
* due to higher memory donation requirements in base-hw
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _INCLUDE__SIGNAL_SESSION__CONNECTION_H_
#define _INCLUDE__SIGNAL_SESSION__CONNECTION_H_
#include <signal_session/client.h>
#include <base/connection.h>
namespace Genode { struct Signal_connection; }
struct Genode::Signal_connection : Connection<Signal_session>,
Signal_session_client
{
Signal_connection()
: Connection<Signal_session>(session("ram_quota=32K")),
Signal_session_client(cap()) { }
};
#endif /* _INCLUDE__SIGNAL_SESSION__CONNECTION_H_ */

View File

@ -10,7 +10,6 @@ LIBS += cxx
# add C++ sources
SRC_CC += ipc/ipc.cc
SRC_CC += ipc/ipc_marshal_cap.cc
SRC_CC += avl_tree/avl_tree.cc
SRC_CC += allocator/slab.cc
SRC_CC += allocator/allocator_avl.cc

View File

@ -15,8 +15,12 @@ SRC_CC += env/context_area.cc
SRC_CC += env/reinitialize.cc
SRC_CC += thread/start.cc
SRC_CC += irq/platform.cc
SRC_CC += env.cc
SRC_CC += capability.cc
# add include paths
INC_DIR += $(REP_DIR)/src/base/lock
INC_DIR += $(BASE_DIR)/src/base/lock
INC_DIR += $(BASE_DIR)/src/base/env
# declare source locations

View File

@ -19,7 +19,6 @@ INC_DIR += $(BASE_DIR)/src/platform
# add C++ sources
SRC_CC += console.cc
SRC_CC += cap_session_component.cc
SRC_CC += cpu_session_component.cc
SRC_CC += cpu_session_support.cc
SRC_CC += core_rm_session.cc
@ -42,6 +41,7 @@ SRC_CC += rom_session_component.cc
SRC_CC += signal_session_component.cc
SRC_CC += trace_session_component.cc
SRC_CC += thread_start.cc
SRC_CC += env.cc
SRC_CC += rm_session_support.cc
SRC_CC += pager.cc
SRC_CC += _main.cc
@ -54,7 +54,9 @@ SRC_CC += kernel/ipc_node.cc
SRC_CC += kernel/irq.cc
SRC_CC += kernel/pd.cc
SRC_CC += kernel/cpu.cc
SRC_CC += kernel/object.cc
SRC_CC += init_main_thread.cc
SRC_CC += capability.cc
# add assembly sources
SRC_S += boot_modules.s

View File

@ -13,7 +13,6 @@ SRC_CC += spec/arndale/pic.cc
SRC_CC += spec/arndale/platform_services.cc
SRC_CC += spec/arm_v7/kernel/vm_thread.cc
SRC_CC += spec/arm_v7/virtualization/kernel/vm.cc
SRC_CC += spec/arm_v7/virtualization/kernel/vm_thread.cc
SRC_CC += spec/arm_v7/virtualization/kernel/cpu_context.cc
SRC_CC += spec/arm_v7/vm_session_component.cc
SRC_CC += spec/arm_v7/virtualization/vm_session_component.cc

View File

@ -15,7 +15,6 @@ SRC_CC += spec/imx53/trustzone/platform_services.cc
SRC_CC += spec/imx53/trustzone/pic.cc
SRC_CC += spec/arm_v7/kernel/vm_thread.cc
SRC_CC += spec/arm_v7/trustzone/kernel/vm.cc
SRC_CC += spec/arm_v7/trustzone/kernel/vm_thread.cc
SRC_CC += spec/arm_v7/vm_session_component.cc
SRC_CC += spec/arm_v7/trustzone/vm_session_component.cc

View File

@ -0,0 +1,38 @@
/*
* \brief Implementation of platform-specific capabilities
* \author Stefan Kalkowski
* \date 2015-05-20
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <spin_lock.h>
#include <base/capability.h>
static volatile int spinlock = SPINLOCK_UNLOCKED;
static Genode::uint8_t ref_counter[1 << (sizeof(Kernel::capid_t)*8)];
void Genode::Native_capability::_inc() const
{
if (!valid()) return;
spinlock_lock(&spinlock);
ref_counter[_dst]++;
spinlock_unlock(&spinlock);
}
void Genode::Native_capability::_dec() const
{
if (!valid()) return;
spinlock_lock(&spinlock);
if (!--ref_counter[_dst]) { Kernel::delete_cap(_dst); }
spinlock_unlock(&spinlock);
}

View File

@ -0,0 +1,26 @@
/*
* \brief Implementation of non-core PD session upgrade
* \author Stefan Kalkowski
* \date 2015-05-20
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <pd_session/client.h>
#include <base/env.h>
#include <base/native_env.h>
void Genode::upgrade_pd_session_quota(Genode::size_t quota)
{
char buf[128];
snprintf(buf, sizeof(buf), "ram_quota=%zu", quota);
Pd_session_capability cap =
*static_cast<Pd_session_client*>(env()->pd_session());
env()->parent()->upgrade(cap, buf);
}

View File

@ -12,19 +12,26 @@
*/
/* Genode includes */
#include <base/env.h>
#include <base/ipc.h>
#include <base/allocator.h>
#include <base/thread.h>
#include <base/native_env.h>
#include <util/construct_at.h>
#include <util/retry.h>
/* base-hw includes */
#include <kernel/interface.h>
#include <kernel/log.h>
namespace Hw { extern Genode::Untyped_capability _main_thread_cap; }
using namespace Genode;
enum
{
/* size of the callee-local name of a targeted RPC object */
RPC_OBJECT_ID_SIZE = sizeof(umword_t),
RPC_OBJECT_ID_SIZE = sizeof(Native_thread_id),
/*
* The RPC framework marshalls a return value into reply messages to
@ -35,6 +42,18 @@ enum
};
/*****************************
** IPC marshalling support **
*****************************/
void Ipc_ostream::_marshal_capability(Native_capability const &cap) {
_snd_msg->cap_add(cap); }
void Ipc_istream::_unmarshal_capability(Native_capability &cap) {
cap = _rcv_msg->cap_get(); }
/*****************
** Ipc_ostream **
*****************/
@ -44,7 +63,8 @@ Ipc_ostream::Ipc_ostream(Native_capability dst, Msgbuf_base *snd_msg)
Ipc_marshaller(&snd_msg->buf[0], snd_msg->size()),
_snd_msg(snd_msg), _dst(dst)
{
_write_offset = RPC_OBJECT_ID_SIZE;
_write_offset = align_natural<unsigned>(RPC_OBJECT_ID_SIZE);
_snd_msg->reset();
}
@ -62,9 +82,10 @@ void Ipc_istream::_wait()
Ipc_istream::Ipc_istream(Msgbuf_base *rcv_msg)
:
Ipc_unmarshaller(&rcv_msg->buf[0], rcv_msg->size()),
Native_capability(Genode::thread_get_my_native_id(), 0),
Native_capability(Thread_base::myself() ? Thread_base::myself()->tid().cap
: Hw::_main_thread_cap),
_rcv_msg(rcv_msg), _rcv_cs(-1)
{ _read_offset = RPC_OBJECT_ID_SIZE; }
{ _read_offset = align_natural<unsigned>(RPC_OBJECT_ID_SIZE); }
Ipc_istream::~Ipc_istream() { }
@ -76,24 +97,35 @@ Ipc_istream::~Ipc_istream() { }
void Ipc_client::_call()
{
/* send request and receive corresponding reply */
unsigned const local_name = Ipc_ostream::_dst.local_name();
Native_utcb * const utcb = Thread_base::myself()->utcb();
utcb->message()->prepare_send(_snd_msg->buf, _write_offset, local_name);
if (Kernel::send_request_msg(Ipc_ostream::_dst.dst())) {
PERR("failed to receive reply");
throw Blocking_canceled();
}
utcb->message()->finish_receive(_rcv_msg->buf, _rcv_msg->size());
retry<Genode::Allocator::Out_of_memory>(
[&] () {
/* reset unmarshaller */
_write_offset = _read_offset = RPC_OBJECT_ID_SIZE;
/* send request and receive corresponding reply */
Thread_base::myself()->utcb()->copy_from(*_snd_msg, _write_offset);
switch (Kernel::send_request_msg(Ipc_ostream::dst().dst(),
_rcv_msg->cap_rcv_window())) {
case -1: throw Blocking_canceled();
case -2: throw Allocator::Out_of_memory();
default:
_rcv_msg->reset();
_snd_msg->reset();
Thread_base::myself()->utcb()->copy_to(*_rcv_msg);
/* reset unmarshaller */
_write_offset = _read_offset =
align_natural<unsigned>(RPC_OBJECT_ID_SIZE);
}
},
[&] () { upgrade_pd_session_quota(3*4096); });
}
Ipc_client::Ipc_client(Native_capability const &srv, Msgbuf_base *snd_msg,
Msgbuf_base *rcv_msg, unsigned short)
: Ipc_istream(rcv_msg), Ipc_ostream(srv, snd_msg), _result(0) { }
Msgbuf_base *rcv_msg, unsigned short rcv_caps)
: Ipc_istream(rcv_msg), Ipc_ostream(srv, snd_msg), _result(0) {
rcv_msg->cap_rcv_window(rcv_caps); }
/****************
@ -104,8 +136,7 @@ Ipc_server::Ipc_server(Msgbuf_base *snd_msg,
Msgbuf_base *rcv_msg) :
Ipc_istream(rcv_msg),
Ipc_ostream(Native_capability(), snd_msg),
_reply_needed(false)
{ }
_reply_needed(false) { }
void Ipc_server::_prepare_next_reply_wait()
@ -114,34 +145,41 @@ void Ipc_server::_prepare_next_reply_wait()
_reply_needed = true;
/* leave space for RPC method return value */
_write_offset = RPC_OBJECT_ID_SIZE + RPC_RETURN_VALUE_SIZE;
_write_offset = align_natural<unsigned>(RPC_OBJECT_ID_SIZE +
RPC_RETURN_VALUE_SIZE);
/* reset unmarshaller */
_read_offset = RPC_OBJECT_ID_SIZE;
_read_offset = align_natural<unsigned>(RPC_OBJECT_ID_SIZE);
}
void Ipc_server::_wait()
{
/* receive request */
if (Kernel::await_request_msg()) {
PERR("failed to receive request");
throw Blocking_canceled();
}
Native_utcb * const utcb = Thread_base::myself()->utcb();
utcb->message()->finish_receive(_rcv_msg->buf, _rcv_msg->size());
retry<Genode::Allocator::Out_of_memory>(
[&] () {
/* update server state */
_prepare_next_reply_wait();
/* receive request */
switch (Kernel::await_request_msg(Msgbuf_base::MAX_CAP_ARGS)) {
case -1: throw Blocking_canceled();
case -2: throw Allocator::Out_of_memory();
default:
_rcv_msg->reset();
Thread_base::myself()->utcb()->copy_to(*_rcv_msg);
/* update server state */
_prepare_next_reply_wait();
}
},
[&] () { upgrade_pd_session_quota(3*4096); });
}
void Ipc_server::_reply()
{
unsigned const local_name = Ipc_ostream::_dst.local_name();
Native_utcb * const utcb = Thread_base::myself()->utcb();
utcb->message()->prepare_send(_snd_msg->buf, _write_offset, local_name);
Kernel::send_reply_msg(false);
Thread_base::myself()->utcb()->copy_from(*_snd_msg, _write_offset);
_snd_msg->reset();
Kernel::send_reply_msg(0, false);
}
@ -152,16 +190,23 @@ void Ipc_server::_reply_wait()
_wait();
return;
}
/* send reply and receive next request */
unsigned const local_name = Ipc_ostream::_dst.local_name();
Native_utcb * const utcb = Thread_base::myself()->utcb();
utcb->message()->prepare_send(_snd_msg->buf, _write_offset, local_name);
if (Kernel::send_reply_msg(true)) {
PERR("failed to receive request");
throw Blocking_canceled();
}
utcb->message()->finish_receive(_rcv_msg->buf, _rcv_msg->size());
/* update server state */
_prepare_next_reply_wait();
retry<Genode::Allocator::Out_of_memory>(
[&] () {
/* send reply and receive next request */
Thread_base::myself()->utcb()->copy_from(*_snd_msg, _write_offset);
switch (Kernel::send_reply_msg(Msgbuf_base::MAX_CAP_ARGS, true)) {
case -1: throw Blocking_canceled();
case -2: throw Allocator::Out_of_memory();
default:
_rcv_msg->reset();
_snd_msg->reset();
Thread_base::myself()->utcb()->copy_to(*_rcv_msg);
/* update server state */
_prepare_next_reply_wait();
}
},
[&] () { upgrade_pd_session_quota(3*4096); });
}
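The pattern used in '_call', '_wait', and '_reply_wait' is always the same: attempt the kernel IPC, map a -2 result to 'Allocator::Out_of_memory', and let 'retry' upgrade the PD-session quota before trying again. Distilled to its core (the commented-out call stands for any of the three kernel IPC operations):

#include <base/allocator.h>
#include <base/native_env.h>
#include <util/retry.h>

static void ipc_with_quota_upgrade()
{
	Genode::retry<Genode::Allocator::Out_of_memory>(
		[&] () {
			/* perform the kernel IPC; a result of -2 means the kernel
			 * lacks memory for capability meta data */
			// if (Kernel::send_request_msg(dst, rcv_caps) == -2)
			//     throw Genode::Allocator::Out_of_memory();
		},
		[&] () {
			/* donate more RAM to our own PD session, then retry */
			Genode::upgrade_pd_session_quota(3*4096);
		});
}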

View File

@ -18,13 +18,15 @@
#include <base/native_types.h>
#include <base/thread.h>
extern Genode::Native_thread_id _main_thread_id;
namespace Hw {
extern Genode::Untyped_capability _main_thread_cap;
}
/**
* Yield execution time-slice of current thread
*/
static inline void thread_yield() { Kernel::yield_thread(0); }
static inline void thread_yield() {
Kernel::yield_thread(Kernel::cap_id_invalid()); }
/**
@ -33,7 +35,7 @@ static inline void thread_yield() { Kernel::yield_thread(0); }
static inline Genode::Native_thread_id
native_thread_id(Genode::Thread_base * const t)
{
return t ? t->tid().thread_id : _main_thread_id;
return t ? t->tid().cap.dst() : Hw::_main_thread_cap.dst();
}

View File

@ -0,0 +1,99 @@
/*
* \brief base-hw specific part of RPC framework
* \author Stefan Kalkowski
* \date 2015-03-05
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/rpc_server.h>
#include <base/sleep.h>
#include <base/env.h>
#include <util/retry.h>
#include <cap_session/client.h>
using namespace Genode;
/***********************
** Server entrypoint **
***********************/
Untyped_capability Rpc_entrypoint::_manage(Rpc_object_base *obj)
{
Untyped_capability new_obj_cap =
retry<Genode::Cap_session::Out_of_metadata>(
[&] () { return _cap_session->alloc(_cap); },
[&] () {
Cap_session_client *client =
dynamic_cast<Cap_session_client*>(_cap_session);
if (client)
env()->parent()->upgrade(*client, "ram_quota=16K");
});
/* add server object to object pool */
obj->cap(new_obj_cap);
insert(obj);
/* return capability that uses the object id as badge */
return new_obj_cap;
}
void Rpc_entrypoint::entry()
{
Ipc_server srv(&_snd_buf, &_rcv_buf);
_ipc_server = &srv;
_cap = srv;
_cap_valid.unlock();
/*
* Now, the capability of the server activation is initialized
* and can be passed around. However, the processing of capability
* invocations should not happen until the activation-using server
* is completely initialized. Thus, we wait until the activation
* gets explicitly unblocked by calling 'Rpc_entrypoint::activate()'.
*/
_delay_start.lock();
while (!_exit_handler.exit) {
int opcode = 0;
srv >> IPC_REPLY_WAIT >> opcode;
/* set default return value */
srv.ret(Ipc_client::ERR_INVALID_OBJECT);
/* atomically lookup and lock referenced object */
Object_pool<Rpc_object_base>::Guard curr_obj(lookup_and_lock(srv.badge()));
if (!curr_obj)
continue;
{
Lock::Guard lock_guard(_curr_obj_lock);
_curr_obj = curr_obj;
}
/* dispatch request */
try { srv.ret(_curr_obj->dispatch(opcode, srv, srv)); }
catch (Blocking_canceled) { }
{
Lock::Guard lock_guard(_curr_obj_lock);
_curr_obj = 0;
}
}
/* answer exit call, thereby wake up '~Rpc_entrypoint' */
srv << IPC_REPLY;
/* defer the destruction of 'Ipc_server' until '~Rpc_entrypoint' is ready */
_delay_exit.lock();
}

View File

@ -104,7 +104,7 @@ Signal_receiver::Signal_receiver()
return;
}
PINF("upgrading quota donation for SIGNAL session");
env()->parent()->upgrade(s->cap(), "ram_quota=4K");
env()->parent()->upgrade(s->cap(), "ram_quota=8K");
session_upgraded = 1;
}
}
@ -159,7 +159,7 @@ Signal_context_capability Signal_receiver::manage(Signal_context * const c)
return Signal_context_capability();
}
PINF("upgrading quota donation for signal session");
env()->parent()->upgrade(s->cap(), "ram_quota=4K");
env()->parent()->upgrade(s->cap(), "ram_quota=8K");
session_upgraded = 1;
}
}
@ -193,8 +193,9 @@ Signal Signal_receiver::wait_for_signal()
PERR("failed to receive signal");
return Signal(Signal::Data());
}
/* get signal data */
Signal s(*(Signal::Data *)Thread_base::myself()->utcb());
Signal s(*(Signal::Data *)Thread_base::myself()->utcb()->base());
return s;
}

View File

@ -22,18 +22,10 @@
using namespace Genode;
Ram_dataspace_capability _main_thread_utcb_ds;
Native_thread_id _main_thread_id;
/**************************
** Native types support **
**************************/
Native_thread_id Genode::thread_get_my_native_id()
{
Thread_base * const t = Thread_base::myself();
return t ? t->tid().thread_id : _main_thread_id;
namespace Hw {
Ram_dataspace_capability _main_thread_utcb_ds;
Untyped_capability _main_thread_cap;
Untyped_capability _parent_cap;
}
@ -44,15 +36,17 @@ Native_thread_id Genode::thread_get_my_native_id()
void prepare_init_main_thread()
{
using namespace Genode;
using namespace Hw;
/*
* Make data from the startup info persistently available by copying it
* before the UTCB gets polluted by the following function calls.
*/
Native_utcb * const utcb = Thread_base::myself()->utcb();
_main_thread_id = utcb->start_info()->thread_id();
_main_thread_utcb_ds =
reinterpret_cap_cast<Ram_dataspace>(utcb->start_info()->utcb_ds());
Native_utcb * utcb = Thread_base::myself()->utcb();
_parent_cap = utcb->cap_get(Native_utcb::PARENT);
Untyped_capability ds_cap(utcb->cap_get(Native_utcb::UTCB_DATASPACE));
_main_thread_utcb_ds = reinterpret_cap_cast<Ram_dataspace>(ds_cap);
_main_thread_cap = utcb->cap_get(Native_utcb::THREAD_MYSELF);
}
@ -78,8 +72,5 @@ void Thread_base::_thread_start()
Genode::sleep_forever();
}
void Thread_base::_thread_bootstrap()
{
Native_utcb * const utcb = Thread_base::myself()->utcb();
_tid.thread_id = utcb->start_info()->thread_id();
}
void Thread_base::_thread_bootstrap() {
_tid.cap = myself()->utcb()->cap_get(Native_utcb::THREAD_MYSELF); }

View File

@ -22,9 +22,10 @@ using namespace Genode;
namespace Genode { Rm_session * env_context_area_rm_session(); }
extern Ram_dataspace_capability _main_thread_utcb_ds;
extern Native_thread_id _main_thread_id;
namespace Hw {
extern Ram_dataspace_capability _main_thread_utcb_ds;
extern Untyped_capability _main_thread_cap;
}
/*****************
** Thread_base **
@ -50,14 +51,14 @@ void Thread_base::_init_platform_thread(size_t weight, Type type)
if (type == REINITIALIZED_MAIN) { rm->detach(utcb_new); }
/* remap initial main-thread UTCB according to context-area spec */
try { rm->attach_at(_main_thread_utcb_ds, utcb_new, utcb_size); }
try { rm->attach_at(Hw::_main_thread_utcb_ds, utcb_new, utcb_size); }
catch(...) {
PERR("failed to re-map UTCB");
while (1) ;
}
/* adjust initial object state in case of a main thread */
tid().thread_id = _main_thread_id;
_thread_cap = env()->parent()->main_thread_cap();
tid().cap = Hw::_main_thread_cap;
_thread_cap = env()->parent()->main_thread_cap();
}

View File

@ -0,0 +1,17 @@
/*
* \brief Implementation of platform-specific capabilities for core
* \author Stefan Kalkowski
* \date 2015-05-20
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <base/capability.h>
void Genode::Native_capability::_inc() const { }
void Genode::Native_capability::_dec() const { }

View File

@ -0,0 +1,18 @@
/*
* \brief Implementation of core's PD session upgrade
* \author Stefan Kalkowski
* \date 2015-05-20
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <base/native_env.h>
#include <assert.h>
void Genode::upgrade_pd_session_quota(Genode::size_t quota) { assert(false); }

View File

@ -0,0 +1,122 @@
/*
* \brief Capability allocation service
* \author Stefan Kalkowski
* \date 2015-03-05
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _CORE__INCLUDE__CAP_SESSION_COMPONENT_H_
#define _CORE__INCLUDE__CAP_SESSION_COMPONENT_H_
#include <base/allocator_guard.h>
#include <base/rpc_server.h>
#include <cap_session/cap_session.h>
#include <util/arg_string.h>
#include <util/construct_at.h>
#include <kernel/object.h>
namespace Genode { class Cap_session_component; }
class Genode::Cap_session_component : public Rpc_object<Cap_session>
{
private:
/**
* Kernel object placeholder hold in a list
*/
struct Kobject : List<Kobject>::Element
{
using Identity = Kernel::Core_object_identity<Kernel::Thread>;
Native_capability cap;
uint8_t data[sizeof(Identity)]
__attribute__((aligned(sizeof(addr_t))));
};
using Slab = Tslab<Kobject, get_page_size()>;
Allocator_guard _guard;
uint8_t _initial_sb[get_page_size()]; /* initial slab block */
Slab _slab;
List<Kobject> _list;
Lock _lock;
/**
* Return the appropriate meta-data allocator: core gets its
* non-guarded allocator, all other clients get the quota guard
*/
Allocator * _md_alloc(Allocator *md_alloc)
{
Allocator * core_mem_alloc =
static_cast<Allocator*>(platform()->core_mem_alloc());
return (md_alloc == core_mem_alloc) ? core_mem_alloc : &_guard;
}
public:
Cap_session_component(Allocator *md_alloc, const char *args)
: _guard(md_alloc,
Arg_string::find_arg(args, "ram_quota").long_value(0)),
_slab(_md_alloc(md_alloc), (Slab_block*)&_initial_sb) { }
~Cap_session_component()
{
Lock::Guard guard(_lock);
while (Kobject * obj = _list.first()) {
Kernel::delete_obj(obj->data);
_list.remove(obj);
destroy(&_slab, obj);
}
}
void upgrade_ram_quota(size_t ram_quota) { _guard.upgrade(ram_quota); }
Native_capability alloc(Native_capability ep)
{
Lock::Guard guard(_lock);
/* allocate kernel object */
Kobject * obj;
if (!_slab.alloc(sizeof(Kobject), (void**)&obj))
throw Out_of_metadata();
construct_at<Kobject>(obj);
/* create kernel object via syscall */
obj->cap = Kernel::new_obj(obj->data, ep.dst());
if (!obj->cap.valid()) {
PWRN("Invalid entrypoint %u for allocating a capability!",
ep.dst());
destroy(&_slab, obj);
return Native_capability();
}
/* store it in the list and return result */
_list.insert(obj);
return obj->cap;
}
void free(Native_capability cap)
{
Lock::Guard guard(_lock);
for (Kobject * obj = _list.first(); obj; obj = obj->next())
if (obj->cap.dst() == cap.dst()) {
Kernel::delete_obj(obj->data);
_list.remove(obj);
destroy(&_slab, obj);
return;
}
}
};
#endif /* _CORE__INCLUDE__CAP_SESSION_COMPONENT_H_ */

View File

@ -16,10 +16,7 @@
namespace Kernel
{
enum {
DEFAULT_STACK_SIZE = 16 * 1024,
MAX_KERNEL_OBJECTS = 8192,
};
enum { DEFAULT_STACK_SIZE = 16 * 1024 };
/* amount of priority bands amongst quota owners in CPU scheduling */
constexpr unsigned cpu_priorities = 4;

View File

@ -17,6 +17,8 @@
/* base-hw includes */
#include <kernel/interface.h>
namespace Genode { class Native_utcb; }
namespace Kernel
{
class Pd;
@ -25,57 +27,35 @@ namespace Kernel
class Signal_context;
class Vm;
class User_irq;
using Native_utcb = Genode::Native_utcb;
/**
* Kernel names of the kernel calls
*/
constexpr Call_arg call_id_new_thread() { return 14; }
constexpr Call_arg call_id_delete_thread() { return 15; }
constexpr Call_arg call_id_start_thread() { return 16; }
constexpr Call_arg call_id_resume_thread() { return 17; }
constexpr Call_arg call_id_access_thread_regs() { return 18; }
constexpr Call_arg call_id_route_thread_event() { return 19; }
constexpr Call_arg call_id_update_pd() { return 20; }
constexpr Call_arg call_id_new_pd() { return 21; }
constexpr Call_arg call_id_delete_pd() { return 22; }
constexpr Call_arg call_id_new_signal_receiver() { return 23; }
constexpr Call_arg call_id_new_signal_context() { return 24; }
constexpr Call_arg call_id_delete_signal_context() { return 25; }
constexpr Call_arg call_id_delete_signal_receiver() { return 26; }
constexpr Call_arg call_id_new_vm() { return 27; }
constexpr Call_arg call_id_run_vm() { return 28; }
constexpr Call_arg call_id_pause_vm() { return 29; }
constexpr Call_arg call_id_pause_thread() { return 30; }
constexpr Call_arg call_id_delete_vm() { return 31; }
constexpr Call_arg call_id_new_irq() { return 32; }
constexpr Call_arg call_id_delete_irq() { return 33; }
constexpr Call_arg call_id_thread_quota() { return 34; }
constexpr Call_arg call_id_ack_irq() { return 35; }
/**
* Create a domain
*
* \param dst appropriate memory donation for the kernel object
* \param pd core local Platform_pd object
*
* \retval 0 when successful, otherwise !=0
*/
inline int long new_pd(void * const dst, Platform_pd * const pd)
{
return call(call_id_new_pd(), (Call_arg)dst, (Call_arg)pd);
}
/**
* Destruct a domain
*
* \param pd pointer to pd kernel object
*/
inline void delete_pd(Pd * const pd)
{
call(call_id_delete_pd(), (Call_arg)pd);
}
constexpr Call_arg call_id_new_thread() { return 15; }
constexpr Call_arg call_id_delete_thread() { return 16; }
constexpr Call_arg call_id_start_thread() { return 17; }
constexpr Call_arg call_id_pause_thread() { return 18; }
constexpr Call_arg call_id_resume_thread() { return 19; }
constexpr Call_arg call_id_access_thread_regs() { return 20; }
constexpr Call_arg call_id_route_thread_event() { return 21; }
constexpr Call_arg call_id_thread_quota() { return 22; }
constexpr Call_arg call_id_update_pd() { return 23; }
constexpr Call_arg call_id_new_pd() { return 24; }
constexpr Call_arg call_id_delete_pd() { return 25; }
constexpr Call_arg call_id_new_signal_receiver() { return 26; }
constexpr Call_arg call_id_new_signal_context() { return 27; }
constexpr Call_arg call_id_delete_signal_context() { return 28; }
constexpr Call_arg call_id_delete_signal_receiver() { return 29; }
constexpr Call_arg call_id_new_vm() { return 30; }
constexpr Call_arg call_id_run_vm() { return 31; }
constexpr Call_arg call_id_pause_vm() { return 32; }
constexpr Call_arg call_id_delete_vm() { return 33; }
constexpr Call_arg call_id_new_irq() { return 34; }
constexpr Call_arg call_id_delete_irq() { return 35; }
constexpr Call_arg call_id_ack_irq() { return 36; }
constexpr Call_arg call_id_new_obj() { return 37; }
constexpr Call_arg call_id_delete_obj() { return 38; }
/**
* Update locally effective domain configuration to in-memory state
@ -92,25 +72,6 @@ namespace Kernel
}
/**
* Create a thread
*
* \param p memory donation for the new kernel thread object
* \param priority scheduling priority of the new thread
* \param quota CPU quota of the new thread
* \param label debugging label of the new thread
*
* \retval >0 kernel name of the new thread
* \retval 0 failed
*/
inline unsigned new_thread(void * const p, unsigned const priority,
size_t const quota, char const * const label)
{
return call(call_id_new_thread(), (Call_arg)p, (Call_arg)priority,
(Call_arg)quota, (Call_arg)label);
}
/**
* Configure the CPU quota of a thread
*
@ -134,17 +95,6 @@ namespace Kernel
}
/**
* Destruct a thread
*
* \param thread pointer to thread kernel object
*/
inline void delete_thread(Thread * const thread)
{
call(call_id_delete_thread(), (Call_arg)thread);
}
/**
* Start execution of a thread
*
@ -181,15 +131,15 @@ namespace Kernel
* Set or unset the handler of an event that can be triggered by a thread
*
* \param thread pointer to thread kernel object
* \param event_id kernel name of the targeted thread event
* \param signal_context_id kernel name of the handlers signal context
* \param event_id capability id of the targeted thread event
* \param signal_context_id capability id of the handlers signal context
*
* \retval 0 succeeded
* \retval -1 failed
*/
inline int route_thread_event(Thread * const thread,
unsigned const event_id,
unsigned const signal_context_id)
capid_t const event_id,
capid_t const signal_context_id)
{
return call(call_id_route_thread_event(), (Call_arg)thread,
event_id, signal_context_id);
@ -238,109 +188,14 @@ namespace Kernel
}
/**
* Create a signal receiver
*
* \param p memory donation for the kernel signal-receiver object
*
* \retval >0 kernel name of the new signal receiver
* \retval 0 failed
*/
inline unsigned new_signal_receiver(addr_t const p)
{
return call(call_id_new_signal_receiver(), p);
}
/**
* Create a signal context and assign it to a signal receiver
*
* \param p memory donation for the kernel signal-context object
* \param receiver pointer to signal receiver kernel object
* \param imprint user label of the signal context
*
* \retval >0 kernel name of the new signal context
* \retval 0 failed
*/
inline unsigned new_signal_context(addr_t const p,
Signal_receiver * const receiver,
unsigned const imprint)
{
return call(call_id_new_signal_context(), p,
(Call_arg)receiver, imprint);
}
/**
* Destruct a signal context
*
* \param context pointer to signal context kernel object
*/
inline void delete_signal_context(Signal_context * const context)
{
call(call_id_delete_signal_context(), (Call_arg)context);
}
/**
* Destruct a signal receiver
*
* \param receiver pointer to signal receiver kernel object
*
* \retval 0 succeeded
* \retval -1 failed
*/
inline void delete_signal_receiver(Signal_receiver * const receiver)
{
call(call_id_delete_signal_receiver(), (Call_arg)receiver);
}
/**
* Create a virtual machine that is stopped initially
*
* \param dst memory donation for the VM object
* \param state location of the CPU state of the VM
* \param signal_context_id kernel name of the signal context for VM events
* \param table guest-physical to host-physical translation
* table pointer
*
* \retval 0 when successful, otherwise !=0
*
* Regaining of the supplied memory is not supported by now.
*/
inline int new_vm(void * const dst, void * const state,
unsigned const signal_context_id,
void * const table)
{
return call(call_id_new_vm(), (Call_arg)dst, (Call_arg)state,
(Call_arg)table, signal_context_id);
}
/**
* Execute a virtual-machine (again)
*
* \param vm pointer to vm kernel object
*
* \retval 0 when successful, otherwise !=0
*/
inline int run_vm(Vm * const vm)
inline void run_vm(Vm * const vm)
{
return call(call_id_run_vm(), (Call_arg) vm);
}
/**
* Destruct a virtual-machine
*
* \param vm pointer to vm kernel object
*
* \retval 0 when successful, otherwise !=0
*/
inline int delete_vm(Vm * const vm)
{
return call(call_id_delete_vm(), (Call_arg) vm);
call(call_id_run_vm(), (Call_arg) vm);
}
@ -348,12 +203,10 @@ namespace Kernel
* Stop execution of a virtual-machine
*
* \param vm pointer to vm kernel object
*
* \retval 0 when successful, otherwise !=0
*/
inline int pause_vm(Vm * const vm)
inline void pause_vm(Vm * const vm)
{
return call(call_id_pause_vm(), (Call_arg) vm);
call(call_id_pause_vm(), (Call_arg) vm);
}
/**
@ -361,10 +214,10 @@ namespace Kernel
*
* \param p memory donation for the irq object
* \param irq_nr interrupt number
* \param signal_context_id kernel name of the signal context
* \param signal_context_id capability id of the signal context
*/
inline int new_irq(addr_t const p, unsigned irq_nr,
unsigned signal_context_id)
capid_t signal_context_id)
{
return call(call_id_new_irq(), (Call_arg) p, irq_nr, signal_context_id);
}
@ -388,6 +241,27 @@ namespace Kernel
{
call(call_id_delete_irq(), (Call_arg) irq);
}
/**
* Create a new object identity for a thread
*
* \param dst memory donation for the new object
* \param cap capability id of the targeted thread
*/
inline capid_t new_obj(void * const dst, capid_t const cap)
{
return call(call_id_new_obj(), (Call_arg)dst, (Call_arg)cap);
}
/**
* Destroy an object identity
*
* \param dst pointer to the object identity object
*/
inline void delete_obj(void * const dst)
{
call(call_id_delete_obj(), (Call_arg)dst);
}
}
#endif /* _KERNEL__CORE_INTERFACE_H_ */
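The new 'new_obj'/'delete_obj' pair is what backs the capability session: core donates a small chunk of memory and the kernel constructs an object identity in it that refers to the given entrypoint thread. A sketch of the calling convention, mirroring 'Cap_session_component::alloc' and 'free' shown earlier ('donated_buf' must be large enough for a 'Kernel::Core_object_identity<Kernel::Thread>'; the include path is inferred from the header guard):

#include <kernel/core_interface.h>

static Kernel::capid_t mint_capability(void *donated_buf, Kernel::capid_t ep)
{
	/* the kernel constructs the object identity inside 'donated_buf' and
	 * returns a fresh capability id referring to the entrypoint thread */
	Kernel::capid_t cap = Kernel::new_obj(donated_buf, ep);

	if (cap == Kernel::cap_id_invalid()) {
		/* creation failed, e.g., 'ep' does not name a valid thread */
	}
	return cap;
}

static void revoke_capability(void *donated_buf)
{
	/* destroy the object identity; the donated memory may be reused */
	Kernel::delete_obj(donated_buf);
}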

View File

@ -1,6 +1,7 @@
/*
* \brief Backend for end points of synchronous interprocess communication
* \author Martin Stein
* \author Stefan Kalkowski
* \date 2012-11-30
*/
@ -14,19 +15,28 @@
#ifndef _KERNEL__IPC_NODE_H_
#define _KERNEL__IPC_NODE_H_
/* Genode includes */
#include <util/construct_at.h>
/* core includes */
#include <kernel/fifo.h>
#include <kernel/interface.h>
namespace Genode { class Msgbuf_base; };
namespace Kernel
{
class Pd;
/**
* Backend for end points of synchronous interprocess communication
*/
class Ipc_node;
using Ipc_node_queue = Kernel::Fifo<Ipc_node>;
}
class Kernel::Ipc_node
class Kernel::Ipc_node : public Ipc_node_queue::Element
{
protected:
@ -39,48 +49,40 @@ class Kernel::Ipc_node
PREPARE_AND_AWAIT_REPLY = 5,
};
void _init(Genode::Native_utcb * utcb, Ipc_node * callee);
private:
class Message_buf;
friend class Core_thread;
typedef Kernel::Fifo<Message_buf> Message_fifo;
State _state = INACTIVE;
capid_t _capid = cap_id_invalid();
Ipc_node * _caller = nullptr;
Ipc_node * _callee = nullptr;
bool _help = false;
size_t _rcv_caps = 0; /* max capability num to receive */
Genode::Native_utcb * _utcb = nullptr;
Ipc_node_queue _request_queue;
/**
* Describes the buffer for incoming or outgoing messages
*/
class Message_buf : public Message_fifo::Element
{
public:
/* pre-allocation array for object identity references */
void * _obj_id_ref_ptr[Genode::Msgbuf_base::MAX_CAP_ARGS];
void * base;
size_t size;
Ipc_node * src;
};
Message_fifo _request_queue;
Message_buf _inbuf;
Message_buf _outbuf;
Ipc_node * _outbuf_dst;
bool _outbuf_dst_help;
State _state;
inline void copy_msg(Ipc_node * const sender);
/**
* Buffer next request from request queue in 'r' to handle it
*/
void _receive_request(Message_buf * const r);
void _receive_request(Ipc_node * const caller);
/**
* Receive a given reply if one is expected
*
* \param base base of the reply payload
* \param size size of the reply payload
*/
void _receive_reply(void * const base, size_t const size);
void _receive_reply(Ipc_node * callee);
/**
* Insert 'r' into request queue, buffer it if we were waiting for it
*/
void _announce_request(Message_buf * const r);
void _announce_request(Ipc_node * const node);
/**
* Cancel all requests in request queue
@ -100,7 +102,7 @@ class Kernel::Ipc_node
/**
* A request 'r' in inbuf or request queue was cancelled by sender
*/
void _announced_request_cancelled(Message_buf * const r);
void _announced_request_cancelled(Ipc_node * const node);
/**
* The request in the outbuf was cancelled by receiver
@ -134,31 +136,28 @@ class Kernel::Ipc_node
protected:
Pd * _pd; /* pointer to PD this IPC node is part of */
/***************
** Accessors **
***************/
Ipc_node * outbuf_dst() { return _outbuf_dst; }
State state() { return _state; }
Ipc_node * callee() { return _callee; }
State state() { return _state; }
public:
Ipc_node();
~Ipc_node();
/**
* Send a request and wait for the according reply
*
* \param dst targeted IPC node
* \param buf_base base of receive buffer and request message
* \param buf_size size of receive buffer
* \param msg_size size of request message
* \param callee targeted IPC node
* \param help whether the request implies a helping relationship
*/
void send_request(Ipc_node * const dst, void * const buf_base,
size_t const buf_size, size_t const msg_size,
bool help);
void send_request(Ipc_node * const callee, capid_t capid, bool help,
unsigned rcv_caps);
/**
* Return root destination of the helping-relation tree we are in
@ -172,37 +171,38 @@ class Kernel::Ipc_node
{
/* if we have a helper in the receive buffer, call 'f' for it */
if (_state == PREPARE_REPLY || _state == PREPARE_AND_AWAIT_REPLY) {
if (_inbuf.src->_outbuf_dst_help) { f(_inbuf.src); } }
if (_caller->_help) { f(_caller); } }
/* call 'f' for each helper in our request queue */
_request_queue.for_each([f] (Message_buf * const b) {
if (b->src->_outbuf_dst_help) { f(b->src); } });
_request_queue.for_each([f] (Ipc_node * const node) {
if (node->_help) { f(node); } });
}
/**
* Wait until a request has arrived and load it for handling
*
* \param buf_base base of receive buffer
* \param buf_size size of receive buffer
*
* \return whether a request could already be received
*/
bool await_request(void * const buf_base,
size_t const buf_size);
bool await_request(unsigned rcv_caps);
/**
* Reply to last request if there's any
*
* \param msg_base base of reply message
* \param msg_size size of reply message
*/
void send_reply(void * const msg_base,
size_t const msg_size);
void send_reply();
/**
* If IPC node waits, cancel '_outbuf' to stop waiting
*/
void cancel_waiting();
/***************
** Accessors **
***************/
Pd * const pd() const { return _pd; }
char const * pd_label() const;
Genode::Native_utcb * utcb() { return _utcb; }
};
#endif /* _KERNEL__IPC_NODE_H_ */
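A minimal usage sketch of the reworked interface (the function name is made up): message payload now travels through the UTCBs, so the caller passes the capability id under which the invoked object is known in the callee's PD (resolved by the send-request syscall backend in thread.cc further below) and the number of capabilities it accepts in the reply, instead of message-buffer pointers.

/* illustrative sketch, not part of the patch */
void example_send(Kernel::Ipc_node &caller, Kernel::Ipc_node &callee,
                  Kernel::capid_t callee_capid)
{
	bool     const help     = true;  /* let the callee use the caller's CPU share */
	unsigned const rcv_caps = Genode::Msgbuf_base::MAX_CAP_ARGS;

	/* queues at the callee or is received directly if it awaits a request */
	caller.send_request(&callee, callee_capid, help, rcv_caps);
}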

View File

@ -19,6 +19,7 @@
#include <base/native_types.h>
#include <irq_session/irq_session.h>
#include <unmanaged_singleton.h>
#include <util/avl_tree.h>
/* core includes */
#include <kernel/signal_receiver.h>
@ -45,31 +46,35 @@ namespace Genode
}
class Kernel::Irq : public Object_pool<Irq>::Item
class Kernel::Irq : public Genode::Avl_node<Irq>
{
public:
using Pool = Object_pool<Irq>;
struct Pool : Genode::Avl_tree<Irq>
{
Irq * object(unsigned const id) const
{
Irq * const irq = first();
if (!irq) return nullptr;
return irq->find(id);
}
};
protected:
Pool &_pool;
/**
* Get kernel name of the interrupt
*/
unsigned _id() const { return Pool::Item::id(); };
unsigned _irq_nr; /* kernel name of the interrupt */
Pool &_pool;
public:
/**
* Constructor
*
* \param irq_id kernel name of the interrupt
* \param pool pool this interrupt shall belong to
* \param irq interrupt number
* \param pool pool this interrupt shall belong to
*/
Irq(unsigned const irq_id, Pool &pool)
: Pool::Item(irq_id), _pool(pool) { _pool.insert(this); }
Irq(unsigned const irq, Pool &pool)
: _irq_nr(irq), _pool(pool) { _pool.insert(this); }
virtual ~Irq() { _pool.remove(this); }
@ -87,10 +92,30 @@ class Kernel::Irq : public Object_pool<Irq>::Item
* Allow interrupt to occur
*/
void enable() const;
unsigned irq_number() { return _irq_nr; }
/************************
* 'Avl_node' interface *
************************/
bool higher(Irq * i) const { return i->_irq_nr > _irq_nr; }
/**
* Find irq with 'nr' within this AVL subtree
*/
Irq * find(unsigned const nr)
{
if (nr == _irq_nr) return this;
Irq * const subtree = Genode::Avl_node<Irq>::child(nr > _irq_nr);
return (subtree) ? subtree->find(nr) : nullptr;
}
};
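A small illustrative lookup (the function name is made up): resolving an interrupt number now walks the per-pool AVL tree via Irq::find() instead of consulting the former global Object_pool.

/* illustrative sketch, not part of the patch */
void example_unmask(Kernel::Irq::Pool &pool, unsigned const irq_nr)
{
	if (Kernel::Irq * const irq = pool.object(irq_nr))
		irq->enable();
}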
class Kernel::User_irq : public Kernel::Irq
class Kernel::User_irq : public Kernel::Irq, public Kernel::Object
{
private:

View File

@ -16,13 +16,21 @@
#define _KERNEL__KERNEL_H_
#include <pic.h>
#include <kernel/pd.h>
/**
* Main routine of every kernel pass
*/
extern "C" void kernel();
namespace Kernel {
class Pd;
class Mode_transition_control;
Pd * core_pd();
Mode_transition_control * mtc();
Pic * pic();
Native_utcb * core_main_thread_utcb_phys_addr();
}
#endif /* _KERNEL__KERNEL_H_ */

View File

@ -1,6 +1,5 @@
/*
* \brief Objects that are findable through unique IDs
* \author Martin Stein
* \brief Kernel object identities and references
* \author Stefan Kalkowski
* \date 2012-11-30
*/
@ -18,133 +17,172 @@
/* Genode includes */
#include <util/avl_tree.h>
#include <util/bit_allocator.h>
#include <util/list.h>
/* core includes */
#include <assert.h>
#include <kernel/configuration.h>
#include <kernel/interface.h>
#include <kernel/kernel.h>
namespace Kernel
{
/**
* Map unique sortable IDs to objects
*
* \param T object type that inherits from Object_pool<T>::Item
*/
template <typename T>
class Object_pool;
class Pd; /* forward declaration */
/**
* Manage allocation of a static set of IDs
* Base class of all Kernel objects
*/
using Id_allocator = Genode::Bit_allocator<MAX_KERNEL_OBJECTS>;
Id_allocator & id_alloc();
/**
* Make all objects of a deriving class findable through unique IDs
*
* \param T object type
* \param POOL accessor function of object pool
*/
template <typename T, Kernel::Object_pool<T> * (* POOL)()>
class Object;
/**
* An object identity helps to distinguish different capability owners
* that reference a Kernel object
*/
class Object_identity;
/**
* An object identity reference is the in-kernel representation
* of a PD local capability. It references an object identity and is
* associated with a protection domain.
*/
class Object_identity_reference;
/**
* A tree of object identity references to retrieve the capabilities
* of one PD quickly.
*/
class Object_identity_reference_tree;
using Object_identity_reference_list
= Genode::List<Object_identity_reference>;
/**
* This class represents a kernel object's identity including the
* corresponding object identity reference for core
*/
template <typename T> class Core_object_identity;
/**
* This class represents a kernel object, its identity, and the
* corresponding object identity reference for core
*/
template <typename T> class Core_object;
}
template <typename T>
class Kernel::Object_pool
struct Kernel::Object
{
public:
/**
* Enable a deriving class T to be inserted into an Object_pool<T>
*/
class Item;
/**
* Insert 'object' into pool
*/
void insert(T * const object) { _tree.insert(object); }
/**
* Remove 'object' from pool
*/
void remove(T * const object) { _tree.remove(object); }
/**
* Return object with ID 'id', or 0 if such an object doesn't exist
*/
T * object(unsigned const id) const
{
Item * const root = _tree.first();
if (!root) { return 0; }
return static_cast<T *>(root->find(id));
}
private:
Genode::Avl_tree<Item> _tree;
virtual ~Object() { }
};
template <typename T>
class Kernel::Object_pool<T>::Item : public Genode::Avl_node<Item>
{
protected:
unsigned _id;
class Kernel::Object_identity
: public Kernel::Object_identity_reference_list
{
private:
Object & _object;
public:
/**
* Constructor
*/
Item(unsigned const id) : _id(id) { }
Object_identity(Object & object);
~Object_identity();
/**
* Find entry with 'object_id' within this AVL subtree
*/
Item * find(unsigned const object_id)
{
if (object_id == id()) { return this; }
Item * const subtree =
Genode::Avl_node<Item>::child(object_id > id());
if (!subtree) { return 0; }
return subtree->find(object_id);
}
template <typename KOBJECT>
KOBJECT * object() { return dynamic_cast<KOBJECT*>(&_object); }
};
/**
* ID of this object
*/
unsigned id() const { return _id; }
class Kernel::Object_identity_reference
: public Genode::Avl_node<Kernel::Object_identity_reference>,
public Genode::List<Kernel::Object_identity_reference>::Element
{
private:
capid_t _capid;
Object_identity *_identity;
Pd &_pd;
public:
Object_identity_reference(Object_identity *oi, Pd &pd);
~Object_identity_reference();
/***************
** Accessors **
***************/
template <typename KOBJECT>
KOBJECT * object() {
return _identity ? _identity->object<KOBJECT>() : nullptr; }
Object_identity_reference * factory(void * dst, Pd &pd);
Pd & pd() { return _pd; }
capid_t capid() { return _capid; }
void invalidate();
/************************
* 'Avl_node' interface *
** Avl_node interface **
************************/
bool higher(Item * i) const { return i->id() > id(); }
bool higher(Object_identity_reference * oir) const {
return oir->_capid > _capid; }
/**********************
** Lookup functions **
**********************/
Object_identity_reference * find(Pd * pd);
Object_identity_reference * find(capid_t capid);
};
template <typename T, Kernel::Object_pool<T> * (* POOL)()>
class Kernel::Object : public Object_pool<T>::Item
class Kernel::Object_identity_reference_tree
: public Genode::Avl_tree<Kernel::Object_identity_reference>
{
public:
using Pool = Object_pool<T>;
Object_identity_reference * find(capid_t id);
/**
* Map of unique IDs to objects of T
*/
static Pool * pool() { return POOL(); }
protected:
Object() : Pool::Item(id_alloc().alloc()) {
POOL()->insert(static_cast<T *>(this)); }
~Object()
template <typename KOBJECT>
KOBJECT * find(capid_t id)
{
POOL()->remove(static_cast<T *>(this));
id_alloc().free(Pool::Item::id());
Object_identity_reference * oir = find(id);
return (oir) ? oir->object<KOBJECT>() : nullptr;
}
};
template <typename T>
class Kernel::Core_object_identity : public Object_identity,
public Object_identity_reference
{
public:
Core_object_identity(T & object)
: Object_identity(object),
Object_identity_reference(this, *core_pd()) { }
virtual void destroy() { this->~Object_identity(); }
capid_t core_capid() { return capid(); }
};
template <typename T>
class Kernel::Core_object : public T, public Kernel::Core_object_identity<T>
{
public:
template <typename... ARGS>
Core_object(ARGS &&... args)
: T(args...), Core_object_identity<T>(*static_cast<T*>(this)) { }
void destroy() {
Core_object_identity<T>::destroy();
this->~T();
}
};
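Taken together, a capability id received from userland resolves to a typed kernel object in two steps: the PD's reference tree yields the Object_identity_reference, whose identity then yields the object via dynamic_cast. A hedged sketch, assuming the cap_tree() accessor that this patch adds to Kernel::Pd:

/* illustrative sketch, not part of the patch */
Kernel::Thread * lookup_thread(Kernel::Pd &pd, Kernel::capid_t capid)
{
	/* returns nullptr if the id is unknown or names an object of another type */
	return pd.cap_tree().find<Kernel::Thread>(capid);
}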

View File

@ -17,8 +17,13 @@
/* core includes */
#include <kernel/early_translations.h>
#include <kernel/object.h>
#include <kernel/cpu.h>
#include <kernel/object.h>
namespace Genode {
class Page_slab;
class Platform_pd;
}
namespace Kernel
{
@ -43,10 +48,6 @@ namespace Kernel
* Kernel backend of protection domains
*/
class Pd;
typedef Object_pool<Pd> Pd_pool;
Pd_pool * pd_pool();
}
class Kernel::Mode_transition_control
@ -129,16 +130,22 @@ class Kernel::Mode_transition_control
} __attribute__((aligned(Mode_transition_control::ALIGN)));
class Kernel::Pd : public Object<Pd, pd_pool>, public Cpu::Pd
class Kernel::Pd : public Cpu::Pd,
public Kernel::Object
{
public:
typedef Genode::Translation_table Table;
static constexpr unsigned max_cap_ids = 1 << (sizeof(capid_t) * 8);
using Table = Genode::Translation_table;
using Capid_allocator = Genode::Bit_allocator<max_cap_ids>;
private:
Table * const _table;
Platform_pd * const _platform_pd;
Table * const _table;
Genode::Platform_pd * const _platform_pd;
Capid_allocator _capid_alloc;
Object_identity_reference_tree _cap_tree;
public:
@ -148,7 +155,7 @@ class Kernel::Pd : public Object<Pd, pd_pool>, public Cpu::Pd
* \param table translation table of the PD
* \param platform_pd core object of the PD
*/
Pd(Table * const table, Platform_pd * const platform_pd);
Pd(Table * const table, Genode::Platform_pd * const platform_pd);
~Pd();
@ -158,12 +165,25 @@ class Kernel::Pd : public Object<Pd, pd_pool>, public Cpu::Pd
void admit(Cpu::Context * const c);
static capid_t syscall_create(void * const dst,
Genode::Translation_table * tt,
Genode::Platform_pd * const pd)
{
return call(call_id_new_pd(), (Call_arg)dst,
(Call_arg)tt, (Call_arg)pd);
}
static void syscall_destroy(Pd * const pd) {
call(call_id_delete_pd(), (Call_arg)pd); }
/***************
** Accessors **
***************/
Platform_pd * platform_pd() const { return _platform_pd; }
Table * translation_table() const { return _table; }
Genode::Platform_pd * platform_pd() const { return _platform_pd; }
Table * translation_table() const { return _table; }
Capid_allocator & capid_alloc() { return _capid_alloc; }
Object_identity_reference_tree & cap_tree() { return _cap_tree; }
};
#endif /* _KERNEL__PD_H_ */

View File

@ -17,7 +17,7 @@
/* Genode includes */
#include <base/signal.h>
/* core include */
#include <kernel/core_interface.h>
#include <kernel/object.h>
namespace Kernel
@ -46,12 +46,6 @@ namespace Kernel
* Combines signal contexts to an entity that handlers can listen to
*/
class Signal_receiver;
typedef Object_pool<Signal_context> Signal_context_pool;
typedef Object_pool<Signal_receiver> Signal_receiver_pool;
Signal_context_pool * signal_context_pool();
Signal_receiver_pool * signal_receiver_pool();
}
class Kernel::Signal_ack_handler
@ -167,8 +161,7 @@ class Kernel::Signal_context_killer
void cancel_waiting();
};
class Kernel::Signal_context
: public Object<Signal_context, signal_context_pool>
class Kernel::Signal_context : public Kernel::Object
{
friend class Signal_receiver;
friend class Signal_context_killer;
@ -265,10 +258,34 @@ class Kernel::Signal_context
* \retval -1 failed
*/
int kill(Signal_context_killer * const k);
/**
* Create a signal context and assign it to a signal receiver
*
* \param p memory donation for the kernel signal-context object
* \param receiver pointer to signal receiver kernel object
* \param imprint user label of the signal context
*
* \retval capability id of the new kernel object
*/
static capid_t syscall_create(void * p,
Signal_receiver * const receiver,
unsigned const imprint)
{
return call(call_id_new_signal_context(), (Call_arg)p,
(Call_arg)receiver, (Call_arg)imprint);
}
/**
* Destruct a signal context
*
* \param context pointer to signal context kernel object
*/
static void syscall_destroy(Signal_context * const context) {
call(call_id_delete_signal_context(), (Call_arg)context); }
};
class Kernel::Signal_receiver
: public Object<Signal_receiver, signal_receiver_pool>
class Kernel::Signal_receiver : public Kernel::Object
{
friend class Signal_context;
friend class Signal_handler;
@ -326,6 +343,24 @@ class Kernel::Signal_receiver
* Return whether any of the contexts of this receiver is deliverable
*/
bool deliverable();
/**
* Syscall to create a signal receiver
*
* \param p memory donation for the kernel signal-receiver object
*
* \retval capability id of the new kernel object
*/
static capid_t syscall_create(void * p) {
return call(call_id_new_signal_receiver(), (Call_arg)p); }
/**
* Syscall to destruct a signal receiver
*
* \param receiver pointer to signal receiver kernel object
*/
static void syscall_destroy(Signal_receiver * const receiver) {
call(call_id_delete_signal_receiver(), (Call_arg)receiver); }
};
#endif /* _KERNEL__SIGNAL_RECEIVER_ */
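A hedged sketch of how the two signal syscalls relate (names are illustrative; core's Kernel_object wrapper in core's object.h below encapsulates the donation handling): a context can only be created against an already constructed receiver, and both kernel objects live in memory donated by core.

/* illustrative sketch, not part of the patch */
Kernel::capid_t example_create(void *rcv_mem, void *ctx_mem, unsigned imprint)
{
	using namespace Kernel;

	/* the receiver must exist before a context can be bound to it;
	   core keeps the returned capability id (see Kernel_object::_cap) */
	Signal_receiver::syscall_create(rcv_mem);

	/* core addresses the kernel object through its donation, just like
	   Kernel_object<T>::kernel_object() does */
	Signal_receiver * const rcv = reinterpret_cast<Signal_receiver *>(rcv_mem);

	/* the imprint is handed back to userland with every delivered signal */
	return Signal_context::syscall_create(ctx_mem, rcv, imprint);
}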

View File

@ -0,0 +1,26 @@
/*
* \brief Automated testing of kernel internals
* \author Stefan Kalkowski
* \author Martin Stein
* \date 2015-05-21
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _KERNEL__TEST_H_
#define _KERNEL__TEST_H_
namespace Kernel {
/**
* Hook that enables automated testing of kernel internals
*/
void test();
}
#endif /* _KERNEL__TEST_H_ */

View File

@ -19,32 +19,29 @@
#include <kernel/ipc_node.h>
#include <kernel/cpu.h>
#include <kernel/thread_base.h>
#include <kernel/object.h>
#include <base/signal.h>
namespace Kernel
{
class Thread;
class Pd;
typedef Genode::Native_utcb Native_utcb;
/**
* Kernel backend for userland execution-contexts
*/
class Thread;
typedef Object_pool<Thread> Thread_pool;
Thread_pool * thread_pool();
class Core_thread;
}
class Kernel::Thread
:
public Cpu::User_context,
public Object<Thread, thread_pool>,
public Cpu_domain_update, public Ipc_node, public Signal_context_killer,
public Signal_handler, public Thread_base, public Cpu_job
: public Kernel::Object,
public Cpu::User_context,
public Cpu_domain_update, public Ipc_node, public Signal_context_killer,
public Signal_handler, public Thread_base, public Cpu_job
{
friend class Thread_event;
friend class Core_thread;
private:
@ -61,11 +58,9 @@ class Kernel::Thread
STOPPED = 7,
};
State _state;
Pd * _pd;
Native_utcb * _utcb_phys;
Signal_receiver * _signal_receiver;
char const * const _label;
State _state;
Signal_receiver * _signal_receiver;
char const * const _label;
/**
* Notice that another thread yielded the CPU to this thread
@ -81,8 +76,8 @@ class Kernel::Thread
* \retval 0 succeeded
* \retval -1 failed
*/
int _route_event(unsigned const event_id,
unsigned const signal_context_id);
int _route_event(unsigned const event_id,
Signal_context * const signal_context_id);
/**
* Map kernel name of thread event to the corresponding member
@ -183,11 +178,6 @@ class Kernel::Thread
*/
addr_t Thread::* _reg(addr_t const id) const;
/**
* Print table of all threads and their current activity
*/
void _print_activity_table();
/**
* Print the activity of the thread
*
@ -210,11 +200,8 @@ class Kernel::Thread
** Kernel-call back-ends, see kernel-interface headers **
*********************************************************/
void _call_new_pd();
void _call_delete_pd();
void _call_new_thread();
void _call_thread_quota();
void _call_delete_thread();
void _call_start_thread();
void _call_pause_current_thread();
void _call_pause_thread();
@ -228,15 +215,11 @@ class Kernel::Thread
void _call_update_data_region();
void _call_update_instr_region();
void _call_print_char();
void _call_new_signal_receiver();
void _call_new_signal_context();
void _call_await_signal();
void _call_signal_pending();
void _call_submit_signal();
void _call_ack_signal();
void _call_kill_signal_context();
void _call_delete_signal_context();
void _call_delete_signal_receiver();
void _call_new_vm();
void _call_delete_vm();
void _call_run_vm();
@ -244,8 +227,27 @@ class Kernel::Thread
void _call_access_thread_regs();
void _call_route_thread_event();
void _call_new_irq();
void _call_delete_irq();
void _call_ack_irq();
void _call_new_obj();
void _call_delete_obj();
void _call_delete_cap();
template <typename T, typename... ARGS>
void _call_new(ARGS &&... args)
{
using Object = Core_object<T>;
void * dst = (void *)user_arg_1();
Object * o = Genode::construct_at<Object>(dst, args...);
user_arg_0(o->core_capid());
}
template <typename T>
void _call_delete()
{
using Object = Core_object<T>;
reinterpret_cast<Object*>(user_arg_1())->~Object();
}
/***************************
@ -294,15 +296,30 @@ class Kernel::Thread
char const * const label);
/**
* Prepare thread to get active the first time
* Syscall to create a thread
*
* \param cpu targeted CPU
* \param pd targeted domain
* \param utcb core local pointer to userland thread-context
* \param start whether to start executing the thread
* \param p memory donation for the new kernel thread object
* \param priority scheduling priority of the new thread
* \param quota CPU quota of the new thread
* \param label debugging label of the new thread
*
* \retval capability id of the new kernel object
*/
void init(Cpu * const cpu, Pd * const pd, Native_utcb * const utcb,
bool const start);
static capid_t syscall_create(void * const p, unsigned const priority,
size_t const quota,
char const * const label)
{
return call(call_id_new_thread(), (Call_arg)p, (Call_arg)priority,
(Call_arg)quota, (Call_arg)label);
}
/**
* Syscall to destroy a thread
*
* \param thread pointer to thread kernel object
*/
static void syscall_destroy(Thread * thread) {
call(call_id_delete_thread(), (Call_arg)thread); }
/*************
@ -318,10 +335,22 @@ class Kernel::Thread
** Accessors **
***************/
unsigned id() const { return Object::id(); }
char const * label() const { return _label; }
char const * pd_label() const;
Pd * const pd() const { return _pd; }
};
/**
* The first core thread in the system bootstrapped by the Kernel
*/
class Kernel::Core_thread : public Core_object<Kernel::Thread>
{
private:
Core_thread();
public:
static Thread & singleton();
};
#endif /* _KERNEL__THREAD_H_ */
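Creating a thread's kernel object and starting it are now separate steps. A hedged sketch of the creation half (the function name is made up); binding the thread to a CPU, PD, and UTCB happens later through the start-thread syscall, whose backend is shown in thread.cc further below.

/* illustrative sketch, not part of the patch */
Kernel::capid_t example_new_thread(void *donation, unsigned priority,
                                   Genode::size_t quota, char const *label)
{
	/* constructs only the kernel object within the donated memory */
	return Kernel::Thread::syscall_create(donation, priority, quota, label);
}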

View File

@ -52,7 +52,7 @@ class Kernel::Thread_event : public Signal_ack_handler
/**
* Kernel name of assigned signal context or 0 if not assigned
*/
unsigned signal_context_id() const;
Signal_context * const signal_context() const;
/**
* Override signal context of the event

View File

@ -27,19 +27,17 @@ namespace Kernel
* Kernel backend for a virtual machine
*/
class Vm;
typedef Object_pool<Vm> Vm_pool;
Vm_pool * vm_pool();
}
class Kernel::Vm : public Object<Vm, vm_pool>, public Cpu_job
class Kernel::Vm : public Cpu_job,
public Kernel::Object
{
private:
enum State { ACTIVE, INACTIVE };
unsigned _id;
Genode::Vm_state * const _state;
Signal_context * const _context;
void * const _table;
@ -58,6 +56,8 @@ class Kernel::Vm : public Object<Vm, vm_pool>, public Cpu_job
Signal_context * const context,
void * const table);
~Vm();
/**
* Inject an interrupt into this VM
*
@ -66,6 +66,36 @@ class Kernel::Vm : public Object<Vm, vm_pool>, public Cpu_job
void inject_irq(unsigned irq);
/**
* Create a virtual machine that is stopped initially
*
* \param dst memory donation for the VM object
* \param state location of the CPU state of the VM
* \param signal_context_id kernel name of the signal context for VM events
* \param table guest-physical to host-physical translation
* table pointer
*
* \retval cap id when successful, otherwise invalid cap id
*/
static capid_t syscall_create(void * const dst, void * const state,
capid_t const signal_context_id,
void * const table)
{
return call(call_id_new_vm(), (Call_arg)dst, (Call_arg)state,
(Call_arg)table, signal_context_id);
}
/**
* Destruct a virtual machine
*
* \param vm pointer to vm kernel object
*/
static void syscall_destroy(Vm * const vm) {
call(call_id_delete_vm(), (Call_arg) vm); }
/****************
** Vm_session **
****************/

View File

@ -0,0 +1,74 @@
/*
* \brief Kernel object handling in core
* \author Stefan Kalkowski
* \date 2015-04-21
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _CORE__INCLUDE__OBJECT_H_
#define _CORE__INCLUDE__OBJECT_H_
#include <base/native_types.h>
#include <kernel/interface.h>
#include <kernel/object.h>
#include <util/construct_at.h>
namespace Genode
{
/**
* Represents a kernel object in core
*
* \param T type of the kernel object
*/
template <typename T> class Kernel_object;
}
template <typename T>
class Genode::Kernel_object
{
private:
uint8_t _data[sizeof(Kernel::Core_object<T>)]
__attribute__((aligned(sizeof(addr_t))));
protected:
Untyped_capability _cap;
public:
Kernel_object() {}
/**
* Creates a kernel object either via a syscall or directly
*/
template <typename... ARGS>
Kernel_object(bool syscall, ARGS &&... args)
: _cap(syscall ? T::syscall_create(&_data, args...)
: Kernel::cap_id_invalid()) {
if (!syscall) construct_at<T>(&_data, args...); }
~Kernel_object() { T::syscall_destroy(kernel_object()); }
T * kernel_object() { return reinterpret_cast<T*>(_data); }
/**
* Create the kernel object explicitly via this function
*/
template <typename... ARGS>
bool create(ARGS &&... args)
{
if (_cap.valid()) return false;
_cap = T::syscall_create(&_data, args...);
return _cap.valid();
}
};
#endif /* _CORE__INCLUDE__OBJECT_H_ */
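A usage sketch, assuming the wrapped type provides the syscall_create/syscall_destroy pair declared in the kernel headers above (the struct name is made up; core's signal-session code further below follows the same pattern):

/* illustrative sketch, not part of the patch */
struct Example_receiver : Genode::Kernel_object<Kernel::Signal_receiver>
{
	/*
	 * 'true' creates the kernel object via Signal_receiver::syscall_create
	 * with the embedded '_data' buffer as memory donation; 'false' would
	 * construct it in place, which only core-internal objects do
	 */
	Example_receiver() : Kernel_object(true) { }
};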

View File

@ -0,0 +1,85 @@
/*
* \brief Core-specific instance of the PD session interface
* \author Christian Helmuth
* \author Stefan Kalkowski
* \date 2006-07-17
*/
/*
* Copyright (C) 2006-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _CORE__INCLUDE__PD_SESSION_COMPONENT_H_
#define _CORE__INCLUDE__PD_SESSION_COMPONENT_H_
/* Genode includes */
#include <base/allocator_guard.h>
#include <base/rpc_server.h>
#include <pd_session/pd_session.h>
#include <util/arg_string.h>
/* core includes */
#include <platform_pd.h>
namespace Genode { class Pd_session_component; }
class Genode::Pd_session_component : public Rpc_object<Pd_session>
{
private:
/**
* Read and store the PD label
*/
struct Label {
enum { MAX_LEN = 64 };
char string[MAX_LEN];
Label(char const *args)
{
Arg_string::find_arg(args, "label").string(string,
sizeof(string), "");
}
} const _label;
Allocator_guard _md_alloc; /* guarded meta-data allocator */
Platform_pd _pd;
Parent_capability _parent;
Rpc_entrypoint *_thread_ep;
size_t _ram_quota(char const * args) {
return Arg_string::find_arg(args, "ram_quota").long_value(0); }
public:
Pd_session_component(Rpc_entrypoint * thread_ep,
Allocator * md_alloc,
char const * args)
: _label(args),
_md_alloc(md_alloc, _ram_quota(args)),
_pd(&_md_alloc, _label.string),
_thread_ep(thread_ep) { }
/**
* Register quota donation at allocator guard
*/
void upgrade_ram_quota(size_t ram_quota)
{
_md_alloc.upgrade(ram_quota);
_pd.upgrade_slab(_md_alloc);
}
/**************************/
/** PD session interface **/
/**************************/
int bind_thread(Thread_capability);
int assign_parent(Parent_capability);
};
#endif /* _CORE__INCLUDE__PD_SESSION_COMPONENT_H_ */

View File

@ -15,18 +15,14 @@
#ifndef _CORE__INCLUDE__PLATFORM_PD_H_
#define _CORE__INCLUDE__PLATFORM_PD_H_
/* Genode includes */
#include <base/printf.h>
#include <root/root.h>
#include <util/construct_at.h>
/* Core includes */
#include <translation_table.h>
#include <platform.h>
#include <platform_thread.h>
#include <address_space.h>
#include <page_slab.h>
#include <kernel/kernel.h>
#include <object.h>
#include <kernel/object.h>
#include <kernel/pd.h>
namespace Hw
{
@ -40,6 +36,8 @@ namespace Genode
{
class Platform_thread; /* forward declaration */
class Capability_space;
/**
* Platform specific part of a Genode protection domain
*/
@ -121,14 +119,37 @@ class Hw::Address_space : public Genode::Address_space
};
class Genode::Platform_pd : public Hw::Address_space
class Genode::Capability_space
{
private:
Native_capability _parent;
bool _thread_associated = false;
char const * const _label;
uint8_t _kernel_object[sizeof(Kernel::Pd)];
enum { SLAB_SIZE = 2 * get_page_size() };
using Cap_slab = Tslab<Kernel::Object_identity_reference,
SLAB_SIZE>;
uint8_t _initial_sb[SLAB_SIZE];
Cap_slab _slab;
public:
Capability_space();
Cap_slab & capability_slab() { return _slab; }
void upgrade_slab(Allocator &alloc);
};
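The per-PD capability slab backs the Object_identity_reference pre-allocations made on the kernel's IPC path (see ipc_node.cc further below); a hedged sketch of how a session-quota upgrade reaches it (the function name is made up):

/* illustrative sketch, not part of the patch */
void example_upgrade(Genode::Platform_pd &pd, Genode::Allocator &md_alloc)
{
	/* hand the (upgraded) allocator to the slab so further capability
	   translations for this PD can allocate new reference objects */
	pd.upgrade_slab(md_alloc);
}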
class Genode::Platform_pd : public Hw::Address_space,
public Genode::Capability_space,
public Kernel_object<Kernel::Pd>
{
private:
Native_capability _parent;
bool _thread_associated = false;
char const * const _label;
protected:
@ -175,7 +196,8 @@ class Genode::Platform_pd : public Hw::Address_space
** Accessors **
***************/
char const * const label() { return _label; }
char const * const label() { return _label; }
Native_capability parent() { return _parent; }
};
@ -184,7 +206,7 @@ class Genode::Core_platform_pd : public Genode::Platform_pd
private:
static inline Translation_table * const _table();
static inline Page_slab * const _slab();
static inline Page_slab * const _slab();
/**
* Establish initial one-to-one mappings for core/kernel.

View File

@ -20,13 +20,13 @@
#include <base/native_types.h>
#include <base/thread.h>
/* base-hw includes */
#include <kernel/core_interface.h>
#include <kernel/log.h>
/* core includes */
#include <address_space.h>
#include <object.h>
#include <kernel/core_interface.h>
#include <kernel/thread.h>
#include <kernel/log.h>
namespace Genode {
@ -39,19 +39,17 @@ namespace Genode {
/**
* Userland interface for the management of kernel thread-objects
*/
class Platform_thread
class Platform_thread : public Kernel_object<Kernel::Thread>
{
enum { LABEL_MAX_LEN = 32 };
Platform_pd * _pd;
Weak_ptr<Address_space> _address_space;
unsigned _id;
Rm_client * _rm_client;
Native_utcb * _utcb_core_addr; /* UTCB address in core */
Native_utcb * _utcb_pd_addr; /* UTCB address in pd */
Ram_dataspace_capability _utcb; /* UTCB dataspace */
char _label[LABEL_MAX_LEN];
char _kernel_thread[sizeof(Kernel::Thread)];
Platform_pd * _pd;
Weak_ptr<Address_space> _address_space;
Rm_client * _rm_client;
Native_utcb * _utcb_core_addr; /* UTCB addr in core */
Native_utcb * _utcb_pd_addr; /* UTCB addr in pd */
Ram_dataspace_capability _utcb; /* UTCB dataspace */
char _label[LABEL_MAX_LEN];
/*
* Whether this thread is the main thread of a program.
@ -75,6 +73,12 @@ namespace Genode {
*/
bool _attaches_utcb_by_itself();
unsigned _priority(unsigned virt_prio)
{
return Cpu_session::scale_priority(Kernel::Cpu_priority::max,
virt_prio);
}
public:
/**
@ -125,12 +129,12 @@ namespace Genode {
/**
* Pause this thread
*/
void pause() { Kernel::pause_thread(kernel_thread()); }
void pause() { Kernel::pause_thread(kernel_object()); }
/**
* Resume this thread
*/
void resume() { Kernel::resume_thread(kernel_thread()); }
void resume() { Kernel::resume_thread(kernel_object()); }
/**
* Cancel currently blocking operation
@ -187,12 +191,7 @@ namespace Genode {
Platform_pd * pd() const { return _pd; }
Native_thread_id id() const { return _id; }
Ram_dataspace_capability utcb() const { return _utcb; }
Kernel::Thread * const kernel_thread() {
return reinterpret_cast<Kernel::Thread*>(_kernel_thread); }
};
}

View File

@ -17,140 +17,66 @@
/* Genode includes */
#include <signal_session/signal_session.h>
#include <base/rpc_server.h>
#include <base/slab.h>
#include <base/tslab.h>
#include <base/allocator_guard.h>
#include <base/object_pool.h>
/* core includes */
#include <object.h>
#include <kernel/signal_receiver.h>
#include <util.h>
namespace Genode
{
/**
* Combines kernel data and core data of an object a signal session manages
*
* \param T type of the kernel data
*/
template <typename T>
class Signal_session_object;
typedef Signal_session_object<Kernel::Signal_receiver>
Signal_session_receiver;
typedef Signal_session_object<Kernel::Signal_context>
Signal_session_context;
/**
* Traits that are used in signal session components
*
* FIXME: This class is merely necessary because GCC 4.7.2 appears to have
* a problem with using a static-constexpr method for the
* dimensioning of a member array within the same class.
*/
class Signal_session_traits;
/**
* Server-sided implementation of a signal session
* Server-side implementation of a signal session
*/
class Signal_session_component;
}
template <typename T>
class Genode::Signal_session_object
:
public Object_pool<Signal_session_object<T> >::Entry
{
public:
typedef Object_pool<Signal_session_object<T> > Pool;
/**
* Constructor
*/
Signal_session_object(Untyped_capability cap) : Pool::Entry(cap) { }
/**
* Kernel name of the object
*/
unsigned id() const { return Pool::Entry::cap().dst(); }
/**
* Size of the data starting at the base of this object
*/
static constexpr size_t size()
{
return sizeof(Signal_session_object<T>) + sizeof(T);
}
/**
* Base of the kernel donation associated with a specific SLAB address
*
* \param slab_addr SLAB address
*/
static constexpr addr_t kernel_donation(void * const slab_addr)
{
return (addr_t)slab_addr + sizeof(Signal_session_object<T>);
}
};
class Genode::Signal_session_traits
class Genode::Signal_session_component : public Rpc_object<Signal_session>
{
private:
/**
* Return the raw size of a slab
*/
static constexpr size_t _slab_raw() { return get_page_size(); }
struct Receiver : Kernel_object<Kernel::Signal_receiver>,
Object_pool<Receiver>::Entry
{
using Pool = Object_pool<Receiver>;
/**
* Return the size of the static buffer for meta data per slab
*/
static constexpr size_t _slab_buffer() { return 128; }
Receiver();
};
/**
* Return the size available for allocations per slab
*/
static constexpr size_t _slab_avail() { return _slab_raw() - _slab_buffer(); }
/**
* Return the amount of allocatable slots per slab
*
* \param T object type of the slab
*/
template <typename T>
static constexpr size_t _slab_slots() { return _slab_avail() / T::size(); }
struct Context : Kernel_object<Kernel::Signal_context>,
Object_pool<Context>::Entry
{
using Pool = Object_pool<Context>;
protected:
Context(Receiver &rcv, unsigned const imprint);
};
/**
* Return the size of allocatable space per slab
*
* \param T object type of the slab
*/
template <typename T>
static constexpr size_t _slab_size() { return _slab_slots<T>() * T::size(); }
};
template <typename T, size_t BLOCK_SIZE = get_page_size()>
class Slab : public Tslab<T, BLOCK_SIZE>
{
private:
class Genode::Signal_session_component
:
public Rpc_object<Signal_session>,
public Signal_session_traits
{
private:
uint8_t _first_block[BLOCK_SIZE];
typedef Signal_session_receiver Receiver;
typedef Signal_session_context Context;
typedef Signal_session_traits Traits;
public:
Allocator_guard _allocator;
Slab _receivers_slab;
Receiver::Pool _receivers;
Slab _contexts_slab;
Context::Pool _contexts;
Slab(Allocator * const allocator)
: Tslab<T, BLOCK_SIZE>(allocator,
(Slab_block*)&_first_block) { }
};
char _first_receivers_slab [Traits::_slab_size<Receiver>()];
char _first_contexts_slab [Traits::_slab_size<Context>()];
Allocator_guard _allocator;
Slab<Receiver> _receivers_slab;
Receiver::Pool _receivers;
Slab<Context> _contexts_slab;
Context::Pool _contexts;
/**
* Destruct receiver 'r'
@ -171,31 +97,9 @@ class Genode::Signal_session_component
* \param quota amount of RAM quota donated to this session
*/
Signal_session_component(Allocator * const allocator,
size_t const quota)
:
_allocator(allocator, quota),
_receivers_slab(Receiver::size(), Traits::_slab_size<Receiver>(),
(Slab_block *)&_first_receivers_slab, &_allocator),
_contexts_slab(Context::size(), Traits::_slab_size<Context>(),
(Slab_block *)&_first_contexts_slab, &_allocator)
{ }
size_t const quota);
/**
* Destructor
*/
~Signal_session_component()
{
while (1) {
Context * const c = _contexts.first_locked();
if (!c) { break; }
_destruct_context(c);
}
while (1) {
Receiver * const r = _receivers.first_locked();
if (!r) { break; }
_destruct_receiver(r);
}
}
~Signal_session_component();
/**
* Raise the quota of this session by 'q'

View File

@ -38,7 +38,7 @@ class Genode::Arm
public:
static constexpr addr_t exception_entry = 0xffff0000;
static constexpr addr_t mtc_size = 1 << MIN_PAGE_SIZE_LOG2;
static constexpr addr_t mtc_size = get_page_size();
static constexpr addr_t data_access_align = 4;
/**

View File

@ -22,24 +22,24 @@
/* Core includes */
#include <dataspace_component.h>
#include <object.h>
#include <kernel/vm.h>
namespace Genode {
class Vm_session_component;
}
class Genode::Vm_session_component :
public Genode::Rpc_object<Genode::Vm_session>
class Genode::Vm_session_component
: public Genode::Rpc_object<Genode::Vm_session>,
public Kernel_object<Kernel::Vm>
{
private:
Rpc_entrypoint *_ds_ep;
Range_allocator *_ram_alloc;
char _vm[sizeof(Kernel::Vm)];
Dataspace_component _ds;
Dataspace_capability _ds_cap;
addr_t _ds_addr;
bool _initialized = false;
static size_t _ds_size() {
return align_addr(sizeof(Cpu_state_modes),
@ -47,9 +47,6 @@ class Genode::Vm_session_component :
addr_t _alloc_ds(size_t &ram_quota);
Kernel::Vm * _kernel_object() {
return reinterpret_cast<Kernel::Vm*>(_vm); }
public:
Vm_session_component(Rpc_entrypoint *ds_ep,

View File

@ -23,14 +23,16 @@
/* Core includes */
#include <dataspace_component.h>
#include <object.h>
#include <kernel/vm.h>
namespace Genode {
class Vm_session_component;
}
class Genode::Vm_session_component :
public Genode::Rpc_object<Genode::Vm_session>
class Genode::Vm_session_component
: public Genode::Rpc_object<Genode::Vm_session>,
public Kernel_object<Kernel::Vm>
{
private:
@ -39,13 +41,11 @@ class Genode::Vm_session_component :
Rpc_entrypoint *_ds_ep;
Range_allocator *_ram_alloc;
char _vm[sizeof(Kernel::Vm)];
Dataspace_component _ds;
Dataspace_capability _ds_cap;
addr_t _ds_addr;
Translation_table *_table;
Page_slab *_pslab;
bool _initialized = false;
static size_t _ds_size() {
return align_addr(sizeof(Cpu_state_modes),
@ -54,9 +54,6 @@ class Genode::Vm_session_component :
addr_t _alloc_ds(size_t &ram_quota);
void _attach(addr_t phys_addr, addr_t vm_addr, size_t size);
Kernel::Vm * _kernel_object() {
return reinterpret_cast<Kernel::Vm*>(_vm); }
public:
Vm_session_component(Rpc_entrypoint *ds_ep,

View File

@ -22,31 +22,8 @@ namespace Genode
{
enum {
ACTIVITY_TABLE_ON_FAULTS = 0,
MIN_PAGE_SIZE_LOG2 = 12,
};
/**
* Identification that core threads use to get access to their metadata
*/
typedef addr_t Core_thread_id;
/**
* Allows core threads to get their core-thread ID via their stack pointer
*/
enum { CORE_STACK_ALIGNM_LOG2 = 15 };
/**
* Get the minimal supported page-size log 2
*/
constexpr size_t get_page_size_log2() { return MIN_PAGE_SIZE_LOG2; }
/**
* Get the minimal supported page-size
*/
constexpr size_t get_page_size() { return 1 << get_page_size_log2(); }
/**
* Get the base mask for the minimal supported page-size
*/
@ -152,4 +129,3 @@ void Genode::print_page_fault(char const * const fault_msg,
}
#endif /* _UTIL_H_ */

View File

@ -57,7 +57,7 @@ Irq_session_component::~Irq_session_component()
using namespace Kernel;
User_irq * kirq = reinterpret_cast<User_irq*>(&_kernel_object);
_irq_alloc->free((void *)(addr_t)static_cast<Kernel::Irq*>(kirq)->id());
_irq_alloc->free((void *)(addr_t)static_cast<Kernel::Irq*>(kirq)->irq_number());
if (_sig_cap.valid())
Kernel::delete_irq(kirq);
}

View File

@ -17,8 +17,10 @@
#include <kernel/kernel.h>
#include <kernel/thread.h>
#include <kernel/irq.h>
#include <kernel/pd.h>
#include <pic.h>
#include <timer.h>
#include <assert.h>
/* base includes */
#include <unmanaged_singleton.h>
@ -179,6 +181,7 @@ void Cpu::exception()
{
/* update old job */
Job * const old_job = scheduled_job();
old_job->exception(_id);
/* update scheduler */
@ -277,3 +280,29 @@ Cpu_pool::Cpu_pool()
for (unsigned id = 0; id < NR_OF_CPUS; id++) {
new (_cpus[id]) Cpu(id, &_timer); }
}
/*****************
** Cpu_context **
*****************/
/**
* Enable kernel-entry assembly to get an exclusive stack for every CPU
*/
enum { KERNEL_STACK_SIZE = 64 * 1024 };
Genode::size_t kernel_stack_size = KERNEL_STACK_SIZE;
Genode::uint8_t kernel_stack[NR_OF_CPUS][KERNEL_STACK_SIZE]
__attribute__((aligned(16)));
Cpu_context::Cpu_context(Genode::Translation_table * const table)
{
sp = (addr_t)kernel_stack;
ip = (addr_t)kernel;
core_pd()->admit(this);
/*
* platform-specific initialization; it has to happen after the
* registers above have been set
*/
_init(KERNEL_STACK_SIZE, (addr_t)table);
}

View File

@ -1,6 +1,7 @@
/*
* \brief Backend for end points of synchronous interprocess communication
* \author Martin Stein
* \author Stefan Kalkowski
* \date 2012-11-30
*/
@ -13,185 +14,224 @@
/* Genode includes */
#include <util/string.h>
#include <base/native_types.h>
/* core includes */
#include <platform_pd.h>
#include <kernel/ipc_node.h>
#include <assert.h>
#include <kernel/pd.h>
#include <kernel/kernel.h>
#include <kernel/thread.h>
using namespace Kernel;
void Ipc_node::_receive_request(Message_buf * const r)
void Ipc_node::copy_msg(Ipc_node * const sender)
{
/* FIXME: invalid requests should be discarded */
if (r->size > _inbuf.size) {
PWRN("oversized request");
r->size = _inbuf.size;
}
/* fetch message */
Genode::memcpy(_inbuf.base, r->base, r->size);
_inbuf.size = r->size;
_inbuf.src = r->src;
using namespace Genode;
using Reference = Object_identity_reference;
/* update state */
_state = PREPARE_REPLY;
/* copy payload and set destination capability id */
*_utcb = *sender->_utcb;
_utcb->destination(sender->_capid);
/* translate capabilities */
for (unsigned i = 0; i < _rcv_caps; i++) {
capid_t id = sender->_utcb->cap_get(i);
/* if there is no capability to send, just free the pre-allocation */
if (i >= sender->_utcb->cap_cnt()) {
pd()->platform_pd()->capability_slab().free(_obj_id_ref_ptr[i]);
continue;
}
/* within the same pd, we can simply copy the id */
if (pd() == sender->pd()) {
_utcb->cap_add(id);
pd()->platform_pd()->capability_slab().free(_obj_id_ref_ptr[i]);
continue;
}
/* lookup the capability id within the caller's cap space */
Reference *oir = (id == cap_id_invalid())
? nullptr : sender->pd()->cap_tree().find(id);
/* if the caller's capability is invalid, free the pre-allocation */
if (!oir) {
_utcb->cap_add(cap_id_invalid());
pd()->platform_pd()->capability_slab().free(_obj_id_ref_ptr[i]);
continue;
}
/* lookup the capability id within the callee's cap space */
Reference *dst_oir = oir->find(pd());
/* if it is not found, and the target is not core, create a reference */
if (!dst_oir && (pd() != core_pd())) {
dst_oir = oir->factory(_obj_id_ref_ptr[i], *pd());
if (!dst_oir)
pd()->platform_pd()->capability_slab().free(_obj_id_ref_ptr[i]);
} else /* otherwise free the pre-allocation */
pd()->platform_pd()->capability_slab().free(_obj_id_ref_ptr[i]);
/* add the translated capability id to the target buffer */
_utcb->cap_add(dst_oir ? dst_oir->capid() : cap_id_invalid());
}
}
void Ipc_node::_receive_reply(void * const base, size_t const size)
void Ipc_node::_receive_request(Ipc_node * const caller)
{
/* FIXME: when discard awaited replies userland must get a hint */
if (size > _inbuf.size) {
PDBG("discard invalid IPC reply");
return;
}
/* receive reply */
Genode::memcpy(_inbuf.base, base, size);
_inbuf.size = size;
copy_msg(caller);
_caller = caller;
_state = PREPARE_REPLY;
}
/* update state */
if (_state != PREPARE_AND_AWAIT_REPLY) { _state = INACTIVE; }
else { _state = PREPARE_REPLY; }
void Ipc_node::_receive_reply(Ipc_node * callee)
{
copy_msg(callee);
_state = (_state != PREPARE_AND_AWAIT_REPLY) ? INACTIVE
: PREPARE_REPLY;
_send_request_succeeded();
}
void Ipc_node::_announce_request(Message_buf * const r)
void Ipc_node::_announce_request(Ipc_node * const node)
{
/* directly receive request if we've awaited it */
if (_state == AWAIT_REQUEST) {
_receive_request(r);
_receive_request(node);
_await_request_succeeded();
return;
}
/* cannot receive yet, so queue request */
_request_queue.enqueue(r);
_request_queue.enqueue(node);
}
void Ipc_node::_cancel_request_queue()
{
while (1) {
Message_buf * const r = _request_queue.dequeue();
if (!r) { return; }
r->src->_outbuf_request_cancelled();
}
Ipc_node * node;
while ((node = _request_queue.dequeue()))
node->_outbuf_request_cancelled();
}
void Ipc_node::_cancel_outbuf_request()
{
if (_outbuf_dst) {
_outbuf_dst->_announced_request_cancelled(&_outbuf);
_outbuf_dst = 0;
if (_callee) {
_callee->_announced_request_cancelled(this);
_callee = nullptr;
}
}
void Ipc_node::_cancel_inbuf_request()
{
if (_inbuf.src) {
_inbuf.src->_outbuf_request_cancelled();
_inbuf.src = 0;
if (_caller) {
_caller->_outbuf_request_cancelled();
_caller = nullptr;
}
}
void Ipc_node::_announced_request_cancelled(Message_buf * const r)
void Ipc_node::_announced_request_cancelled(Ipc_node * const node)
{
if (_inbuf.src == r->src) {
_inbuf.src = 0;
return;
}
_request_queue.remove(r);
if (_caller == node) _caller = nullptr;
else _request_queue.remove(node);
}
void Ipc_node::_outbuf_request_cancelled()
{
if (_outbuf_dst) {
_outbuf_dst = 0;
if (!_inbuf.src) { _state = INACTIVE; }
else { _state = PREPARE_REPLY; }
_send_request_failed();
}
if (_callee == nullptr) return;
_callee = nullptr;
_state = (!_caller) ? INACTIVE : PREPARE_REPLY;
_send_request_failed();
}
bool Ipc_node::_helps_outbuf_dst()
{
return (_state == PREPARE_AND_AWAIT_REPLY ||
_state == AWAIT_REPLY) && _outbuf_dst_help;
_state == AWAIT_REPLY) && _help;
}
void Ipc_node::send_request(Ipc_node * const dst, void * const buf_base,
size_t const buf_size, size_t const msg_size,
bool help)
void Ipc_node::_init(Genode::Native_utcb * utcb, Ipc_node * starter)
{
_utcb = utcb;
_rcv_caps = starter->_utcb->cap_cnt();
Genode::Allocator &slab = pd()->platform_pd()->capability_slab();
for (unsigned i = 0; i < _rcv_caps; i++)
_obj_id_ref_ptr[i] = slab.alloc(sizeof(Object_identity_reference));
copy_msg(starter);
}
void Ipc_node::send_request(Ipc_node * const callee, capid_t capid, bool help,
unsigned rcv_caps)
{
/* assertions */
assert(_state == INACTIVE || _state == PREPARE_REPLY);
/* prepare transmission of request message */
_outbuf.base = buf_base;
_outbuf.size = msg_size;
_outbuf.src = this;
_outbuf_dst = dst;
_outbuf_dst_help = 0;
/*
* Prepare reception of reply message but don't clear
* '_inbuf.origin' because we might also prepare a reply.
*/
_inbuf.base = buf_base;
_inbuf.size = buf_size;
Genode::Allocator &slab = pd()->platform_pd()->capability_slab();
for (unsigned i = 0; i < rcv_caps; i++)
_obj_id_ref_ptr[i] = slab.alloc(sizeof(Object_identity_reference));
/* update state */
if (_state != PREPARE_REPLY) { _state = AWAIT_REPLY; }
else { _state = PREPARE_AND_AWAIT_REPLY; }
_state = (_state != PREPARE_REPLY) ? AWAIT_REPLY
: PREPARE_AND_AWAIT_REPLY;
_callee = callee;
_capid = capid;
_help = false;
_rcv_caps = rcv_caps;
/* announce request */
dst->_announce_request(&_outbuf);
_callee->_announce_request(this);
/* set help relation after announcement to simplify scheduling */
_outbuf_dst_help = help;
_help = help;
}
Ipc_node * Ipc_node::helping_sink() {
return _helps_outbuf_dst() ? _outbuf_dst->helping_sink() : this; }
return _helps_outbuf_dst() ? _callee->helping_sink() : this; }
bool Ipc_node::await_request(void * const buf_base,
size_t const buf_size)
bool Ipc_node::await_request(unsigned rcv_caps)
{
/* assertions */
assert(_state == INACTIVE);
/* prepare receipt of request */
_inbuf.base = buf_base;
_inbuf.size = buf_size;
_inbuf.src = 0;
Genode::Allocator &slab = pd()->platform_pd()->capability_slab();
for (unsigned i = 0; i < rcv_caps; i++)
_obj_id_ref_ptr[i] = slab.alloc(sizeof(Object_identity_reference));
_rcv_caps = rcv_caps;
/* if anybody already announced a request receive it */
if (!_request_queue.empty()) {
_receive_request(_request_queue.dequeue());
return true;
}
/* no request announced, so wait */
_state = AWAIT_REQUEST;
return false;
}
void Ipc_node::send_reply(void * const msg_base,
size_t const msg_size)
void Ipc_node::send_reply()
{
/* reply to the last request if we have to */
if (_state == PREPARE_REPLY) {
if (_inbuf.src) {
_inbuf.src->_receive_reply(msg_base, msg_size);
_inbuf.src = 0;
if (_caller != nullptr) {
_caller->_receive_reply(this);
_caller = nullptr;
}
_state = INACTIVE;
}
@ -205,11 +245,11 @@ void Ipc_node::cancel_waiting()
_cancel_outbuf_request();
_state = INACTIVE;
_send_request_failed();
return;
break;
case AWAIT_REQUEST:
_state = INACTIVE;
_await_request_failed();
return;
break;
case PREPARE_AND_AWAIT_REPLY:
_cancel_outbuf_request();
_state = PREPARE_REPLY;
@ -220,11 +260,8 @@ void Ipc_node::cancel_waiting()
}
Ipc_node::Ipc_node() : _state(INACTIVE)
{
_inbuf.src = 0;
_outbuf_dst = 0;
}
char const * Ipc_node::pd_label() const {
return (_pd) ? _pd->platform_pd()->label() : "?"; }
Ipc_node::~Ipc_node()
@ -233,3 +270,4 @@ Ipc_node::~Ipc_node()
_cancel_inbuf_request();
_cancel_outbuf_request();
}

View File

@ -18,10 +18,10 @@
#include <pic.h>
void Kernel::Irq::disable() const { pic()->mask(_id()); }
void Kernel::Irq::disable() const { pic()->mask(_irq_nr); }
void Kernel::Irq::enable() const { pic()->unmask(_id(), Cpu::executing_id()); }
void Kernel::Irq::enable() const { pic()->unmask(_irq_nr, Cpu::executing_id()); }
Kernel::Irq::Pool * Kernel::User_irq::_pool()

View File

@ -17,7 +17,7 @@
*/
/*
* Copyright (C) 2011-2013 Genode Labs GmbH
* Copyright (C) 2011-2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
@ -26,58 +26,29 @@
/* core includes */
#include <kernel/lock.h>
#include <kernel/pd.h>
#include <kernel/kernel.h>
#include <kernel/test.h>
#include <platform_pd.h>
#include <trustzone.h>
#include <timer.h>
#include <pic.h>
#include <map_local.h>
#include <platform_thread.h>
/* base includes */
#include <unmanaged_singleton.h>
#include <base/native_types.h>
/* base-hw includes */
#include <kernel/irq.h>
#include <kernel/perf_counter.h>
using namespace Kernel;
extern "C" void _core_start(void);
extern void * _start_secondary_cpus;
extern void * _start_secondary_cpus;
static_assert(sizeof(Genode::sizet_arithm_t) >= 2 * sizeof(size_t),
"Bad result type for size_t arithmetics.");
namespace Kernel
{
/* import Genode types */
typedef Genode::Core_thread_id Core_thread_id;
Pd_pool * pd_pool() { return unmanaged_singleton<Pd_pool>(); }
Thread_pool * thread_pool() { return unmanaged_singleton<Thread_pool>(); }
Signal_context_pool * signal_context_pool() { return unmanaged_singleton<Signal_context_pool>(); }
Signal_receiver_pool * signal_receiver_pool() { return unmanaged_singleton<Signal_receiver_pool>(); }
/**
* Hook that enables automated testing of kernel internals
*/
void test();
enum { STACK_SIZE = 64 * 1024 };
/**
* Return lock that guards all kernel data against concurrent access
*/
Lock & data_lock()
{
static Lock s;
return s;
}
}
Kernel::Id_allocator & Kernel::id_alloc() {
return *unmanaged_singleton<Id_allocator>(); }
Lock & Kernel::data_lock() { return *unmanaged_singleton<Kernel::Lock>(); }
Pd * Kernel::core_pd() {
@ -87,18 +58,6 @@ Pd * Kernel::core_pd() {
Pic * Kernel::pic() { return unmanaged_singleton<Pic>(); }
Native_utcb* Kernel::core_main_thread_utcb_phys_addr() {
return unmanaged_singleton<Native_utcb,Genode::get_page_size()>(); }
/**
* Enable kernel-entry assembly to get an exclusive stack for every CPU
*/
unsigned kernel_stack_size = Kernel::STACK_SIZE;
char kernel_stack[NR_OF_CPUS][Kernel::STACK_SIZE]
__attribute__((aligned(16)));
/**
* Setup kernel environment before activating secondary CPUs
*/
@ -129,35 +88,7 @@ extern "C" void init_kernel_up()
*/
void init_kernel_mp_primary()
{
using namespace Genode;
/* get stack memory that fulfills the constraints for core stacks */
enum {
STACK_ALIGNM = 1 << CORE_STACK_ALIGNM_LOG2,
STACK_SIZE = DEFAULT_STACK_SIZE,
};
static_assert(STACK_SIZE <= STACK_ALIGNM - sizeof(Core_thread_id),
"stack size does not fit stack alignment of core");
static char s[STACK_SIZE] __attribute__((aligned(STACK_ALIGNM)));
/* provide thread ident at the aligned base of the stack */
*(Core_thread_id *)s = 0;
/* initialize UTCB and map it */
Native_utcb * utcb = Kernel::core_main_thread_utcb_phys_addr();
Genode::map_local((addr_t)utcb, (addr_t)utcb_main_thread(),
sizeof(Native_utcb) / get_page_size());
static Kernel::Thread t(Cpu_priority::max, 0, "core");
/* start thread with stack pointer at the top of stack */
utcb->start_info()->init(t.id(), Dataspace_capability());
t.ip = (addr_t)&_core_start;
t.sp = (addr_t)s + STACK_SIZE;
t.init(cpu_pool()->primary_cpu(), core_pd(),
Genode::utcb_main_thread(), 1);
/* kernel initialization finished */
Core_thread::singleton();
Genode::printf("kernel initialized\n");
test();
}
@ -222,25 +153,8 @@ extern "C" void init_kernel_mp()
}
/**
* Main routine of every kernel pass
*/
extern "C" void kernel()
{
data_lock().lock();
cpu_pool()->cpu(Cpu::executing_id())->exception();
}
Kernel::Cpu_context::Cpu_context(Genode::Translation_table * const table)
{
sp = (addr_t)kernel_stack;
ip = (addr_t)kernel;
core_pd()->admit(this);
/*
* platform specific initialization, has to be done after
* setting the registers by now
*/
_init(STACK_SIZE, (addr_t)table);
}

View File

@ -0,0 +1,74 @@
#include <kernel/object.h>
#include <kernel/pd.h>
#include <kernel/kernel.h>
#include <util/construct_at.h>
using namespace Kernel;
Object_identity::Object_identity(Object & object) : _object(object) { }
Object_identity::~Object_identity()
{
for (Object_identity_reference * oir = first(); oir; oir = first())
oir->invalidate();
}
Object_identity_reference *
Object_identity_reference::find(Pd * pd)
{
if (!_identity) return nullptr;
for (Object_identity_reference * oir = _identity->first();
oir; oir = oir->next())
if (pd == &(oir->_pd)) return oir;
return nullptr;
}
Object_identity_reference *
Object_identity_reference::find(capid_t capid)
{
using Avl_node_base = Genode::Avl_node<Object_identity_reference>;
if (capid == _capid) return this;
Object_identity_reference * subtree =
Avl_node_base::child(capid > _capid);
return (subtree) ? subtree->find(capid) : nullptr;
}
Object_identity_reference * Object_identity_reference::factory(void * dst,
Pd & pd)
{
using namespace Genode;
return !_identity ?
nullptr : construct_at<Object_identity_reference>(dst, _identity, pd);
}
void Object_identity_reference::invalidate() {
if (_identity) _identity->remove(this); }
Object_identity_reference::Object_identity_reference(Object_identity *oi,
Pd &pd)
: _capid(pd.capid_alloc().alloc()), _identity(oi), _pd(pd)
{
if (_identity) _identity->insert(this);
_pd.cap_tree().insert(this);
}
Object_identity_reference::~Object_identity_reference()
{
invalidate();
_pd.cap_tree().remove(this);
_pd.capid_alloc().free(_capid);
}
Object_identity_reference * Object_identity_reference_tree::find(capid_t id) {
return (first()) ? first()->find(id) : nullptr; }

View File

@ -16,17 +16,20 @@
#include <base/thread_state.h>
#include <unmanaged_singleton.h>
#include <cpu_session/cpu_session.h>
#include <util/construct_at.h>
/* core includes */
#include <kernel/kernel.h>
#include <kernel/thread.h>
#include <kernel/irq.h>
#include <map_local.h>
#include <platform_pd.h>
#include <pic.h>
extern "C" void _core_start(void);
using namespace Kernel;
typedef Genode::Thread_state Thread_state;
bool Thread::_core() const { return pd() == core_pd(); }
@ -62,8 +65,8 @@ void Thread::_await_signal(Signal_receiver * const receiver)
void Thread::_receive_signal(void * const base, size_t const size)
{
assert(_state == AWAITS_SIGNAL && size <= _utcb_phys->size());
Genode::memcpy(_utcb_phys->base(), base, size);
assert(_state == AWAITS_SIGNAL);
Genode::memcpy((void*)utcb()->base(), base, size);
_become_active();
}
@ -159,31 +162,6 @@ void Thread::_become_inactive(State const s)
}
void Thread::init(Cpu * const cpu, Pd * const pd,
Native_utcb * const utcb_phys, bool const start)
{
assert(_state == AWAITS_START)
Cpu_job::affinity(cpu);
_utcb_phys = utcb_phys;
/* join protection domain */
_pd = pd;
_pd->admit(this);
/* print log message */
if (START_VERBOSE) {
Genode::printf("start thread %u '%s' in program '%s' ",
id(), label(), pd_label());
if (NR_OF_CPUS) {
Genode::printf("on CPU %u/%u ", cpu->id(), NR_OF_CPUS); }
Genode::printf("\n");
}
/* start execution */
if (start) { _become_active(); }
}
void Thread::_stop() { _become_inactive(STOPPED); }
@ -201,30 +179,6 @@ void Thread::_receive_yielded_cpu()
void Thread::proceed(unsigned const cpu) { mtc()->switch_to_user(this, cpu); }
char const * Kernel::Thread::pd_label() const {
return (_pd) ? _pd->platform_pd()->label() : "?"; }
void Thread::_call_new_pd()
{
using namespace Genode;
try {
/* create protection domain */
void * p = (void *) user_arg_1();
Platform_pd * ppd = (Platform_pd *) user_arg_2();
Translation_table * tt = ppd->translation_table_phys();
new (p) Pd(tt, ppd);
user_arg_0(0);
return;
} catch(...) { }
user_arg_0(-1);
}
void Thread::_call_delete_pd() { reinterpret_cast<Pd*>(user_arg_1())->~Pd(); }
size_t Thread::_core_to_kernel_quota(size_t const quota) const
{
using Genode::Cpu_session;
@ -236,13 +190,13 @@ size_t Thread::_core_to_kernel_quota(size_t const quota) const
void Thread::_call_new_thread()
{
/* create new thread */
void * const p = (void *)user_arg_1();
unsigned const priority = user_arg_2();
unsigned const quota = _core_to_kernel_quota(user_arg_3());
char const * const label = (char *)user_arg_4();
Thread * const t = new (p) Thread(priority, quota, label);
user_arg_0(t->id());
void * const p = (void *)user_arg_1();
unsigned const priority = user_arg_2();
unsigned const quota = _core_to_kernel_quota(user_arg_3());
char const * const label = (char *)user_arg_4();
Core_object<Thread> * co =
Genode::construct_at<Core_object<Thread> >(p, priority, quota, label);
user_arg_0(co->core_capid());
}
@ -253,10 +207,6 @@ void Thread::_call_thread_quota()
}
void Thread::_call_delete_thread() {
reinterpret_cast<Thread*>(user_arg_1())->~Thread(); }
void Thread::_call_start_thread()
{
/* lookup CPU */
@ -266,13 +216,27 @@ void Thread::_call_start_thread()
user_arg_0(-2);
return;
}
Thread * const thread = (Thread*) user_arg_1();
Pd * const pd = (Pd *) user_arg_3();
/* start thread */
thread->init(cpu, pd, (Native_utcb *)user_arg_4(), 1);
user_arg_0(0);
Thread * const thread = (Thread*) user_arg_1();
assert(thread->_state == AWAITS_START)
thread->affinity(cpu);
/* join protection domain */
thread->_pd = (Pd *) user_arg_3();
thread->_pd->admit(thread);
/* print log message */
if (START_VERBOSE) {
Genode::printf("start thread '%s' in program '%s' ",
thread->label(), thread->pd_label());
if (NR_OF_CPUS) {
Genode::printf("on CPU %u/%u ", cpu->id(), NR_OF_CPUS); }
Genode::printf("\n");
}
thread->_init((Native_utcb *)user_arg_4(), this);
thread->_become_active();
}
@ -289,11 +253,14 @@ void Thread::_call_resume_thread() {
void Thread::_call_resume_local_thread()
{
if (!pd()) return;
/* lookup thread */
Thread * const thread = Thread::pool()->object(user_arg_1());
Thread * const thread = pd()->cap_tree().find<Thread>(user_arg_1());
if (!thread || pd() != thread->pd()) {
PWRN("failed to lookup thread");
user_arg_0(0);
PWRN("%s -> %s: failed to lookup thread %u to resume it",
pd_label(), label(), (capid_t)user_arg_1());
_stop();
return;
}
/* resume thread */
@ -312,16 +279,12 @@ Thread_event::Thread_event(Thread * const t)
: _thread(t), _signal_context(0) { }
void Thread_event::submit()
{
if (_signal_context && !_signal_context->submit(1)) { return; }
PWRN("failed to communicate thread event");
}
void Thread_event::submit() { if (_signal_context) _signal_context->submit(1); }
void Thread::_call_yield_thread()
{
Thread * const t = Thread::pool()->object(user_arg_1());
Thread * const t = pd()->cap_tree().find<Thread>(user_arg_1());
if (t) { t->_receive_yielded_cpu(); }
Cpu_job::_yield();
}
@ -329,10 +292,7 @@ void Thread::_call_yield_thread()
void Thread::_call_await_request_msg()
{
void * buf_base;
size_t buf_size;
_utcb_phys->message()->buffer_info(buf_base, buf_size);
if (Ipc_node::await_request(buf_base, buf_size)) {
if (Ipc_node::await_request(user_arg_1())) {
user_arg_0(0);
return;
}
@ -342,7 +302,8 @@ void Thread::_call_await_request_msg()
void Thread::_call_send_request_msg()
{
Thread * const dst = Thread::pool()->object(user_arg_1());
Object_identity_reference * oir = pd()->cap_tree().find(user_arg_1());
Thread * const dst = (oir) ? oir->object<Thread>() : nullptr;
if (!dst) {
PWRN("%s -> %s: cannot send to unknown recipient %llu",
pd_label(), label(), (unsigned long long)user_arg_1());
@ -350,22 +311,19 @@ void Thread::_call_send_request_msg()
return;
}
bool const help = Cpu_job::_helping_possible(dst);
void * buf_base;
size_t buf_size, msg_size;
_utcb_phys->message()->request_info(buf_base, buf_size, msg_size);
oir = oir->find(dst->pd());
Ipc_node::send_request(dst, oir ? oir->capid() : cap_id_invalid(),
help, user_arg_2());
_state = AWAITS_IPC;
Ipc_node::send_request(dst, buf_base, buf_size, msg_size, help);
if (!help || !dst->own_share_active()) { _deactivate_used_shares(); }
}
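Note how the capability used for the invocation is re-resolved in the destination's protection domain: oir->find(dst->pd()) yields the reference the receiver holds for the same object identity, so the capid delivered with the request is the receiver's local name for it (or cap_id_invalid() if it holds none). A toy model of that per-PD translation, with illustrative names, might be:
#include <cstdio>
#include <map>
using capid_t = unsigned;
/* toy object identity shared by several protection domains */
struct Identity_sketch { int rpc_object; };
/* each PD names an identity by its own local capid */
struct Pd_sketch {
	std::map<capid_t, Identity_sketch const *> cap_tree;
	/* reverse lookup, playing the role of oir->find(dst->pd()) above */
	capid_t capid_of(Identity_sketch const *id) const
	{
		for (auto const &entry : cap_tree)
			if (entry.second == id) return entry.first;
		return 0;   /* 0 plays the role of cap_id_invalid() */
	}
};
int main()
{
	Identity_sketch object { 42 };
	Pd_sketch client, server;
	client.cap_tree[7] = &object;   /* the client calls it capid 7 */
	server.cap_tree[3] = &object;   /* the server calls it capid 3 */
	capid_t local_name = server.capid_of(client.cap_tree[7]);
	std::printf("client capid 7 arrives at the server as capid %u\n", local_name);
}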
void Thread::_call_send_reply_msg()
{
void * msg_base;
size_t msg_size;
_utcb_phys->message()->reply_info(msg_base, msg_size);
Ipc_node::send_reply(msg_base, msg_size);
bool const await_request_msg = user_arg_1();
Ipc_node::send_reply();
bool const await_request_msg = user_arg_2();
if (await_request_msg) { _call_await_request_msg(); }
else { user_arg_0(0); }
}
@ -376,27 +334,14 @@ void Thread::_call_route_thread_event()
/* override event route */
Thread * const t = (Thread*) user_arg_1();
unsigned const event_id = user_arg_2();
unsigned const signal_context_id = user_arg_3();
if (t->_route_event(event_id, signal_context_id)) { user_arg_0(-1); }
else { user_arg_0(0); }
return;
Signal_context * c = pd()->cap_tree().find<Signal_context>(user_arg_3());
user_arg_0(t->_route_event(event_id, c));
}
int Thread::_route_event(unsigned const event_id,
unsigned const signal_context_id)
Signal_context * c)
{
/* lookup signal context */
Signal_context * c;
if (signal_context_id) {
c = Signal_context::pool()->object(signal_context_id);
if (!c) {
PWRN("%s -> %s: unknown signal context %u",
pd_label(), label(), signal_context_id);
return -1;
}
} else { c = 0; }
/* lookup event and assign signal context */
Thread_event Thread::* e = _event(event_id);
if (!e) { return -1; }
@ -412,11 +357,8 @@ void Thread_event::signal_context(Signal_context * const c)
}
unsigned Thread_event::signal_context_id() const
{
if (_signal_context) { return _signal_context->id(); }
return 0;
}
Signal_context * const Thread_event::signal_context() const {
return _signal_context; }
void Thread::_call_access_thread_regs()
@ -431,7 +373,7 @@ void Thread::_call_access_thread_regs()
return;
}
/* execute read operations */
addr_t * const utcb = (addr_t *)_utcb_phys->base();
addr_t * const utcb = (addr_t *) this->utcb()->base();
addr_t * const read_ids = &utcb[0];
addr_t * values = (addr_t *)user_arg_4();
for (unsigned i = 0; i < reads; i++) {
@ -500,17 +442,6 @@ void Thread::_call_update_instr_region()
}
void Thread::_print_activity_table()
{
for (unsigned id = 0; id < MAX_KERNEL_OBJECTS; id++) {
Thread * const t = Thread::pool()->object(id);
if (!t) { continue; }
t->_print_activity(t == this);
}
return;
}
void Thread::_print_activity(bool const printing_thread)
{
Genode::printf("\033[33m%s -> %s:\033[0m", pd_label(), label());
@ -529,12 +460,10 @@ void Thread::_print_activity(bool const printing_thread)
Genode::printf("\033[32m await RES\033[0m");
break; }
case AWAITS_SIGNAL: {
unsigned const receiver_id = Signal_handler::receiver()->id();
Genode::printf("\033[32m await SIG %u\033[0m", receiver_id);
Genode::printf("\033[32m await SIG\033[0m");
break; }
case AWAITS_SIGNAL_CONTEXT_KILL: {
unsigned const context_id = Signal_context_killer::context()->id();
Genode::printf("\033[32m await SCK %u\033[0m", context_id);
Genode::printf("\033[32m await SCK\033[0m");
break; }
case STOPPED: {
Genode::printf("\033[32m stop\033[0m");
@ -554,57 +483,33 @@ void Thread::_print_activity_when_awaits_ipc()
{
switch (Ipc_node::state()) {
case AWAIT_REPLY: {
Thread * const server = dynamic_cast<Thread *>(Ipc_node::outbuf_dst());
Genode::printf("\033[32m await RPL %u\033[0m", server->id());
Thread * const server = dynamic_cast<Thread *>(Ipc_node::callee());
Genode::printf("\033[32m await RPL %s -> %s\033[0m",
server->pd_label(), server->label());
break; }
case AWAIT_REQUEST: {
Genode::printf("\033[32m await REQ\033[0m");
break; }
case PREPARE_AND_AWAIT_REPLY: {
Thread * const server = dynamic_cast<Thread *>(Ipc_node::outbuf_dst());
Genode::printf("\033[32m prep RPL await RPL %u\033[0m", server->id());
Thread * const server = dynamic_cast<Thread *>(Ipc_node::callee());
Genode::printf("\033[32m prep RPL await RPL %s -> %s\033[0m",
server->pd_label(), server->label());
break; }
default: break;
}
}
void Thread::_call_print_char()
{
char const c = user_arg_1();
if (!c) { _print_activity_table(); }
Genode::printf("%c", (char)user_arg_1());
}
void Thread::_call_new_signal_receiver()
{
/* create receiver */
void * const p = (void *)user_arg_1();
Signal_receiver * const r = new (p) Signal_receiver();
user_arg_0(r->id());
}
void Thread::_call_new_signal_context()
{
/* create and assign context*/
void * const p = (void *)user_arg_1();
Signal_receiver * const r = (Signal_receiver *) user_arg_2();
unsigned const imprint = user_arg_3();
Signal_context * const c = new (p) Signal_context(r, imprint);
user_arg_0(c->id());
}
void Thread::_call_print_char() { Genode::printf("%c", (char)user_arg_1()); }
void Thread::_call_await_signal()
{
/* lookup receiver */
unsigned const receiver_id = user_arg_1();
Signal_receiver * const r = Signal_receiver::pool()->object(receiver_id);
Signal_receiver * const r = pd()->cap_tree().find<Signal_receiver>(user_arg_1());
if (!r) {
PWRN("%s -> %s: cannot await, unknown signal receiver %u",
pd_label(), label(), receiver_id);
pd_label(), label(), (capid_t)user_arg_1());
user_arg_0(-1);
return;
}
@ -621,11 +526,10 @@ void Thread::_call_await_signal()
void Thread::_call_signal_pending()
{
/* lookup signal receiver */
unsigned const id = user_arg_1();
Signal_receiver * const r = Signal_receiver::pool()->object(id);
Signal_receiver * const r = pd()->cap_tree().find<Signal_receiver>(user_arg_1());
if (!r) {
PWRN("%s -> %s: no pending, unknown signal receiver %u",
pd_label(), label(), id);
PWRN("%s -> %s: no pending, unknown signal receiver",
pd_label(), label());
user_arg_0(0);
return;
}
@ -637,14 +541,14 @@ void Thread::_call_signal_pending()
void Thread::_call_submit_signal()
{
/* lookup signal context */
unsigned const id = user_arg_1();
Signal_context * const c = Signal_context::pool()->object(id);
Signal_context * const c = pd()->cap_tree().find<Signal_context>(user_arg_1());
if(!c) {
PWRN("%s -> %s: cannot submit unknown signal context %u",
pd_label(), label(), id);
PWRN("%s -> %s: cannot submit unknown signal context",
pd_label(), label());
user_arg_0(-1);
return;
}
/* trigger signal context */
if (c->submit(user_arg_2())) {
PWRN("failed to submit signal context");
@ -658,13 +562,13 @@ void Thread::_call_submit_signal()
void Thread::_call_ack_signal()
{
/* lookup signal context */
unsigned const id = user_arg_1();
Signal_context * const c = Signal_context::pool()->object(id);
Signal_context * const c = pd()->cap_tree().find<Signal_context>(user_arg_1());
if (!c) {
PWRN("%s -> %s: cannot ack unknown signal context %u",
pd_label(), label(), id);
PWRN("%s -> %s: cannot ack unknown signal context",
pd_label(), label());
return;
}
/* acknowledge */
c->ack();
}
@ -673,14 +577,14 @@ void Thread::_call_ack_signal()
void Thread::_call_kill_signal_context()
{
/* lookup signal context */
unsigned const id = user_arg_1();
Signal_context * const c = Signal_context::pool()->object(id);
Signal_context * const c = pd()->cap_tree().find<Signal_context>(user_arg_1());
if (!c) {
PWRN("%s -> %s: cannot kill unknown signal context %u",
pd_label(), label(), id);
PWRN("%s -> %s: cannot kill unknown signal context",
pd_label(), label());
user_arg_0(-1);
return;
}
/* kill signal context */
if (c->kill(this)) {
PWRN("failed to kill signal context");
@ -690,17 +594,9 @@ void Thread::_call_kill_signal_context()
}
void Thread::_call_delete_signal_context() {
reinterpret_cast<Signal_context*>(user_arg_1())->~Signal_context(); }
void Thread::_call_delete_signal_receiver() {
reinterpret_cast<Signal_receiver*>(user_arg_1())->~Signal_receiver(); }
void Thread::_call_new_irq()
{
Signal_context * const c = Signal_context::pool()->object(user_arg_3());
Signal_context * const c = pd()->cap_tree().find<Signal_context>(user_arg_3());
if (!c) {
PWRN("%s -> %s: invalid signal context for interrupt",
pd_label(), label());
@ -717,8 +613,38 @@ void Thread::_call_ack_irq() {
reinterpret_cast<User_irq*>(user_arg_1())->enable(); }
void Thread::_call_delete_irq() {
reinterpret_cast<User_irq*>(user_arg_1())->~User_irq(); }
void Thread::_call_new_obj()
{
/* lookup thread */
Object_identity_reference * ref = pd()->cap_tree().find(user_arg_2());
Thread * thread = ref ? ref->object<Thread>() : nullptr;
if (!thread ||
(static_cast<Core_object<Thread>*>(thread)->capid() != ref->capid())) {
if (thread)
PWRN("faked thread %s -> %s", thread->pd_label(), thread->label());
user_arg_0(cap_id_invalid());
return;
}
using Thread_identity = Core_object_identity<Thread>;
Thread_identity * coi =
Genode::construct_at<Thread_identity>((void *)user_arg_1(), *thread);
user_arg_0(coi->core_capid());
}
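The check above only accepts a thread capability that denotes the thread's own core-created identity; a capability that was merely delegated carries a different capid than the one stored in the Core_object and is rejected as faked. A toy model of that comparison, using illustrative names:
#include <cstdio>
using capid_t = unsigned;
/* toy model: an object records the capid of its primary (core-created) identity */
struct Object_sketch { capid_t primary_capid; };
/* what a cap-tree lookup would hand back */
struct Reference_sketch {
	Object_sketch const *obj;
	capid_t              capid;   /* capid of this particular reference */
};
static bool genuine(Reference_sketch const &ref)
{
	/* accept only the reference that was created by core for this object */
	return ref.obj && ref.obj->primary_capid == ref.capid;
}
int main()
{
	Object_sketch thread { 5 };
	Reference_sketch core_ref      { &thread, 5 };   /* core's own reference         */
	Reference_sketch delegated_ref { &thread, 9 };   /* reference held by another PD */
	std::printf("core ref genuine: %d, delegated ref genuine: %d\n",
	            genuine(core_ref), genuine(delegated_ref));
}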
void Thread::_call_delete_obj()
{
using Object = Core_object_identity<Thread>;
reinterpret_cast<Object*>(user_arg_1())->~Object();
}
void Thread::_call_delete_cap()
{
Object_identity_reference * oir = pd()->cap_tree().find(user_arg_1());
if (oir) destroy(pd()->platform_pd()->capability_slab(), oir);
}
int Thread::_read_reg(addr_t const id, addr_t & value) const
@ -749,6 +675,8 @@ int Thread::_write_reg(addr_t const id, addr_t const value)
void Thread::_call()
{
try {
/* switch over unrestricted kernel calls */
unsigned const call_id = user_arg_0();
switch (call_id) {
@ -766,6 +694,7 @@ void Thread::_call()
case call_id_signal_pending(): _call_signal_pending(); return;
case call_id_ack_signal(): _call_ack_signal(); return;
case call_id_print_char(): _call_print_char(); return;
case call_id_delete_cap(): _call_delete_cap(); return;
default:
/* check whether this is a core thread */
if (!_core()) {
@ -779,29 +708,78 @@ void Thread::_call()
switch (call_id) {
case call_id_new_thread(): _call_new_thread(); return;
case call_id_thread_quota(): _call_thread_quota(); return;
case call_id_delete_thread(): _call_delete_thread(); return;
case call_id_delete_thread(): _call_delete<Thread>(); return;
case call_id_start_thread(): _call_start_thread(); return;
case call_id_resume_thread(): _call_resume_thread(); return;
case call_id_access_thread_regs(): _call_access_thread_regs(); return;
case call_id_route_thread_event(): _call_route_thread_event(); return;
case call_id_update_pd(): _call_update_pd(); return;
case call_id_new_pd(): _call_new_pd(); return;
case call_id_delete_pd(): _call_delete_pd(); return;
case call_id_new_signal_receiver(): _call_new_signal_receiver(); return;
case call_id_new_signal_context(): _call_new_signal_context(); return;
case call_id_delete_signal_context(): _call_delete_signal_context(); return;
case call_id_delete_signal_receiver(): _call_delete_signal_receiver(); return;
case call_id_new_pd():
_call_new<Pd>((Genode::Translation_table *) user_arg_2(),
(Genode::Platform_pd *) user_arg_3());
return;
case call_id_delete_pd(): _call_delete<Pd>(); return;
case call_id_new_signal_receiver(): _call_new<Signal_receiver>(); return;
case call_id_new_signal_context():
_call_new<Signal_context>((Signal_receiver*) user_arg_2(),
(unsigned) user_arg_3());
return;
case call_id_delete_signal_context(): _call_delete<Signal_context>(); return;
case call_id_delete_signal_receiver(): _call_delete<Signal_receiver>(); return;
case call_id_new_vm(): _call_new_vm(); return;
case call_id_delete_vm(): _call_delete_vm(); return;
case call_id_run_vm(): _call_run_vm(); return;
case call_id_pause_vm(): _call_pause_vm(); return;
case call_id_pause_thread(): _call_pause_thread(); return;
case call_id_new_irq(): _call_new_irq(); return;
case call_id_delete_irq(): _call_delete_irq(); return;
case call_id_delete_irq(): _call_delete<Irq>(); return;
case call_id_ack_irq(): _call_ack_irq(); return;
case call_id_new_obj(): _call_new_obj(); return;
case call_id_delete_obj(): _call_delete_obj(); return;
default:
PWRN("%s -> %s: unknown kernel call", pd_label(), label());
_stop();
return;
}
} catch (Genode::Allocator::Out_of_memory &e) { user_arg_0(-2); }
}
/*****************
** Core_thread **
*****************/
Core_thread::Core_thread()
: Core_object<Thread>(Cpu_priority::max, 0, "core")
{
using Genode::Native_utcb;
static Genode::uint8_t stack[DEFAULT_STACK_SIZE];
static Native_utcb * const utcb =
unmanaged_singleton<Native_utcb, Genode::get_page_size()>();
/* map UTCB */
Genode::map_local((addr_t)utcb, (addr_t)Genode::utcb_main_thread(),
sizeof(Native_utcb) / Genode::get_page_size());
utcb->cap_add(cap_id_invalid());
utcb->cap_add(cap_id_invalid());
utcb->cap_add(core_capid());
/* start thread with stack pointer at the top of stack */
sp = (addr_t)&stack + DEFAULT_STACK_SIZE;
ip = (addr_t)&_core_start;
affinity(cpu_pool()->primary_cpu());
_utcb = utcb;
Thread::_pd = core_pd();
Thread::_pd->admit(this);
_become_active();
}
Thread & Core_thread::singleton()
{
static Core_thread ct;
return ct;
}

View File

@ -76,8 +76,8 @@ void Pager_object::unresolved_page_fault_occurred()
{
Platform_thread * const pt = (Platform_thread *)badge();
if (pt && pt->pd())
PERR("%s -> %s: unresolved pagefault at ip=%p",
pt->pd()->label(), pt->label(), (void*)pt->state().ip);
PERR("%s -> %s: unresolved pagefault at ip=%p sp=%p",
pt->pd()->label(), pt->label(), (void*)pt->state().ip, (void*)pt->state().sp);
}
Pager_object::Pager_object(unsigned const badge, Affinity::Location)
@ -137,10 +137,9 @@ Pager_entrypoint::Pager_entrypoint(Cap_session *,
Pager_capability Pager_entrypoint::manage(Pager_object * const o)
{
unsigned const d = _activation->cap().dst();
unsigned const b = o->badge();
auto const p = reinterpret_cap_cast<Pager_object>(Native_capability(d, b));
o->start_paging(_activation->Signal_receiver::manage(o), p);
Signal_context_capability scc = _activation->Signal_receiver::manage(o);
Pager_capability p = reinterpret_cap_cast<Pager_object>(scc);
o->start_paging(scc, p);
insert(o);
return p;
}

View File

@ -12,8 +12,12 @@
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <root/root.h>
/* core includes */
#include <platform_pd.h>
#include <platform_thread.h>
extern int _prog_img_beg;
extern int _prog_img_end;
@ -92,6 +96,25 @@ Hw::Address_space::Address_space(Kernel::Pd * pd)
}
/*************************************
** Capability_space implementation **
*************************************/
Capability_space::Capability_space()
: _slab(nullptr, (Slab_block*)&_initial_sb) { }
void Capability_space::upgrade_slab(Allocator &alloc)
{
for (;;) {
Slab_block * block;
if (!alloc.alloc(SLAB_SIZE, &block)) return;
block = construct_at<Slab_block>(block, &_slab);
_slab.insert_sb(block);
}
}
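The loop presumably converts as much of a donated, quota-bounded allocator as possible into capability-slab blocks: alloc() returning false ends the upgrade silently. A self-contained toy version of the same drain loop, with illustrative types:
#include <cstdio>
#include <vector>
/* toy allocator that refuses once its donation is used up */
struct Bounded_alloc_sketch {
	static constexpr unsigned MAX_BLOCKS = 4;
	char backing[MAX_BLOCKS][256];
	unsigned blocks_left = 0;
	bool alloc(void **out)
	{
		if (!blocks_left) return false;
		*out = backing[--blocks_left];
		return true;
	}
};
/* toy slab that merely records the blocks it was given */
struct Slab_sketch { std::vector<void *> blocks; };
static void upgrade_slab(Slab_sketch &slab, Bounded_alloc_sketch &alloc)
{
	for (;;) {
		void *block = nullptr;
		if (!alloc.alloc(&block)) return;   /* donation exhausted: stop silently */
		slab.blocks.push_back(block);
	}
}
int main()
{
	Slab_sketch slab;
	Bounded_alloc_sketch donation;
	donation.blocks_left = 3;
	upgrade_slab(slab, donation);
	std::printf("slab grew by %zu blocks\n", slab.blocks.size());
}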
/********************************
** Platform_pd implementation **
********************************/
@ -121,16 +144,17 @@ int Platform_pd::assign_parent(Native_capability parent)
Platform_pd::Platform_pd(Translation_table * tt, Page_slab * slab)
: Hw::Address_space(reinterpret_cast<Kernel::Pd*>(&_kernel_object), tt, slab),
_label("core") { new (&_kernel_object) Kernel::Pd(tt, this); }
: Hw::Address_space(kernel_object(), tt, slab),
Kernel_object<Kernel::Pd>(false, tt, this),
_label("core") { }
Platform_pd::Platform_pd(Allocator * md_alloc, char const *label)
: Hw::Address_space(reinterpret_cast<Kernel::Pd*>(&_kernel_object)),
: Hw::Address_space(kernel_object()),
Kernel_object<Kernel::Pd>(true, translation_table_phys(), this),
_label(label)
{
/* create kernel object */
if (Kernel::new_pd(reinterpret_cast<Kernel::Pd*>(_kernel_object), this)) {
if (!_cap.valid()) {
PERR("failed to create kernel object");
throw Root::Unavailable();
}
@ -139,7 +163,6 @@ Platform_pd::Platform_pd(Allocator * md_alloc, char const *label)
Platform_pd::~Platform_pd()
{
Kernel::delete_pd(kernel_pd());
flush(platform()->vm_start(), platform()->vm_size());
/* TODO: destroy page slab and translation table!!! */

View File

@ -73,22 +73,21 @@ Platform_thread::~Platform_thread()
Pager_capability cap = reinterpret_cap_cast<Pager_object>(object->Object_pool<Pager_object>::Entry::cap());
rm->remove_client(cap);
}
/* destroy object at the kernel */
Kernel::delete_thread(kernel_thread());
}
void Platform_thread::quota(size_t const quota) {
Kernel::thread_quota((Kernel::Thread *)_kernel_thread, quota); }
Kernel::thread_quota(kernel_object(), quota); }
Platform_thread::Platform_thread(const char * const label,
Native_utcb * utcb)
: _pd(Kernel::core_pd()->platform_pd()),
_rm_client(0),
: Kernel_object<Kernel::Thread>(true, Kernel::Cpu_priority::max, 0, _label),
_pd(Kernel::core_pd()->platform_pd()),
_rm_client(nullptr),
_utcb_core_addr(utcb),
_utcb_pd_addr(utcb),
_main_thread(0)
_main_thread(false)
{
strncpy(_label, label, LABEL_MAX_LEN);
@ -100,17 +99,6 @@ Platform_thread::Platform_thread(const char * const label,
}
map_local((addr_t)utcb_phys, (addr_t)_utcb_core_addr,
sizeof(Native_utcb) / get_page_size());
/* set-up default start-info */
_utcb_core_addr->core_start_info()->init(Cpu::primary_id());
/* create kernel object */
constexpr unsigned prio = Kernel::Cpu_priority::max;
_id = Kernel::new_thread(_kernel_thread, prio, 0, _label);
if (!_id) {
PERR("failed to create kernel object");
throw Cpu_session::Thread_creation_failed();
}
}
@ -118,11 +106,11 @@ Platform_thread::Platform_thread(size_t const quota,
const char * const label,
unsigned const virt_prio,
addr_t const utcb)
:
_pd(nullptr),
_rm_client(0),
_utcb_pd_addr((Native_utcb *)utcb),
_main_thread(0)
: Kernel_object<Kernel::Thread>(true, _priority(virt_prio), 0, _label),
_pd(nullptr),
_rm_client(nullptr),
_utcb_pd_addr((Native_utcb *)utcb),
_main_thread(false)
{
strncpy(_label, label, LABEL_MAX_LEN);
@ -140,15 +128,6 @@ Platform_thread::Platform_thread(size_t const quota,
throw Cpu_session::Out_of_metadata();
}
_utcb_core_addr = (Native_utcb *)core_env()->rm_session()->attach(_utcb);
/* create kernel object */
constexpr unsigned max_prio = Kernel::Cpu_priority::max;
auto const phys_prio = Cpu_session::scale_priority(max_prio, virt_prio);
_id = Kernel::new_thread(_kernel_thread, phys_prio, quota, _label);
if (!_id) {
PERR("failed to create kernel object");
throw Cpu_session::Thread_creation_failed();
}
}
@ -197,24 +176,33 @@ int Platform_thread::start(void * const ip, void * const sp)
/* initialize thread registers */
typedef Kernel::Thread_reg_id Reg_id;
enum { WRITES = 2 };
addr_t * write_regs = (addr_t *)Thread_base::myself()->utcb()->base();
addr_t * write_regs = (addr_t*) Thread_base::myself()->utcb()->base();
write_regs[0] = Reg_id::IP;
write_regs[1] = Reg_id::SP;
addr_t values[] = { (addr_t)ip, (addr_t)sp };
if (Kernel::access_thread_regs(kernel_thread(), 0, WRITES, values)) {
if (Kernel::access_thread_regs(kernel_object(), 0, WRITES, values)) {
PERR("failed to initialize thread registers");
return -1;
}
/* start executing new thread */
unsigned const cpu =
_location.valid() ? _location.xpos() : Cpu::primary_id();
_utcb_core_addr->start_info()->init(_id, _utcb);
if (Kernel::start_thread(kernel_thread(), cpu, _pd->kernel_pd(),
_utcb_core_addr)) {
PERR("failed to start thread");
if (!_pd) {
PWRN("No protection domain associated!");
return -1;
}
unsigned const cpu =
_location.valid() ? _location.xpos() : Cpu::primary_id();
Native_utcb * utcb = Thread_base::myself()->utcb();
/* reset capability counter */
utcb->cap_cnt(0);
utcb->cap_add(_pd->parent().dst());
utcb->cap_add(_utcb.dst());
utcb->cap_add(_cap.dst());
Kernel::start_thread(kernel_object(), cpu, _pd->kernel_pd(),
_utcb_core_addr);
return 0;
}
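Together with the Core_thread constructor above, this suggests a fixed slot order for the capabilities passed through the UTCB at thread start: parent capability first, then the UTCB dataspace capability, then the thread's own capability (core's initial thread fills the first two slots with invalid caps). A toy model of that convention, names being illustrative:
#include <cstdio>
using capid_t = unsigned;
enum Utcb_slot { PARENT_CAP = 0, UTCB_DS_CAP = 1, THREAD_CAP = 2, NUM_SLOTS = 3 };
/* toy UTCB capability buffer mirroring the cap_cnt/cap_add calls above */
struct Utcb_sketch {
	capid_t  caps[NUM_SLOTS];
	unsigned cnt = 0;
	void cap_cnt(unsigned n) { cnt = n; }
	void cap_add(capid_t c)  { if (cnt < NUM_SLOTS) caps[cnt++] = c; }
};
int main()
{
	Utcb_sketch utcb;
	utcb.cap_cnt(0);      /* reset counter, as the start routine does */
	utcb.cap_add(11);     /* slot 0: parent capability                */
	utcb.cap_add(12);     /* slot 1: UTCB dataspace capability        */
	utcb.cap_add(13);     /* slot 2: the thread's own capability      */
	std::printf("parent=%u utcb=%u thread=%u\n",
	            utcb.caps[PARENT_CAP], utcb.caps[UTCB_DS_CAP], utcb.caps[THREAD_CAP]);
}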
@ -225,7 +213,7 @@ void Platform_thread::pager(Pager_object * const pager)
if (pager) {
unsigned const sc_id = pager->signal_context_id();
if (sc_id) {
if (!Kernel::route_thread_event(kernel_thread(), Event_id::FAULT,
if (!Kernel::route_thread_event(kernel_object(), Event_id::FAULT,
sc_id)) {
_rm_client = dynamic_cast<Rm_client *>(pager);
return;
@ -234,7 +222,7 @@ void Platform_thread::pager(Pager_object * const pager)
PERR("failed to attach signal context to fault");
return;
} else {
if (!Kernel::route_thread_event(kernel_thread(), Event_id::FAULT, 0)) {
if (!Kernel::route_thread_event(kernel_object(), Event_id::FAULT, 0)) {
_rm_client = 0;
return;
}
@ -261,11 +249,11 @@ Thread_state Platform_thread::state()
static addr_t const * const src = cpu_state_regs();
static size_t const length = cpu_state_regs_length();
static size_t const size = length * sizeof(src[0]);
void * dst = Thread_base::myself()->utcb()->base();
void * dst = (void*)Thread_base::myself()->utcb()->base();
Genode::memcpy(dst, src, size);
Thread_state thread_state;
Cpu_state * const cpu_state = static_cast<Cpu_state *>(&thread_state);
if (Kernel::access_thread_regs(kernel_thread(), length, 0,
if (Kernel::access_thread_regs(kernel_object(), length, 0,
(addr_t *)cpu_state)) {
throw Cpu_session::State_access_failed();
}
@ -278,10 +266,10 @@ void Platform_thread::state(Thread_state thread_state)
static addr_t const * const src = cpu_state_regs();
static size_t const length = cpu_state_regs_length();
static size_t const size = length * sizeof(src[0]);
void * dst = Thread_base::myself()->utcb()->base();
void * dst = (void*)Thread_base::myself()->utcb()->base();
Genode::memcpy(dst, src, size);
Cpu_state * const cpu_state = static_cast<Cpu_state *>(&thread_state);
if (Kernel::access_thread_regs(kernel_thread(), 0, length,
if (Kernel::access_thread_regs(kernel_object(), 0, length,
(addr_t *)cpu_state)) {
throw Cpu_session::State_access_failed();
}

View File

@ -59,7 +59,7 @@ int Pager_activation_base::apply_mapping()
void Pager_activation_base::entry()
{
/* get ready to receive faults */
_cap = Native_capability(thread_get_my_native_id(), 0);
_cap = Thread_base::myself()->tid().cap;
_cap_valid.unlock();
while (1)
{
@ -74,10 +74,8 @@ void Pager_activation_base::entry()
*/
unsigned const pon = po->cap().local_name();
Object_pool<Pager_object>::Guard pog(_ep->lookup_and_lock(pon));
if (!pog) {
PWRN("failed to lookup pager object");
continue;
}
if (!pog) continue;
/* let pager object go to fault state */
pog->fault_occured(s);
@ -92,10 +90,10 @@ void Pager_activation_base::entry()
Reg_id::FAULT_TLB, Reg_id::IP, Reg_id::FAULT_ADDR,
Reg_id::FAULT_WRITES, Reg_id::FAULT_SIGNAL };
enum { READS = sizeof(read_regs)/sizeof(read_regs[0]) };
void * const utcb = Thread_base::myself()->utcb()->base();
memcpy(utcb, read_regs, sizeof(read_regs));
memcpy((void*)Thread_base::myself()->utcb()->base(),
read_regs, sizeof(read_regs));
addr_t * const values = (addr_t *)&_fault;
if (Kernel::access_thread_regs(pt->kernel_thread(), READS, 0, values)) {
if (Kernel::access_thread_regs(pt->kernel_object(), READS, 0, values)) {
PWRN("failed to read fault data");
continue;
}

View File

@ -24,44 +24,42 @@
using namespace Genode;
Signal_session_component::Receiver::Receiver()
: Kernel_object<Kernel::Signal_receiver>(true),
Signal_session_component::Receiver::Pool::Entry(Kernel_object<Kernel::Signal_receiver>::_cap) { }
Signal_session_component::Context::Context(Signal_session_component::Receiver &r,
unsigned const imprint)
: Kernel_object<Kernel::Signal_context>(true, r.kernel_object(), imprint),
Signal_session_component::Context::Pool::Entry(Kernel_object<Kernel::Signal_context>::_cap) { }
Signal_receiver_capability Signal_session_component::alloc_receiver()
{
/* allocate resources for receiver */
void * p;
if (!_receivers_slab.alloc(Receiver::size(), &p)) {
try {
Receiver * r = new (_receivers_slab) Receiver();
_receivers.insert(r);
return reinterpret_cap_cast<Signal_receiver>(r->cap());
} catch (Allocator::Out_of_memory&) {
PERR("failed to allocate signal-receiver resources");
throw Out_of_metadata();
}
/* create kernel object for receiver */
addr_t donation = Receiver::kernel_donation(p);
unsigned const id = Kernel::new_signal_receiver(donation);
if (!id) {
/* clean up */
_receivers_slab.free(p, Receiver::size());
PERR("failed to create signal receiver");
throw Create_receiver_failed();
}
/* remember receiver ressources */
Native_capability cap(id, id);
Receiver * const r = new (p) Receiver(cap);
_receivers.insert(r);
/* return receiver capability */
return reinterpret_cap_cast<Signal_receiver>(cap);
return reinterpret_cap_cast<Signal_receiver>(Untyped_capability());
}
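Kernel-object creation is now hidden behind the Receiver constructor, so the only failure the session component still has to translate is an exhausted slab, surfaced to the client as Out_of_metadata. A toy sketch of that exception translation, with illustrative exception types:
#include <cstdio>
#include <exception>
/* illustrative stand-ins for Allocator::Out_of_memory and Out_of_metadata */
struct Out_of_memory_sketch   : std::exception { };
struct Out_of_metadata_sketch : std::exception { };
struct Receiver_sketch { };
static Receiver_sketch *alloc_receiver(bool quota_left)
{
	try {
		if (!quota_left)
			throw Out_of_memory_sketch();      /* what the slab allocator would throw */
		return new Receiver_sketch();
	} catch (Out_of_memory_sketch &) {
		std::puts("failed to allocate signal-receiver resources");
		throw Out_of_metadata_sketch();        /* what the client is meant to see */
	}
}
int main()
{
	try { alloc_receiver(false); }
	catch (Out_of_metadata_sketch &) { std::puts("client observes Out_of_metadata"); }
}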
void Signal_session_component::free_receiver(Signal_receiver_capability cap)
{
/* look up ressource info */
Receiver * const r = _receivers.lookup_and_lock(cap);
Receiver::Pool::Guard r(_receivers.lookup_and_lock(cap));
if (!r) {
PERR("unknown signal receiver");
throw Kill_receiver_failed();
}
/* release resources */
_destruct_receiver(r);
_receivers_slab.free(r, Receiver::size());
_receivers.remove_locked(r);
destroy(&_receivers_slab, r.object());
}
@ -75,67 +73,47 @@ Signal_session_component::alloc_context(Signal_receiver_capability src,
PERR("unknown signal receiver");
throw Create_context_failed();
}
Kernel::Signal_receiver *sr =
(Kernel::Signal_receiver*) Receiver::kernel_donation(r);
/* allocate resources for context */
void * p;
if (!_contexts_slab.alloc(Context::size(), &p)) {
try {
Context * c = new (_contexts_slab) Context(*r.object(), imprint);
_contexts.insert(c);
return reinterpret_cap_cast<Signal_context>(c->cap());
} catch (Allocator::Out_of_memory&) {
PERR("failed to allocate signal-context resources");
throw Out_of_metadata();
}
/* create kernel object for context */
addr_t donation = Context::kernel_donation(p);
unsigned const id = Kernel::new_signal_context(donation, sr, imprint);
if (!id) {
/* clean up */
_contexts_slab.free(p, Context::size());
PERR("failed to create signal context");
throw Create_context_failed();
}
/* remember context ressources */
Native_capability cap(id, id);
_contexts.insert(new (p) Context(cap));
/* return context capability */
return reinterpret_cap_cast<Signal_context>(cap);
return reinterpret_cap_cast<Signal_context>(Untyped_capability());
}
void Signal_session_component::free_context(Signal_context_capability cap)
{
/* look up ressource info */
Context * const c = _contexts.lookup_and_lock(cap);
Context::Pool::Guard c(_contexts.lookup_and_lock(cap));
if (!c) {
PERR("unknown signal context");
throw Kill_context_failed();
}
/* release resources */
_destruct_context(c);
_contexts_slab.free(c, Context::size());
}
void Signal_session_component::_destruct_context(Context * const c)
{
/* release kernel resources */
Kernel::Signal_context *sc =
(Kernel::Signal_context*) Context::kernel_donation(c);
Kernel::delete_signal_context(sc);
/* release core resources */
_contexts.remove_locked(c);
c->~Signal_session_context();
destroy(&_contexts_slab, c.object());
}
void Signal_session_component::_destruct_receiver(Receiver * const r)
Signal_session_component::Signal_session_component(Allocator * const allocator,
size_t const quota)
: _allocator(allocator, quota), _receivers_slab(&_allocator),
_contexts_slab(&_allocator) { }
Signal_session_component::~Signal_session_component()
{
/* release kernel resources */
Kernel::Signal_receiver *sr =
(Kernel::Signal_receiver*) Receiver::kernel_donation(r);
Kernel::delete_signal_receiver(sr);
/* release core resources */
_receivers.remove_locked(r);
r->~Signal_session_receiver();
while (Context * const c = _contexts.first_locked()) {
_contexts.remove_locked(c);
destroy(&_contexts_slab, c);
}
while (Receiver * const r = _receivers.first_locked()) {
_receivers.remove_locked(r);
destroy(&_receivers_slab, r);
}
}

View File

@ -15,6 +15,7 @@
/* core includes */
#include <kernel/cpu.h>
#include <kernel/kernel.h>
#include <kernel/pd.h>
using namespace Kernel;
@ -25,7 +26,7 @@ Cpu_idle::Cpu_idle(Cpu * const cpu) : Cpu_job(Cpu_priority::min, 0)
cpu_exception = RESET;
ip = (addr_t)&_main;
sp = (addr_t)&_stack[stack_size];
init_thread((addr_t)core_pd()->translation_table(), core_pd()->id());
init_thread((addr_t)core_pd()->translation_table(), core_pd()->asid);
}

View File

@ -26,10 +26,18 @@ static Asid_allocator &alloc() {
Kernel::Pd::Pd(Kernel::Pd::Table * const table,
Genode::Platform_pd * const platform_pd)
: Kernel::Cpu::Pd((Genode::uint8_t)alloc().alloc()),
_table(table), _platform_pd(platform_pd) { }
_table(table), _platform_pd(platform_pd)
{
capid_t invalid = _capid_alloc.alloc();
assert(invalid == cap_id_invalid());
}
Kernel::Pd::~Pd() {
while (Object_identity_reference *oir = _cap_tree.first())
oir->~Object_identity_reference();
/* clean up buffers of memory management */
Cpu::flush_tlb_by_pid(asid);
alloc().free(asid);

View File

@ -22,10 +22,9 @@ using namespace Kernel;
Thread::Thread(unsigned const priority, unsigned const quota,
char const * const label)
:
Thread_base(this), Cpu_job(priority, quota), _state(AWAITS_START), _pd(0),
_utcb_phys(0), _signal_receiver(0), _label(label)
{ cpu_exception = RESET; }
: Thread_base(this), Cpu_job(priority, quota),
_state(AWAITS_START), _signal_receiver(0),
_label(label) { cpu_exception = RESET; }
void Thread::exception(unsigned const cpu)
@ -106,8 +105,8 @@ void Thread::_mmu_exception()
{
_become_inactive(AWAITS_RESUME);
if (in_fault(_fault_addr, _fault_writes)) {
_fault_pd = (addr_t)_pd->platform_pd();
_fault_signal = _fault.signal_context_id();
_fault_pd = (addr_t)_pd->platform_pd();
_fault_signal = (addr_t)_fault.signal_context();
/**
* core should never raise a page-fault,

View File

@ -15,13 +15,23 @@
#include <kernel/thread.h>
#include <kernel/vm.h>
void Kernel::Thread::_call_delete_vm()
void Kernel::Thread::_call_new_vm()
{
reinterpret_cast<Vm*>(user_arg_1())->~Vm();
user_arg_0(0);
Signal_context * context =
pd()->cap_tree().find<Signal_context>(user_arg_4());
if (!context) {
user_arg_0(cap_id_invalid());
return;
}
_call_new<Vm>((Genode::Cpu_state_modes*)user_arg_2(), context,
(void*)user_arg_3());
}
void Kernel::Thread::_call_delete_vm() { _call_delete<Vm>(); }
void Kernel::Thread::_call_run_vm()
{
reinterpret_cast<Vm*>(user_arg_1())->run();

View File

@ -20,11 +20,6 @@ extern Genode::addr_t _tz_client_context;
extern Genode::addr_t _mt_master_context_begin;
extern Genode::addr_t _tz_master_context;
namespace Kernel
{
Vm_pool * vm_pool() { return unmanaged_singleton<Vm_pool>(); }
}
using namespace Kernel;
@ -42,6 +37,9 @@ Kernel::Vm::Vm(void * const state,
}
Kernel::Vm::~Vm() {}
void Vm::exception(unsigned const cpu)
{
switch(_state->cpu_exception) {

View File

@ -1,36 +0,0 @@
/*
* \brief Kernel backend for thread-syscalls related to VMs
* \author Stefan Kalkowski
* \date 2015-02-23
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* core includes */
#include <kernel/vm.h>
#include <kernel/thread.h>
void Kernel::Thread::_call_new_vm()
{
/* lookup signal context */
auto const context = Signal_context::pool()->object(user_arg_4());
if (!context) {
PWRN("failed to lookup signal context");
user_arg_0(-1);
return;
}
/* create virtual machine */
typedef Genode::Cpu_state_modes Cpu_state_modes;
void * const allocator = reinterpret_cast<void *>(user_arg_1());
void * const table = reinterpret_cast<void *>(user_arg_3());
Cpu_state_modes * const state =
reinterpret_cast<Cpu_state_modes *>(user_arg_2());
new (allocator) Vm(state, context, table);
user_arg_0(0);
}

View File

@ -20,17 +20,10 @@ using namespace Genode;
void Vm_session_component::exception_handler(Signal_context_capability handler)
{
if (_initialized) {
PWRN("Cannot initialize kernel vm object twice!");
return;
if (!create((void*)_ds.core_local_addr(), handler.dst(), nullptr)) {
PWRN("Cannot instantiate vm kernel object twice,"
"or invalid signal context?");
}
if (Kernel::new_vm(&_vm, (void*)_ds.core_local_addr(), handler.dst(), 0)) {
PWRN("Cannot instantiate vm kernel object, invalid signal context?");
return;
}
_initialized = true;
}

View File

@ -16,8 +16,6 @@
namespace Kernel
{
Vm_pool * vm_pool() { return unmanaged_singleton<Vm_pool>(); }
/**
* ARM's virtual interrupt controller cpu interface
*/
@ -62,7 +60,7 @@ struct Kernel::Vm_irq : Kernel::Irq
if (!vm)
PERR("VM timer interrupt while VM is not runnning!");
else
vm->inject_irq(_id());
vm->inject_irq(_irq_nr);
}
};
@ -201,18 +199,39 @@ void Kernel::prepare_hypervisor()
}
using Vmid_allocator = Genode::Bit_allocator<256>;
static Vmid_allocator &alloc()
{
static Vmid_allocator * allocator = nullptr;
if (!allocator) {
allocator = unmanaged_singleton<Vmid_allocator>();
/* reserve VM ID 0 for the hypervisor */
unsigned id = allocator->alloc();
assert (id == 0);
}
return *allocator;
}
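The allocator reserves VM id 0 for the hypervisor simply by allocating it once during lazy construction, so guest VMs always receive ids starting at 1. A self-contained toy equivalent:
#include <cstdio>
#include <cassert>
#include <bitset>
#include <new>
/* toy bit allocator for VM ids; id 0 is claimed up front for the hypervisor */
struct Vmid_alloc_sketch {
	std::bitset<256> used;
	unsigned alloc()
	{
		for (unsigned i = 0; i < used.size(); i++)
			if (!used[i]) { used[i] = true; return i; }
		throw std::bad_alloc();
	}
	void free(unsigned id) { used[id] = false; }
};
int main()
{
	Vmid_alloc_sketch ids;
	unsigned hypervisor_id = ids.alloc();   /* first allocation yields 0 */
	assert(hypervisor_id == 0);
	unsigned first_guest = ids.alloc();     /* guest VMs start at 1 */
	std::printf("hypervisor id %u, first guest id %u\n", hypervisor_id, first_guest);
}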
Kernel::Vm::Vm(void * const state,
Kernel::Signal_context * const context,
void * const table)
: Cpu_job(Cpu_priority::min, 0),
_id(alloc().alloc()),
_state((Genode::Vm_state * const)state),
_context(context),
_table(table) {
_table(table)
{
affinity(cpu_pool()->primary_cpu());
Virtual_pic::pic().irq.enable();
}
Kernel::Vm::~Vm() { alloc().free(_id); }
void Kernel::Vm::exception(unsigned const cpu_id)
{
Virtual_timer::save(_state);
@ -220,6 +239,7 @@ void Kernel::Vm::exception(unsigned const cpu_id)
switch(_state->cpu_exception) {
case Genode::Cpu_state::INTERRUPT_REQUEST:
case Genode::Cpu_state::FAST_INTERRUPT_REQUEST:
_state->gic_irq = Genode::Board_base::VT_MAINTAINANCE_IRQ;
_interrupt(cpu_id);
break;
default:
@ -237,7 +257,7 @@ void Kernel::Vm::proceed(unsigned const cpu_id)
/*
* the following values have to be enforced by the hypervisor
*/
_state->vttbr = Cpu::Ttbr0::init((Genode::addr_t)_table, id());
_state->vttbr = Cpu::Ttbr0::init((Genode::addr_t)_table, _id);
/*
* use the following report fields not needed for loading the context

View File

@ -1,36 +0,0 @@
/*
* \brief Kernel backend for thread-syscalls related to virtual machines
* \author Stefan Kalkowski
* \date 2015-02-10
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#include <kernel/vm.h>
#include <kernel/thread.h>
void Kernel::Thread::_call_new_vm()
{
/* lookup signal context */
auto const context = Signal_context::pool()->object(user_arg_4());
if (!context) {
PWRN("failed to lookup signal context");
user_arg_0(-1);
return;
}
/* create virtual machine */
typedef Genode::Cpu_state_modes Cpu_state_modes;
void * const allocator = reinterpret_cast<void *>(user_arg_1());
void * const table = reinterpret_cast<void *>(user_arg_3());
Cpu_state_modes * const state =
reinterpret_cast<Cpu_state_modes *>(user_arg_2());
new (allocator) Vm(state, context, table);
user_arg_0(0);
}

View File

@ -25,20 +25,12 @@ using namespace Genode;
void Vm_session_component::exception_handler(Signal_context_capability handler)
{
if (_initialized) {
PWRN("Cannot initialize kernel vm object twice!");
return;
}
Core_mem_allocator * cma =
static_cast<Core_mem_allocator*>(platform()->core_mem_alloc());
if (Kernel::new_vm(&_vm, (void*)_ds.core_local_addr(), handler.dst(),
cma->phys_addr(_table))) {
PWRN("Cannot instantiate vm kernel object, invalid signal context?");
return;
}
_initialized = true;
if (!create((void*)_ds.core_local_addr(), handler.dst(),
cma->phys_addr(_table)))
PWRN("Cannot instantiate vm kernel object, invalid signal context?");
}

View File

@ -32,15 +32,15 @@ addr_t Vm_session_component::_alloc_ds(size_t &ram_quota)
void Vm_session_component::run(void)
{
if (!_initialized || Kernel::run_vm(_kernel_object()))
PWRN("Unknown VM: is the exception handler registered?");
if (Kernel_object<Kernel::Vm>::_cap.valid())
Kernel::run_vm(kernel_object());
}
void Vm_session_component::pause(void)
{
if (!_initialized || Kernel::pause_vm(_kernel_object()))
PWRN("Unknown VM: is the exception handler registered?");
if (Kernel_object<Kernel::Vm>::_cap.valid())
Kernel::pause_vm(kernel_object());
}
@ -49,8 +49,6 @@ Vm_session_component::~Vm_session_component()
/* dissolve VM dataspace from service entry point */
_ds_ep->dissolve(&_ds);
if (Kernel::delete_vm(_kernel_object())) PERR("Cannot destruct unknown VM");
/* free region in allocator */
core_env()->rm_session()->detach(_ds.core_local_addr());
platform()->ram_alloc()->free((void*)_ds.phys_addr());

View File

@ -14,6 +14,7 @@
/* core includes */
#include <kernel/cpu.h>
#include <kernel/kernel.h>
#include <kernel/pd.h>
using namespace Kernel;

View File

@ -17,10 +17,18 @@
Kernel::Pd::Pd(Kernel::Pd::Table * const table,
Genode::Platform_pd * const platform_pd)
: _table(table), _platform_pd(platform_pd) { }
: _table(table), _platform_pd(platform_pd)
{
capid_t invalid = _capid_alloc.alloc();
assert(invalid == cap_id_invalid());
}
Kernel::Pd::~Pd() { }
Kernel::Pd::~Pd()
{
while (Object_identity_reference *oir = _cap_tree.first())
oir->~Object_identity_reference();
}
void Kernel::Pd::admit(Kernel::Cpu::Context * const c) {

View File

@ -19,9 +19,8 @@ using namespace Kernel;
Thread::Thread(unsigned const priority, unsigned const quota,
char const * const label)
:
Thread_base(this), Cpu_job(priority, quota), _state(AWAITS_START), _pd(0),
_utcb_phys(0), _signal_receiver(0), _label(label) { }
: Thread_base(this), Cpu_job(priority, quota), _state(AWAITS_START),
_signal_receiver(0), _label(label) { }
void Thread::exception(unsigned const cpu)

View File

@ -66,7 +66,7 @@ void Thread::_mmu_exception()
{
_become_inactive(AWAITS_RESUME);
_fault_pd = (addr_t)_pd->platform_pd();
_fault_signal = _fault.signal_context_id();
_fault_signal = (addr_t)_fault.signal_context();
_fault_addr = Cpu::Cr2::read();
/**

View File

@ -27,8 +27,9 @@ using namespace Genode;
namespace Genode { Rm_session * env_context_area_rm_session(); }
extern Ram_dataspace_capability _main_thread_utcb_ds;
extern Native_thread_id _main_thread_id;
namespace Hw {
extern Untyped_capability _main_thread_cap;
}
void Thread_base::start()
{
@ -60,10 +61,10 @@ void Thread_base::_init_platform_thread(size_t, Type type)
}
/* remap initial main-thread UTCB according to context-area spec */
Genode::map_local((addr_t)Kernel::core_main_thread_utcb_phys_addr(),
Genode::map_local((addr_t)Kernel::Core_thread::singleton().utcb(),
(addr_t)&_context->utcb,
max(sizeof(Native_utcb) / get_page_size(), (size_t)1));
/* adjust initial object state in case of a main thread */
tid().thread_id = _main_thread_id;
tid().cap = Hw::_main_thread_cap.dst();
}

View File

@ -0,0 +1,36 @@
/*
* \brief Obtain parent capability
* \author Stefan Kalkowski
* \date 2015-04-27
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _PLATFORM__MAIN_PARENT_CAP_H_
#define _PLATFORM__MAIN_PARENT_CAP_H_
/* Genode includes */
#include <parent/capability.h>
namespace Hw {
extern Genode::Untyped_capability _parent_cap;
}
namespace Genode {
/**
* Return constructed parent capability
*/
Parent_capability parent_cap()
{
/* assemble parent capability */
return reinterpret_cap_cast<Parent>(Hw::_parent_cap);
}
}
#endif /* _PLATFORM__MAIN_PARENT_CAP_H_ */

View File

@ -13,7 +13,7 @@
/* core includes */
#include <kernel/cpu_scheduler.h>
#include <kernel/test.h>
/*
* Utilities
@ -24,8 +24,6 @@ using Genode::addr_t;
using Kernel::Cpu_share;
using Kernel::Cpu_scheduler;
namespace Kernel { void test(); }
void * operator new(size_t s, void * p) { return p; }
struct Data

View File

@ -17,6 +17,7 @@
/* core includes */
#include <kernel/double_list.h>
#include <kernel/test.h>
/*
@ -27,8 +28,6 @@ using Genode::size_t;
using Kernel::Double_list_typed;
using Kernel::Double_list_item;
namespace Kernel { void test(); }
void * operator new(size_t s, void * p) { return p; }
struct Item_load { char volatile x, y, z; };

View File

@ -14,9 +14,10 @@
/* base includes */
#include <base/printf.h>
using namespace Genode;
/* core includes */
#include <kernel/test.h>
namespace Kernel { void test(); }
using namespace Genode;
void info();