2012-05-30 20:13:09 +02:00
|
|
|
/*
 * \brief   Thread facility
 * \author  Martin Stein
 * \author  Stefan Kalkowski
 * \date    2012-02-12
 */
|
|
|
|
|
|
|
|
/*
 * Copyright (C) 2012-2013 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */
|
|
|
|
|
|
|
|
/* core includes */
|
2015-06-16 10:59:26 +02:00
|
|
|
#include <assert.h>
|
2012-05-30 20:13:09 +02:00
|
|
|
#include <platform_thread.h>
|
hw: restrict processor broadcast to TLB flushing
Removes the generic processor broadcast function call. By now, that call
was used for cross processor TLB maintance operations only. When core/kernel
gets its memory mapped on demand, and unmapped again, the previous cross
processor flush routine doesn't work anymore, because of a hen-egg problem.
The previous cross processor broadcast is realized using a thread constructed
by core running on top of each processor core. When constructing threads in
core, a dataspace for its thread context is constructed. Each constructed
RAM dataspace gets attached, zeroed out, and detached again. The detach
routine requires a TLB flush operation executed on each processor core.
Instead of executing a thread on each processor core, now a thread waiting
for a global TLB flush is removed from the scheduler queue, and gets attached
to a TLB flush queue of each processor. The processor local queue gets checked
whenever the kernel is entered. The last processor, which executed the TLB
flush, re-attaches the blocked thread to its scheduler queue again.
To ease uo the above described mechanism, a platform thread is now directly
associated with a platform pd object, instead of just associate it with the
kernel pd's id.
Ref #723
2014-04-28 20:36:00 +02:00
|
|
|
#include <platform_pd.h>
|
2012-05-30 20:13:09 +02:00
|
|
|
#include <core_env.h>
|
|
|
|
#include <rm_session_component.h>
|
2014-04-28 21:31:57 +02:00
|
|
|
#include <map_local.h>
|
|
|
|
|
2016-03-08 16:59:43 +01:00
|
|
|
/* base-internal includes */
|
|
|
|
#include <base/internal/native_utcb.h>
|
2012-05-30 20:13:09 +02:00
|
|
|
|
hw: restrict processor broadcast to TLB flushing
Removes the generic processor broadcast function call. By now, that call
was used for cross processor TLB maintance operations only. When core/kernel
gets its memory mapped on demand, and unmapped again, the previous cross
processor flush routine doesn't work anymore, because of a hen-egg problem.
The previous cross processor broadcast is realized using a thread constructed
by core running on top of each processor core. When constructing threads in
core, a dataspace for its thread context is constructed. Each constructed
RAM dataspace gets attached, zeroed out, and detached again. The detach
routine requires a TLB flush operation executed on each processor core.
Instead of executing a thread on each processor core, now a thread waiting
for a global TLB flush is removed from the scheduler queue, and gets attached
to a TLB flush queue of each processor. The processor local queue gets checked
whenever the kernel is entered. The last processor, which executed the TLB
flush, re-attaches the blocked thread to its scheduler queue again.
To ease uo the above described mechanism, a platform thread is now directly
associated with a platform pd object, instead of just associate it with the
kernel pd's id.
Ref #723
2014-04-28 20:36:00 +02:00
|
|
|
/* kernel includes */
|
2016-03-08 16:59:43 +01:00
|
|
|
#include <kernel/pd.h>
|
hw: restrict processor broadcast to TLB flushing
Removes the generic processor broadcast function call. By now, that call
was used for cross processor TLB maintance operations only. When core/kernel
gets its memory mapped on demand, and unmapped again, the previous cross
processor flush routine doesn't work anymore, because of a hen-egg problem.
The previous cross processor broadcast is realized using a thread constructed
by core running on top of each processor core. When constructing threads in
core, a dataspace for its thread context is constructed. Each constructed
RAM dataspace gets attached, zeroed out, and detached again. The detach
routine requires a TLB flush operation executed on each processor core.
Instead of executing a thread on each processor core, now a thread waiting
for a global TLB flush is removed from the scheduler queue, and gets attached
to a TLB flush queue of each processor. The processor local queue gets checked
whenever the kernel is entered. The last processor, which executed the TLB
flush, re-attaches the blocked thread to its scheduler queue again.
To ease uo the above described mechanism, a platform thread is now directly
associated with a platform pd object, instead of just associate it with the
kernel pd's id.
Ref #723
2014-04-28 20:36:00 +02:00
|
|
|
#include <kernel/kernel.h>
|
2012-05-30 20:13:09 +02:00
|
|
|
|
hw: restrict processor broadcast to TLB flushing
Removes the generic processor broadcast function call. By now, that call
was used for cross processor TLB maintance operations only. When core/kernel
gets its memory mapped on demand, and unmapped again, the previous cross
processor flush routine doesn't work anymore, because of a hen-egg problem.
The previous cross processor broadcast is realized using a thread constructed
by core running on top of each processor core. When constructing threads in
core, a dataspace for its thread context is constructed. Each constructed
RAM dataspace gets attached, zeroed out, and detached again. The detach
routine requires a TLB flush operation executed on each processor core.
Instead of executing a thread on each processor core, now a thread waiting
for a global TLB flush is removed from the scheduler queue, and gets attached
to a TLB flush queue of each processor. The processor local queue gets checked
whenever the kernel is entered. The last processor, which executed the TLB
flush, re-attaches the blocked thread to its scheduler queue again.
To ease uo the above described mechanism, a platform thread is now directly
associated with a platform pd object, instead of just associate it with the
kernel pd's id.
Ref #723
2014-04-28 20:36:00 +02:00
|
|
|
using namespace Genode;
|
2012-05-30 20:13:09 +02:00
|
|
|
|
2013-11-23 02:30:24 +01:00
|
|
|
/* no hw-specific initialization is necessary */
void Platform_thread::_init() {}
|
|
|
|
|
2012-05-30 20:13:09 +02:00
|
|
|
|
2015-09-02 10:05:35 +02:00
|
|
|
/**
 * Return the weak address-space reference installed via 'join_pd'
 */
Weak_ptr<Address_space>& Platform_thread::address_space()
{
	return _address_space;
}
|
2013-03-08 11:54:12 +01:00
|
|
|
|
|
|
|
|
2012-10-09 15:41:40 +02:00
|
|
|
Platform_thread::~Platform_thread()
{
	/*
	 * A main thread had its UTCB mapped into the protection domain by
	 * 'start', so remove that mapping before releasing the UTCB itself.
	 */
	if (_main_thread) {
		Locked_ptr<Address_space> space(_address_space);
		if (space.is_valid())
			space->flush((addr_t)_utcb_pd_addr, sizeof(Native_utcb));
	}

	/* release the UTCB dataspace */
	core_env()->ram_session()->free(_utcb);
}
|
|
|
|
|
2013-11-18 15:31:54 +01:00
|
|
|
|
2015-03-27 14:05:55 +01:00
|
|
|
/**
 * Forward a new CPU-quota value to the kernel thread object
 */
void Platform_thread::quota(size_t const quota)
{
	Kernel::thread_quota(kernel_object(), quota);
}
|
2015-03-27 14:05:55 +01:00
|
|
|
|
|
|
|
|
2014-04-28 21:31:57 +02:00
|
|
|
/**
 * Construct a core-local thread
 *
 * \param label  name of the thread, copied into '_label'
 * \param utcb   core-virtual address at which the UTCB gets mapped
 *
 * \throw Cpu_session::Out_of_metadata  on UTCB allocation or mapping failure
 *
 * Core threads get their UTCB backed by physical RAM from the platform
 * allocator and mapped locally, and run with maximum priority in core's PD.
 */
Platform_thread::Platform_thread(const char * const label,
                                 Native_utcb * utcb)
: Kernel_object<Kernel::Thread>(true, Kernel::Cpu_priority::MAX, 0, _label),
  _pd(Kernel::core_pd()->platform_pd()),
  _pager(nullptr),
  _utcb_core_addr(utcb),
  _utcb_pd_addr(utcb),
  _main_thread(false)
{
	strncpy(_label, label, LABEL_MAX_LEN);

	/* create UTCB for a core thread */
	void *utcb_phys;
	if (!platform()->ram_alloc()->alloc(sizeof(Native_utcb), &utcb_phys)) {
		PERR("failed to allocate UTCB");
		throw Cpu_session::Out_of_metadata();
	}

	/*
	 * Fix: the original ignored the return value of 'map_local'. A failed
	 * mapping would let the thread start with an unmapped UTCB and fault
	 * later. Report the error and release the physical backing store.
	 */
	if (!map_local((addr_t)utcb_phys, (addr_t)_utcb_core_addr,
	               sizeof(Native_utcb) / get_page_size())) {
		PERR("failed to map UTCB of core thread");
		platform()->ram_alloc()->free(utcb_phys, sizeof(Native_utcb));
		throw Cpu_session::Out_of_metadata();
	}
}
|
|
|
|
|
|
|
|
|
2015-03-27 14:05:55 +01:00
|
|
|
/**
 * Construct a thread on behalf of a CPU-session client
 *
 * \param quota      CPU quota assigned to the thread
 * \param label      name of the thread, copied into '_label'
 * \param virt_prio  client-visible priority, translated via '_priority'
 * \param utcb       PD-virtual UTCB address requested by the client
 *
 * \throw Cpu_session::Out_of_metadata  if the UTCB cannot be allocated
 */
Platform_thread::Platform_thread(size_t const quota,
                                 const char * const label,
                                 unsigned const virt_prio,
                                 addr_t const utcb)
: Kernel_object<Kernel::Thread>(true, _priority(virt_prio), quota, _label),
  _pd(nullptr),
  _pager(nullptr),
  _utcb_pd_addr((Native_utcb *)utcb),
  _main_thread(false)
{
	strncpy(_label, label, LABEL_MAX_LEN);

	/* back the UTCB by a RAM dataspace of core's RAM session */
	try {
		_utcb = core_env()->ram_session()->alloc(sizeof(Native_utcb),
		                                         CACHED);
	}
	catch (...) {
		PERR("failed to allocate UTCB");
		throw Cpu_session::Out_of_metadata();
	}

	/* make the UTCB locally visible within core */
	_utcb_core_addr = (Native_utcb *)core_env()->rm_session()->attach(_utcb);
}
|
|
|
|
|
|
|
|
|
hw: restrict processor broadcast to TLB flushing
Removes the generic processor broadcast function call. By now, that call
was used for cross processor TLB maintance operations only. When core/kernel
gets its memory mapped on demand, and unmapped again, the previous cross
processor flush routine doesn't work anymore, because of a hen-egg problem.
The previous cross processor broadcast is realized using a thread constructed
by core running on top of each processor core. When constructing threads in
core, a dataspace for its thread context is constructed. Each constructed
RAM dataspace gets attached, zeroed out, and detached again. The detach
routine requires a TLB flush operation executed on each processor core.
Instead of executing a thread on each processor core, now a thread waiting
for a global TLB flush is removed from the scheduler queue, and gets attached
to a TLB flush queue of each processor. The processor local queue gets checked
whenever the kernel is entered. The last processor, which executed the TLB
flush, re-attaches the blocked thread to its scheduler queue again.
To ease uo the above described mechanism, a platform thread is now directly
associated with a platform pd object, instead of just associate it with the
kernel pd's id.
Ref #723
2014-04-28 20:36:00 +02:00
|
|
|
int Platform_thread::join_pd(Platform_pd * pd, bool const main_thread,
|
2013-03-08 11:54:12 +01:00
|
|
|
Weak_ptr<Address_space> address_space)
|
2012-05-30 20:13:09 +02:00
|
|
|
{
|
2013-11-18 15:31:54 +01:00
|
|
|
/* check if thread is already in another protection domain */
|
hw: restrict processor broadcast to TLB flushing
Removes the generic processor broadcast function call. By now, that call
was used for cross processor TLB maintance operations only. When core/kernel
gets its memory mapped on demand, and unmapped again, the previous cross
processor flush routine doesn't work anymore, because of a hen-egg problem.
The previous cross processor broadcast is realized using a thread constructed
by core running on top of each processor core. When constructing threads in
core, a dataspace for its thread context is constructed. Each constructed
RAM dataspace gets attached, zeroed out, and detached again. The detach
routine requires a TLB flush operation executed on each processor core.
Instead of executing a thread on each processor core, now a thread waiting
for a global TLB flush is removed from the scheduler queue, and gets attached
to a TLB flush queue of each processor. The processor local queue gets checked
whenever the kernel is entered. The last processor, which executed the TLB
flush, re-attaches the blocked thread to its scheduler queue again.
To ease uo the above described mechanism, a platform thread is now directly
associated with a platform pd object, instead of just associate it with the
kernel pd's id.
Ref #723
2014-04-28 20:36:00 +02:00
|
|
|
if (_pd && _pd != pd) {
|
2013-11-18 15:31:54 +01:00
|
|
|
PERR("thread already in another protection domain");
|
2013-08-30 13:30:19 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2014-04-28 21:31:57 +02:00
|
|
|
|
2013-11-18 15:31:54 +01:00
|
|
|
/* join protection domain */
|
hw: restrict processor broadcast to TLB flushing
Removes the generic processor broadcast function call. By now, that call
was used for cross processor TLB maintance operations only. When core/kernel
gets its memory mapped on demand, and unmapped again, the previous cross
processor flush routine doesn't work anymore, because of a hen-egg problem.
The previous cross processor broadcast is realized using a thread constructed
by core running on top of each processor core. When constructing threads in
core, a dataspace for its thread context is constructed. Each constructed
RAM dataspace gets attached, zeroed out, and detached again. The detach
routine requires a TLB flush operation executed on each processor core.
Instead of executing a thread on each processor core, now a thread waiting
for a global TLB flush is removed from the scheduler queue, and gets attached
to a TLB flush queue of each processor. The processor local queue gets checked
whenever the kernel is entered. The last processor, which executed the TLB
flush, re-attaches the blocked thread to its scheduler queue again.
To ease uo the above described mechanism, a platform thread is now directly
associated with a platform pd object, instead of just associate it with the
kernel pd's id.
Ref #723
2014-04-28 20:36:00 +02:00
|
|
|
_pd = pd;
|
2013-11-18 15:31:54 +01:00
|
|
|
_main_thread = main_thread;
|
2013-03-08 11:54:12 +01:00
|
|
|
_address_space = address_space;
|
2012-05-30 20:13:09 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-03-06 14:30:37 +01:00
|
|
|
/* remember the CPU location, evaluated at 'start' time */
void Platform_thread::affinity(Affinity::Location const & location) {
	_location = location; }
|
|
|
|
|
|
|
|
|
|
|
|
Affinity::Location Platform_thread::affinity() const
{
	return _location;
}
|
|
|
|
|
|
|
|
|
|
|
|
int Platform_thread::start(void * const ip, void * const sp)
|
2012-05-30 20:13:09 +02:00
|
|
|
{
|
2013-11-23 02:30:24 +01:00
|
|
|
/* attach UTCB in case of a main thread */
|
|
|
|
if (_main_thread) {
|
2015-06-26 15:05:28 +02:00
|
|
|
|
|
|
|
/* lookup dataspace component for physical address */
|
2015-08-10 13:34:16 +02:00
|
|
|
auto lambda = [&] (Dataspace_component *dsc) {
|
|
|
|
if (!dsc) return -1;
|
|
|
|
|
|
|
|
/* lock the address space */
|
|
|
|
Locked_ptr<Address_space> locked_ptr(_address_space);
|
|
|
|
if (!locked_ptr.is_valid()) {
|
|
|
|
PERR("invalid RM client");
|
|
|
|
return -1;
|
|
|
|
};
|
|
|
|
Page_flags const flags = Page_flags::apply_mapping(true, CACHED, false);
|
|
|
|
_utcb_pd_addr = utcb_main_thread();
|
|
|
|
Hw::Address_space * as = static_cast<Hw::Address_space*>(&*locked_ptr);
|
|
|
|
if (!as->insert_translation((addr_t)_utcb_pd_addr, dsc->phys_addr(),
|
|
|
|
sizeof(Native_utcb), flags)) {
|
|
|
|
PERR("failed to attach UTCB");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
2013-08-30 13:30:19 +02:00
|
|
|
};
|
2015-08-10 13:34:16 +02:00
|
|
|
if (core_env()->entrypoint()->apply(_utcb, lambda)) return -1;
|
2012-05-30 20:13:09 +02:00
|
|
|
}
|
2015-06-26 15:05:28 +02:00
|
|
|
|
2013-11-23 02:30:24 +01:00
|
|
|
/* initialize thread registers */
|
2015-08-24 10:57:40 +02:00
|
|
|
kernel_object()->ip = reinterpret_cast<addr_t>(ip);
|
|
|
|
kernel_object()->sp = reinterpret_cast<addr_t>(sp);
|
2014-03-06 14:30:37 +01:00
|
|
|
|
2013-11-19 15:13:24 +01:00
|
|
|
/* start executing new thread */
|
2015-05-19 14:18:40 +02:00
|
|
|
if (!_pd) {
|
|
|
|
PWRN("No protection domain associated!");
|
2013-08-30 13:30:19 +02:00
|
|
|
return -1;
|
|
|
|
}
|
2015-05-19 14:18:40 +02:00
|
|
|
|
|
|
|
unsigned const cpu =
|
|
|
|
_location.valid() ? _location.xpos() : Cpu::primary_id();
|
|
|
|
|
|
|
|
Native_utcb * utcb = Thread_base::myself()->utcb();
|
|
|
|
|
|
|
|
/* reset capability counter */
|
|
|
|
utcb->cap_cnt(0);
|
|
|
|
utcb->cap_add(_cap.dst());
|
2015-06-08 15:24:43 +02:00
|
|
|
if (_main_thread) {
|
|
|
|
utcb->cap_add(_pd->parent().dst());
|
|
|
|
utcb->cap_add(_utcb.dst());
|
|
|
|
}
|
2015-05-19 14:18:40 +02:00
|
|
|
Kernel::start_thread(kernel_object(), cpu, _pd->kernel_pd(),
|
|
|
|
_utcb_core_addr);
|
2013-08-30 13:30:19 +02:00
|
|
|
return 0;
|
2012-05-30 20:13:09 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Install 'pager' as the receiver of the thread's page-fault events
 *
 * Passing a nullptr invalidates the fault route.
 */
void Platform_thread::pager(Pager_object * const pager)
{
	using namespace Kernel;

	auto const fault_handler_cap = pager ? pager->cap().dst()
	                                     : cap_id_invalid();
	if (route_thread_event(kernel_object(), Thread_event_id::FAULT,
	                       fault_handler_cap))
		PERR("failed to set pager object for thread %s", label());

	_pager = pager;
}
|
2013-11-11 13:03:07 +01:00
|
|
|
|
|
|
|
|
2015-06-26 15:05:28 +02:00
|
|
|
/* currently installed pager, may be nullptr */
Genode::Pager_object * Platform_thread::pager()
{
	return _pager;
}
|
|
|
|
|
|
|
|
|
2013-11-11 13:03:07 +01:00
|
|
|
/**
 * Read the thread's register state from the kernel object
 */
Thread_state Platform_thread::state()
{
	return Thread_state(Thread_state_base(*kernel_object()));
}
|
2013-11-11 13:03:07 +01:00
|
|
|
|
|
|
|
|
|
|
|
/**
 * Overwrite the thread's CPU register state with 'thread_state'
 */
void Platform_thread::state(Thread_state thread_state)
{
	Cpu_state * const regs = static_cast<Cpu_state *>(kernel_object());
	*regs = static_cast<Cpu_state>(thread_state);
}
|