/*
 * \brief   Single-threaded minimalistic kernel
 * \author  Martin Stein
 * \author  Stefan Kalkowski
 * \date    2011-10-20
 *
 * This kernel is the only code, besides the mode-transition PIC, that runs
 * in privileged CPU mode. It has two tasks. First, it initializes the
 * process 'core', enriches it with the whole identically mapped address
 * range, joins and applies it, assigns one thread with a user-defined entry
 * point (the core main thread) to it, and starts this thread in userland.
 * Afterwards, it is called each time an exception occurs in userland to do
 * a minimum of appropriate exception handling. Thus, it holds a CPU context
 * for itself as it does for any other thread. But since it never relies on
 * prior kernel runs, this context only holds constant pointers such as SP
 * and IP.
 */

/*
 * Copyright (C) 2011-2013 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */

/* core includes */
#include <kernel/lock.h>
#include <kernel/pd.h>
#include <platform_pd.h>
#include <trustzone.h>
#include <timer.h>
#include <pic.h>
#include <map_local.h>

/* base includes */
#include <unmanaged_singleton.h>
#include <base/native_types.h>

/* base-hw includes */
#include <kernel/irq.h>
#include <kernel/perf_counter.h>

using namespace Kernel;

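/*
 * Symbols resolved outside this translation unit: the core entry point and
 * the secondary-CPU trampoline presumably stem from the startup assembly,
 * the program-image bounds from the linker script.
 */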
extern "C" void _core_start(void);
|
2014-01-28 14:30:36 +01:00
|
|
|
extern Genode::Native_thread_id _main_thread_id;
|
2014-10-10 16:13:52 +02:00
|
|
|
extern void * _start_secondary_cpus;
|
2014-04-28 21:31:57 +02:00
|
|
|
extern int _prog_img_beg;
|
|
|
|
extern int _prog_img_end;
|
2012-05-30 20:13:09 +02:00
|
|
|
|
2015-03-27 14:05:55 +01:00
|
|
|
static_assert(sizeof(Genode::sizet_arithm_t) >= 2 * sizeof(size_t),
              "Bad result type for size_t arithmetic.");

namespace Kernel
{
	/* import Genode types */
	typedef Genode::Core_thread_id Core_thread_id;

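	/*
	 * Accessors for the global kernel-object pools. Each pool is built on
	 * first use via unmanaged_singleton, which presumably constructs the
	 * object in statically reserved memory and thereby avoids global
	 * static constructors.
	 */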
	Pd_pool              * pd_pool()              { return unmanaged_singleton<Pd_pool>(); }
	Thread_pool          * thread_pool()          { return unmanaged_singleton<Thread_pool>(); }
	Signal_context_pool  * signal_context_pool()  { return unmanaged_singleton<Signal_context_pool>(); }
	Signal_receiver_pool * signal_receiver_pool() { return unmanaged_singleton<Signal_receiver_pool>(); }

	/**
	 * Hook that enables automated testing of kernel internals
	 */
	void test();

	/**
	 * Static kernel PD that describes core
	 */
	Pd * core_pd()
	{
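		/*
		 * The 'Early_*' allocator types presumably draw from statically
		 * reserved memory, as no dynamic memory allocator is available
		 * this early in the bootstrap.
		 */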
		typedef Early_translations_slab      Slab;
		typedef Early_translations_allocator Allocator;
		typedef Genode::Translation_table    Table;

		constexpr addr_t table_align = 1 << Table::ALIGNM_LOG2;

		struct Core_pd : Platform_pd, Pd
		{
			/**
			 * Establish initial one-to-one mappings for core/kernel
			 *
			 * In contrast to normal translation insertions, this function
			 * avoids taking the core PD's translation-table lock in order
			 * to circumvent strex/ldrex problems in early bootstrap code on
			 * some ARM SoCs.
			 *
			 * \param start   physical/virtual start address of the area
			 * \param end     physical/virtual end address of the area
			 * \param io_mem  true if the area shall be marked as device memory
			 */
			void map(addr_t start, addr_t end, bool io_mem)
			{
				using namespace Genode;

				Translation_table *tt = Platform_pd::translation_table();
				const Page_flags flags =
					Page_flags::apply_mapping(true, io_mem ? UNCACHED : CACHED,
					                          io_mem);

				start       = trunc_page(start);
				size_t size = round_page(end) - start;

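				/*
				 * Insert an identity translation (virtual address equals
				 * physical address), taking page-table memory from the
				 * page slab.
				 */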
				try {
					tt->insert_translation(start, start, size, flags, page_slab());
				} catch(Page_slab::Out_of_slabs) {
					PERR("Not enough page slabs");
				} catch(Allocator::Out_of_memory) {
					PERR("Translation table needs too much RAM");
				} catch(...) {
					PERR("Invalid mapping %p -> %p (%zx)", (void*)start,
					     (void*)start, size);
				}
			}

			/**
			 * Constructor
			 */
			Core_pd(Table * const table, Slab * const slab)
			: Platform_pd(table, slab), Pd(table, this)
			{
				using namespace Genode;

				Platform_pd::_kernel_pd = this;

				/* map exception vector for core */
				Kernel::mtc()->map(table, slab);

				/* map core's program image */
				map((addr_t)&_prog_img_beg, (addr_t)&_prog_img_end, false);

				/* map core's MMIO regions */
				Native_region * r = Platform::_core_only_mmio_regions(0);
				for (unsigned i = 0; r;
				     r = Platform::_core_only_mmio_regions(++i))
					map(r->base, r->base + r->size, true);
			}
		};

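		/*
		 * The translation table needs natural alignment (ALIGNM_LOG2), so
		 * it is placed via unmanaged_singleton with an explicit alignment
		 * argument.
		 */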
		Allocator * const alloc = unmanaged_singleton<Allocator>();
		Table     * const table = unmanaged_singleton<Table, table_align>();
		Slab      * const slab  = unmanaged_singleton<Slab, Slab::ALIGN>(alloc);
		return unmanaged_singleton<Core_pd>(table, slab);
	}

	/**
	 * Get attributes of the mode-transition region in every PD
	 */
	addr_t mode_transition_base() { return mtc()->VIRT_BASE; }
	size_t mode_transition_size() { return mtc()->SIZE; }

	/**
	 * Get attributes of the kernel objects
	 */
	size_t   thread_size()          { return sizeof(Thread); }
	size_t   signal_context_size()  { return sizeof(Signal_context); }
	size_t   signal_receiver_size() { return sizeof(Signal_receiver); }
	unsigned pd_alignm_log2()       { return Genode::Translation_table::ALIGNM_LOG2; }
	size_t   pd_size()              { return sizeof(Genode::Translation_table) + sizeof(Pd); }

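	/*
	 * Note that pd_size above covers both the PD object and its translation
	 * table, as the two are allocated together.
	 */
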
	enum { STACK_SIZE = 64 * 1024 };

	/**
	 * Return lock that guards all kernel data against concurrent access
	 */
	Lock & data_lock()
	{
		static Lock s;
		return s;
	}

	addr_t   core_tt_base;
	unsigned core_pd_id;
}

Kernel::Id_allocator & Kernel::id_alloc() {
|
|
|
|
return *unmanaged_singleton<Id_allocator>(); }
|
|
|
|
|
2015-02-19 14:50:27 +01:00
|
|
|
Pic * Kernel::pic() { return unmanaged_singleton<Pic>(); }

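/*
 * The UTCB is returned page-aligned (note the alignment argument to
 * unmanaged_singleton below) so that it can later be mapped into core's
 * virtual address space via map_local.
 */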
Native_utcb * Kernel::core_main_thread_utcb_phys_addr() {
	return unmanaged_singleton<Native_utcb, Genode::get_page_size()>(); }

/**
 * Enable kernel-entry assembly to get an exclusive stack for every CPU
 */
unsigned kernel_stack_size = Kernel::STACK_SIZE;
char     kernel_stack[NR_OF_CPUS][Kernel::STACK_SIZE]
         __attribute__((aligned(16)));

/**
 * Set up the kernel environment before activating secondary CPUs
 */
extern "C" void init_kernel_up()
{
	/*
	 * As atomic operations are broken in physical mode on some platforms,
	 * we must avoid the use of 'cmpxchg' for now (this includes not using
	 * any local static objects).
	 */

	/* construct core's PD in advance, as data writes aren't allowed later */
	core_pd();

	/* initialize all CPU objects */
	cpu_pool();

	/* initialize PIC */
	pic();

	/* go multiprocessor mode */
	Cpu::start_secondary_cpus(&_start_secondary_cpus);
}

/**
 * Set up the kernel environment after activating secondary CPUs, as the
 * primary CPU
 */
void init_kernel_mp_primary()
{
	using namespace Genode;

	/* get stack memory that fulfills the constraints for core stacks */
	enum {
		STACK_ALIGNM = 1 << CORE_STACK_ALIGNM_LOG2,
		STACK_SIZE   = DEFAULT_STACK_SIZE,
	};
	static_assert(STACK_SIZE <= STACK_ALIGNM - sizeof(Core_thread_id),
	              "stack size does not fit stack alignment of core");
	static char s[STACK_SIZE] __attribute__((aligned(STACK_ALIGNM)));

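	/*
	 * Because the stack is aligned to STACK_ALIGNM and, together with the
	 * ID word, fits into a single STACK_ALIGNM-sized window (see the static
	 * assertion above), the ID of the running thread can presumably be
	 * recovered from any stack pointer by rounding it down to STACK_ALIGNM.
	 */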
	/* provide thread ident at the aligned base of the stack */
	*(Core_thread_id *)s = 0;

	/* initialize UTCB and map it */
	Native_utcb * utcb = Kernel::core_main_thread_utcb_phys_addr();
	Genode::map_local((addr_t)utcb, (addr_t)UTCB_MAIN_THREAD,
	                  sizeof(Native_utcb) / get_page_size());

	static Kernel::Thread t(Cpu_priority::max, 0, "core");

	/* start thread with stack pointer at the top of stack */
	utcb->start_info()->init(t.id(), Dataspace_capability());
	t.ip = (addr_t)&_core_start;
	t.sp = (addr_t)s + STACK_SIZE;
	t.init(cpu_pool()->primary_cpu(), core_pd(),
	       (Native_utcb*)Genode::UTCB_MAIN_THREAD, 1);

	/* kernel initialization finished */
	Genode::printf("kernel initialized\n");
	test();
}

/**
 * Set up the kernel environment after activating secondary CPUs
 */
extern "C" void init_kernel_mp()
{
	/*
	 * As updates on a cached kernel lock might not be visible to CPUs that
	 * have not enabled their caches yet, we cannot synchronize the
	 * activation of MMU and caches. Hence, we must avoid write access to
	 * kernel data for now.
	 */

	/* synchronize data view of all CPUs */
	Cpu::invalidate_data_caches();
	Cpu::invalidate_instr_caches();
	Cpu::data_synchronization_barrier();

	/* locally initialize interrupt controller */
	pic()->init_cpu_local();

	/* initialize CPU in physical mode */
	Cpu::init_phys_kernel();

	/* switch to core address space */
	Cpu::init_virt_kernel(core_pd());

	/* now it's safe to use 'cmpxchg' */
	Lock::Guard guard(data_lock());

	/* now it's safe to write to kernel data */

	/*
	 * TrustZone initialization code
	 *
	 * FIXME This is a platform-specific feature
	 */
	init_trustzone(pic());

	/*
	 * Enable performance counter
	 *
	 * FIXME This is an optional CPU-specific feature
	 */
	perf_counter()->enable();

	/* enable timer interrupt */
	unsigned const cpu = Cpu::executing_id();
	pic()->unmask(Timer::interrupt_id(cpu), cpu);

	/* do further initialization only as primary CPU */
	if (Cpu::primary_id() != cpu) { return; }
	init_kernel_mp_primary();
}

/**
 * Main routine of every kernel pass
 */
extern "C" void kernel()
{
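	/*
	 * Take the global data lock on entry; kernel code thus runs on at most
	 * one CPU at a time. The lock is presumably released again on the exit
	 * path back to userland.
	 */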
	data_lock().lock();
	cpu_pool()->cpu(Cpu::executing_id())->exception();
}

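/*
 * CPU context used by the kernel itself on each pass. As noted in the
 * header comment, the kernel never relies on prior runs, so the context
 * holds only constant values: the kernel stack, the kernel entry point,
 * and core's translation table.
 */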
Kernel::Cpu_context::Cpu_context(Genode::Translation_table * const table)
{
	_init(STACK_SIZE, (addr_t)table);
	sp = (addr_t)kernel_stack;
	ip = (addr_t)kernel;

	core_pd()->admit(this);
}