2012-05-30 20:13:09 +02:00
|
|
|
/*
 * \brief  Singlethreaded minimalistic kernel
 * \author Martin Stein
 * \date   2011-10-20
 *
 * This kernel is the only code, except the mode-transition PIC, that runs in
 * privileged CPU mode. It has two tasks. First it initializes the process
 * 'core', enriches it with the whole identically mapped address range,
 * joins and applies it, assigns one thread to it with a user-defined
 * entrypoint (the core main thread) and starts this thread in userland.
 * Afterwards it is called each time an exception occurs in userland to do
 * a minimum of appropriate exception handling. Thus it holds a CPU context
 * for itself as for any other thread. But due to the fact that it never
 * relies on prior kernel runs, this context only holds some constant pointers
 * such as SP and IP.
 */
|
|
|
|
|
|
|
|
/*
 * Copyright (C) 2011-2013 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */
|
|
|
|
|
|
|
|
/* core includes */
|
2013-09-06 17:37:09 +02:00
|
|
|
#include <kernel/pd.h>
|
2013-10-16 11:47:19 +02:00
|
|
|
#include <kernel/vm.h>
|
2013-02-22 10:30:48 +01:00
|
|
|
#include <platform_pd.h>
|
2012-10-02 14:27:32 +02:00
|
|
|
#include <trustzone.h>
|
2013-09-09 15:20:30 +02:00
|
|
|
#include <timer.h>
|
2013-10-30 13:56:57 +01:00
|
|
|
#include <pic.h>
|
2012-05-30 20:13:09 +02:00
|
|
|
|
2013-12-04 23:41:52 +01:00
|
|
|
/* base includes */
|
|
|
|
#include <unmanaged_singleton.h>
|
|
|
|
|
2013-05-14 22:40:30 +02:00
|
|
|
/* base-hw includes */
|
2014-04-04 17:36:05 +02:00
|
|
|
#include <kernel/irq.h>
|
2013-09-26 17:03:33 +02:00
|
|
|
#include <kernel/perf_counter.h>
|
2013-05-14 22:40:30 +02:00
|
|
|
|
2012-05-30 20:13:09 +02:00
|
|
|
using namespace Kernel;
|
|
|
|
|
2014-01-28 14:30:36 +01:00
|
|
|
extern Genode::Native_thread_id _main_thread_id;
|
2012-05-30 20:13:09 +02:00
|
|
|
extern "C" void CORE_MAIN();
|
2013-12-17 18:10:02 +01:00
|
|
|
extern void * _start_secondary_processors;
|
2012-05-30 20:13:09 +02:00
|
|
|
|
2013-12-18 17:07:35 +01:00
|
|
|
Genode::Native_utcb * _main_thread_utcb;
|
|
|
|
|
2012-05-30 20:13:09 +02:00
|
|
|
namespace Kernel
|
|
|
|
{
|
2013-10-30 13:56:57 +01:00
|
|
|
/**
|
|
|
|
* Return interrupt-controller singleton
|
|
|
|
*/
|
2013-12-04 23:41:52 +01:00
|
|
|
Pic * pic() { return unmanaged_singleton<Pic>(); }
|
2013-10-30 13:56:57 +01:00
|
|
|
|
2012-05-30 20:13:09 +02:00
|
|
|
/* import Genode types */
|
2013-11-15 16:56:34 +01:00
|
|
|
typedef Genode::umword_t umword_t;
|
|
|
|
typedef Genode::Core_tlb Core_tlb;
|
|
|
|
typedef Genode::Core_thread_id Core_thread_id;
|
2012-11-30 14:08:42 +01:00
|
|
|
}
|
2012-05-30 20:13:09 +02:00
|
|
|
|
2012-11-30 14:08:42 +01:00
|
|
|
namespace Kernel
|
|
|
|
{
|
2013-12-04 23:41:52 +01:00
|
|
|
Pd_ids * pd_ids() { return unmanaged_singleton<Pd_ids>(); }
|
|
|
|
Thread_ids * thread_ids() { return unmanaged_singleton<Thread_ids>(); }
|
|
|
|
Signal_context_ids * signal_context_ids() { return unmanaged_singleton<Signal_context_ids>(); }
|
|
|
|
Signal_receiver_ids * signal_receiver_ids() { return unmanaged_singleton<Signal_receiver_ids>(); }
|
2013-10-07 14:56:31 +02:00
|
|
|
|
2013-12-04 23:41:52 +01:00
|
|
|
Pd_pool * pd_pool() { return unmanaged_singleton<Pd_pool>(); }
|
|
|
|
Thread_pool * thread_pool() { return unmanaged_singleton<Thread_pool>(); }
|
|
|
|
Signal_context_pool * signal_context_pool() { return unmanaged_singleton<Signal_context_pool>(); }
|
|
|
|
Signal_receiver_pool * signal_receiver_pool() { return unmanaged_singleton<Signal_receiver_pool>(); }
|
2012-05-30 20:13:09 +02:00
|
|
|
|
|
|
|
/**
|
2013-12-17 18:10:02 +01:00
|
|
|
* Return singleton kernel-timer
|
2012-05-30 20:13:09 +02:00
|
|
|
*/
|
2013-12-17 18:10:02 +01:00
|
|
|
Timer * timer()
|
|
|
|
{
|
|
|
|
static Timer _object;
|
|
|
|
return &_object;
|
|
|
|
}
|
2012-05-30 20:13:09 +02:00
|
|
|
|
2013-12-17 18:10:02 +01:00
|
|
|
/**
|
|
|
|
* Start a new scheduling lap
|
|
|
|
*/
|
2014-03-07 19:31:05 +01:00
|
|
|
void reset_scheduling_time(unsigned const processor_id)
|
2013-09-02 09:39:31 +02:00
|
|
|
{
|
2013-12-17 18:10:02 +01:00
|
|
|
unsigned const tics = timer()->ms_to_tics(USER_LAP_TIME_MS);
|
|
|
|
timer()->start_one_shot(tics, processor_id);
|
2013-09-02 09:39:31 +02:00
|
|
|
}
|
|
|
|
|
2012-05-30 20:13:09 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Static kernel PD that describes core
|
|
|
|
*/
|
2014-03-16 18:25:37 +01:00
|
|
|
Pd * core()
|
2012-11-07 15:12:56 +01:00
|
|
|
{
|
2013-12-05 00:01:48 +01:00
|
|
|
/**
|
|
|
|
* Core protection-domain
|
|
|
|
*/
|
|
|
|
class Core_pd : public Pd
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Constructor
|
|
|
|
*/
|
|
|
|
Core_pd(Tlb * const tlb, Platform_pd * const platform_pd)
|
|
|
|
:
|
|
|
|
Pd(tlb, platform_pd)
|
|
|
|
{ }
|
|
|
|
};
|
2013-05-14 22:40:30 +02:00
|
|
|
constexpr int tlb_align = 1 << Core_tlb::ALIGNM_LOG2;
|
2013-12-05 00:01:48 +01:00
|
|
|
Core_tlb * core_tlb = unmanaged_singleton<Core_tlb, tlb_align>();
|
|
|
|
Core_pd * core_pd = unmanaged_singleton<Core_pd>(core_tlb, nullptr);
|
|
|
|
return core_pd;
|
2012-11-07 15:12:56 +01:00
|
|
|
}
|
2012-05-30 20:13:09 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Get core attributes
|
|
|
|
*/
|
|
|
|
unsigned core_id() { return core()->id(); }
|
2014-04-04 17:36:05 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Return wether an interrupt is private to the kernel
|
|
|
|
*
|
|
|
|
* \param interrupt_id kernel name of the targeted interrupt
|
|
|
|
*/
|
|
|
|
bool private_interrupt(unsigned const interrupt_id)
|
|
|
|
{
|
|
|
|
bool ret = 0;
|
|
|
|
for (unsigned i = 0; i < PROCESSORS; i++) {
|
|
|
|
ret |= interrupt_id == Timer::interrupt_id(i);
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
2012-11-30 14:08:42 +01:00
|
|
|
}
|
2012-05-30 20:13:09 +02:00
|
|
|
|
|
|
|
|
2012-11-30 14:08:42 +01:00
|
|
|
namespace Kernel
|
|
|
|
{
|
2012-05-30 20:13:09 +02:00
|
|
|
/**
|
|
|
|
* Get attributes of the mode transition region in every PD
|
|
|
|
*/
|
2014-03-15 16:12:09 +01:00
|
|
|
addr_t mode_transition_base() { return mtc()->VIRT_BASE; }
|
|
|
|
size_t mode_transition_size() { return mtc()->SIZE; }
|
2012-05-30 20:13:09 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Get attributes of the kernel objects
|
|
|
|
*/
|
2014-03-15 16:07:30 +01:00
|
|
|
size_t thread_size() { return sizeof(Thread); }
|
|
|
|
size_t pd_size() { return sizeof(Tlb) + sizeof(Pd); }
|
|
|
|
size_t signal_context_size() { return sizeof(Signal_context); }
|
|
|
|
size_t signal_receiver_size() { return sizeof(Signal_receiver); }
|
|
|
|
unsigned pd_alignment_log2() { return Tlb::ALIGNM_LOG2; }
|
|
|
|
size_t vm_size() { return sizeof(Vm); }
|
2012-05-30 20:13:09 +02:00
|
|
|
|
2013-12-17 18:10:02 +01:00
|
|
|
enum { STACK_SIZE = 64 * 1024 };
|
2012-05-30 20:13:09 +02:00
|
|
|
|
|
|
|
/**
|
2013-12-17 18:10:02 +01:00
|
|
|
* Return lock that guards all kernel data against concurrent access
|
2012-05-30 20:13:09 +02:00
|
|
|
*/
|
2013-12-17 18:10:02 +01:00
|
|
|
Lock & data_lock()
|
|
|
|
{
|
|
|
|
static Lock s;
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
	/* precomputed by init_kernel_uniprocessor, consumed later by
	 * init_kernel_multiprocessor when writes to kernel data are unsafe */
	addr_t core_tlb_base;
	unsigned core_pd_id;
|
2012-05-30 20:13:09 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Enable kernel-entry assembly to get an exclusive stack at every processor
 */
/* NOTE(review): 'aligned()' without an argument requests the target's default
 * maximum alignment — confirm this matches what the entry assembly expects */
char kernel_stack[PROCESSORS][Kernel::STACK_SIZE] __attribute__((aligned()));
unsigned kernel_stack_size = Kernel::STACK_SIZE;
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Setup kernel enviroment before activating secondary processors
|
|
|
|
*/
|
|
|
|
extern "C" void init_kernel_uniprocessor()
|
2012-05-30 20:13:09 +02:00
|
|
|
{
|
2013-12-17 18:10:02 +01:00
|
|
|
/************************************************************************
|
|
|
|
** As atomic operations are broken in physical mode on some platforms **
|
|
|
|
** we must avoid the use of 'cmpxchg' by now (includes not using any **
|
|
|
|
** local static objects. **
|
|
|
|
************************************************************************/
|
|
|
|
|
|
|
|
/* calculate in advance as needed later when data writes aren't allowed */
|
|
|
|
core_tlb_base = core()->tlb()->base();
|
|
|
|
core_pd_id = core_id();
|
|
|
|
|
|
|
|
/* initialize all processor objects */
|
2014-03-03 15:42:03 +01:00
|
|
|
processor_pool();
|
2013-12-17 18:10:02 +01:00
|
|
|
|
|
|
|
/* go multiprocessor mode */
|
2014-03-03 00:12:53 +01:00
|
|
|
Processor::start_secondary_processors(&_start_secondary_processors);
|
2013-12-17 18:10:02 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Setup kernel enviroment after activating secondary processors
|
|
|
|
*/
|
|
|
|
extern "C" void init_kernel_multiprocessor()
|
|
|
|
{
|
|
|
|
/***********************************************************************
|
|
|
|
** As updates on a cached kernel lock might not be visible to **
|
|
|
|
** processors that have not enabled caches, we can't synchronize the **
|
|
|
|
** activation of MMU and caches. Hence we must avoid write access to **
|
|
|
|
** kernel data by now. **
|
|
|
|
***********************************************************************/
|
|
|
|
|
|
|
|
/* synchronize data view of all processors */
|
2014-04-03 18:22:54 +02:00
|
|
|
Processor::invalidate_data_caches();
|
2014-04-17 12:42:39 +02:00
|
|
|
Processor::invalidate_instr_caches();
|
2014-03-03 00:12:53 +01:00
|
|
|
Processor::invalidate_control_flow_predictions();
|
|
|
|
Processor::data_synchronization_barrier();
|
2013-12-17 18:10:02 +01:00
|
|
|
|
2013-12-18 16:18:16 +01:00
|
|
|
/* initialize processor in physical mode */
|
2014-03-03 00:12:53 +01:00
|
|
|
Processor::init_phys_kernel();
|
2012-05-30 20:13:09 +02:00
|
|
|
|
2013-12-17 18:10:02 +01:00
|
|
|
/* switch to core address space */
|
2014-03-03 00:12:53 +01:00
|
|
|
Processor::init_virt_kernel(core_tlb_base, core_pd_id);
|
2012-05-30 20:13:09 +02:00
|
|
|
|
2013-12-17 18:10:02 +01:00
|
|
|
/************************************
|
|
|
|
** Now it's safe to use 'cmpxchg' **
|
|
|
|
************************************/
|
2013-08-22 20:51:19 +02:00
|
|
|
|
2013-12-17 18:10:02 +01:00
|
|
|
Lock::Guard guard(data_lock());
|
2012-10-02 14:27:32 +02:00
|
|
|
|
2013-12-17 18:10:02 +01:00
|
|
|
/*******************************************
|
|
|
|
** Now it's save to write to kernel data **
|
|
|
|
*******************************************/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* TrustZone initialization code
|
|
|
|
*
|
|
|
|
* FIXME This is a plattform specific feature
|
|
|
|
*/
|
|
|
|
init_trustzone(pic());
|
2013-09-26 17:03:33 +02:00
|
|
|
|
2013-12-18 16:18:16 +01:00
|
|
|
/*
|
2013-12-17 18:10:02 +01:00
|
|
|
* Enable performance counter
|
|
|
|
*
|
|
|
|
* FIXME This is an optional processor specific feature
|
2013-12-18 16:18:16 +01:00
|
|
|
*/
|
2013-12-17 18:10:02 +01:00
|
|
|
perf_counter()->enable();
|
|
|
|
|
|
|
|
/* initialize interrupt controller */
|
|
|
|
pic()->init_processor_local();
|
2014-03-06 13:55:56 +01:00
|
|
|
unsigned const processor_id = Processor::executing_id();
|
2013-12-17 18:10:02 +01:00
|
|
|
pic()->unmask(Timer::interrupt_id(processor_id), processor_id);
|
2013-05-14 22:40:30 +02:00
|
|
|
|
2013-12-17 18:10:02 +01:00
|
|
|
/* as primary processor create the core main thread */
|
2014-03-03 00:12:53 +01:00
|
|
|
if (Processor::primary_id() == processor_id)
|
2013-12-18 16:18:16 +01:00
|
|
|
{
|
|
|
|
/* get stack memory that fullfills the constraints for core stacks */
|
|
|
|
enum {
|
|
|
|
STACK_ALIGNM = 1 << Genode::CORE_STACK_ALIGNM_LOG2,
|
|
|
|
STACK_SIZE = DEFAULT_STACK_SIZE,
|
|
|
|
};
|
2014-03-12 16:23:01 +01:00
|
|
|
static_assert(STACK_SIZE <= STACK_ALIGNM - sizeof(Core_thread_id),
|
|
|
|
"stack size does not fit stack alignment of core");
|
2013-12-18 16:18:16 +01:00
|
|
|
static char s[STACK_SIZE] __attribute__((aligned(STACK_ALIGNM)));
|
|
|
|
|
|
|
|
/* provide thread ident at the aligned base of the stack */
|
|
|
|
*(Core_thread_id *)s = 0;
|
|
|
|
|
|
|
|
/* start thread with stack pointer at the top of stack */
|
|
|
|
static Native_utcb utcb;
|
|
|
|
static Thread t(Priority::MAX, "core");
|
|
|
|
_main_thread_id = t.id();
|
|
|
|
_main_thread_utcb = &utcb;
|
|
|
|
_main_thread_utcb->start_info()->init(t.id(), Genode::Native_capability());
|
|
|
|
t.ip = (addr_t)CORE_MAIN;;
|
|
|
|
t.sp = (addr_t)s + STACK_SIZE;
|
2014-03-16 18:25:37 +01:00
|
|
|
t.init(processor_pool()->processor(processor_id), core(), &utcb, 1);
|
2013-12-17 18:10:02 +01:00
|
|
|
|
2014-04-04 17:36:05 +02:00
|
|
|
/* initialize interrupt objects */
|
|
|
|
static Genode::uint8_t _irqs[Pic::MAX_INTERRUPT_ID * sizeof(Irq)];
|
|
|
|
for (unsigned i = 0; i < Pic::MAX_INTERRUPT_ID; i++) {
|
|
|
|
if (private_interrupt(i)) { continue; }
|
|
|
|
new (&_irqs[i * sizeof(Irq)]) Irq(i);
|
|
|
|
}
|
2013-12-17 18:10:02 +01:00
|
|
|
/* kernel initialization finished */
|
2014-03-12 13:34:53 +01:00
|
|
|
Genode::printf("kernel initialized\n");
|
2012-05-30 20:13:09 +02:00
|
|
|
}
|
2014-03-07 19:31:05 +01:00
|
|
|
reset_scheduling_time(processor_id);
|
2013-12-18 16:18:16 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Main routine of every kernel pass
|
|
|
|
*/
|
|
|
|
extern "C" void kernel()
|
|
|
|
{
|
2014-03-06 14:21:13 +01:00
|
|
|
/* ensure that no other processor accesses kernel data while we do */
|
2013-12-17 18:10:02 +01:00
|
|
|
data_lock().lock();
|
2014-03-06 14:21:13 +01:00
|
|
|
|
|
|
|
/* determine local processor scheduler */
|
2014-03-06 13:55:56 +01:00
|
|
|
unsigned const processor_id = Processor::executing_id();
|
2014-03-11 01:21:56 +01:00
|
|
|
Processor * const processor = processor_pool()->processor(processor_id);
|
2013-12-17 18:10:02 +01:00
|
|
|
Processor_scheduler * const scheduler = processor->scheduler();
|
2014-03-06 14:21:13 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Request the current processor occupant without any update. While this
|
|
|
|
* processor was outside the kernel, another processor may have changed the
|
|
|
|
* scheduling of the local activities in a way that an update would return
|
|
|
|
* an occupant other than that whose exception caused the kernel entry.
|
|
|
|
*/
|
2014-03-10 22:22:50 +01:00
|
|
|
Processor_client * const old_occupant = scheduler->occupant();
|
2014-03-07 19:31:05 +01:00
|
|
|
old_occupant->exception(processor_id);
|
2014-03-06 14:21:13 +01:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The processor local as well as remote exception-handling may have
|
|
|
|
* changed the scheduling of the local activities. Hence we must update the
|
|
|
|
* processor occupant.
|
|
|
|
*/
|
2014-03-10 22:22:50 +01:00
|
|
|
Processor_client * const new_occupant = scheduler->update_occupant();
|
2014-03-07 19:31:05 +01:00
|
|
|
if (old_occupant != new_occupant) { reset_scheduling_time(processor_id); }
|
|
|
|
new_occupant->proceed(processor_id);
|
2012-05-30 20:13:09 +02:00
|
|
|
}
|
|
|
|
|
2013-12-18 16:18:16 +01:00
|
|
|
|
2013-10-16 12:30:10 +02:00
|
|
|
/**
 * Return singleton mode-transition control
 */
Kernel::Mode_transition_control * Kernel::mtc()
{
	/* create singleton processor context for the kernel */
	Cpu_context * const context = unmanaged_singleton<Cpu_context>();

	/* initialize mode-transition page */
	return unmanaged_singleton<Mode_transition_control>(context);
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Construct the constant CPU context of the kernel itself
 */
Kernel::Cpu_context::Cpu_context()
{
	_init(STACK_SIZE);

	/* the kernel runs on its own stack with 'kernel' as entrypoint */
	sp = (addr_t)kernel_stack;
	ip = (addr_t)kernel;

	/* admit the kernel context to the core protection-domain */
	core()->admit(this);
}
|