hw & arm_v7: mode transition via transit ttbr0

Previously, we performed protection-domain switches without a transitional
translation table that contains only global mappings. This was fine as long
as the CPU did no speculative memory accesses. However, enabling branch
prediction triggers such accesses. Thus, if we don't want to invalidate the
predictors on every context switch, we need to switch more carefully.

ref #474
Martin Stein 2014-07-28 16:55:47 +02:00 committed by Norman Feske
parent 03cd76821c
commit 9da42dde2f
17 changed files with 468 additions and 460 deletions
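The following is a minimal, hypothetical C++ sketch (not part of the commit) of the
switch sequence that the new '_switch_protection_domain' assembly macro in
mode_transition.s implements for ARMv7. The helper names are invented for
illustration; only the coprocessor accesses and their ordering mirror the actual
code: while the address-space ID changes, TTBR0 points to a transitional table
with global mappings only, so speculative table walks can never pair the new ASID
with the old table or the old ASID with the new one.

/* hypothetical sketch, assuming an ARMv7 target and GCC inline assembly */

typedef unsigned long word_t;

/* write translation-table base register 0 (TTBR0) */
static inline void write_ttbr0(word_t v) {
	asm volatile ("mcr p15, 0, %0, c2, c0, 0" :: "r" (v) : "memory"); }

/* write context-ID register (CONTEXTIDR) */
static inline void write_cidr(word_t v) {
	asm volatile ("mcr p15, 0, %0, c13, c0, 1" :: "r" (v) : "memory"); }

/* instruction synchronization barrier */
static inline void isb() { asm volatile ("isb" ::: "memory"); }

static inline void switch_protection_domain(word_t transit_ttbr0,
                                             word_t new_cidr,
                                             word_t new_ttbr0)
{
	write_ttbr0(transit_ttbr0); isb();  /* only global mappings are visible  */
	write_cidr(new_cidr);       isb();  /* announce the new address-space ID */
	write_ttbr0(new_ttbr0);     isb();  /* activate the new PD's table       */
}

In the actual mode-transition code, this sequence runs from the globally mapped
transition page, so the instruction fetches themselves stay valid across all
three steps.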


@@ -9,6 +9,7 @@ INC_DIR += $(REP_DIR)/src/core/include/spec/arm_v6
 # add C++ sources
 SRC_CC += cpu.cc
+SRC_CC += spec/arm_v6/cpu.cc
 
 # add assembly sources
 SRC_S += spec/arm_v6/mode_transition.s


@@ -7,6 +7,9 @@
 # add include paths
 INC_DIR += $(REP_DIR)/src/core/include/spec/arm_v7
 
+# add C++ sources
+SRC_CC += spec/arm_v7/cpu.cc
+
 # add assembly sources
 SRC_S += spec/arm_v7/mode_transition.s


@@ -53,6 +53,7 @@ SRC_CC += kernel/thread.cc
 SRC_CC += kernel/vm.cc
 SRC_CC += kernel/signal_receiver.cc
 SRC_CC += kernel/irq.cc
+SRC_CC += kernel/pd.cc
 SRC_CC += kernel/processor.cc
 SRC_CC += kernel/processor_pool.cc


@@ -0,0 +1,82 @@
+/**
+ * \brief   Tools for early translation tables
+ * \author  Stefan Kalkowski
+ * \author  Martin Stein
+ * \date    2014-08-05
+ */
+
+/*
+ * Copyright (C) 2012-2013 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU General Public License version 2.
+ */
+
+#ifndef _KERNEL__EARLY_TRANSLATIONS_H_
+#define _KERNEL__EARLY_TRANSLATIONS_H_
+
+/* core includes */
+#include <page_slab.h>
+#include <translation_table.h>
+
+namespace Genode
+{
+	/**
+	 * Dummy back-end allocator for early translation tables
+	 */
+	class Early_translations_allocator;
+
+	/**
+	 * Aligned slab for early translation tables
+	 */
+	class Early_translations_slab;
+}
+
+namespace Kernel
+{
+	using Genode::Early_translations_allocator;
+	using Genode::Early_translations_slab;
+}
+
+class Genode::Early_translations_allocator : public Genode::Core_mem_translator
+{
+	public:
+
+		Early_translations_allocator() { }
+
+		int add_range(addr_t base, size_t size) { return -1; }
+		int remove_range(addr_t base, size_t size) { return -1; }
+		Alloc_return alloc_aligned(size_t size, void **out_addr, int align) {
+			return Alloc_return::RANGE_CONFLICT; }
+		Alloc_return alloc_addr(size_t size, addr_t addr) {
+			return Alloc_return::RANGE_CONFLICT; }
+		void free(void *addr) { }
+		size_t avail() { return 0; }
+		bool valid_addr(addr_t addr) { return false; }
+		bool alloc(size_t size, void **out_addr) { return false; }
+		void free(void *addr, size_t) { }
+		size_t overhead(size_t size) { return 0; }
+		bool need_size_for_free() const override { return false; }
+		void * phys_addr(void * addr) { return addr; }
+		void * virt_addr(void * addr) { return addr; }
+};
+
+class Genode::Early_translations_slab : public Genode::Page_slab
+{
+	public:
+
+		typedef Genode::Core_mem_translator Allocator;
+
+		enum {
+			ALIGN_LOG2 = Genode::Translation_table::ALIGNM_LOG2,
+			ALIGN      = 1 << ALIGN_LOG2,
+		};
+
+		/**
+		 * Constructor
+		 */
+		Early_translations_slab(Allocator * const alloc) : Page_slab(alloc) {
+			assert(Genode::aligned(this, ALIGN_LOG2)); }
+
+} __attribute__((aligned(Early_translations_slab::ALIGN)));
+
+#endif /* _KERNEL__EARLY_TRANSLATIONS_H_ */


@@ -19,12 +19,12 @@
 #include <cpu/atomic.h>
 
 /* core includes */
+#include <kernel/early_translations.h>
 #include <kernel/configuration.h>
 #include <kernel/object.h>
 #include <kernel/processor.h>
 #include <translation_table.h>
 #include <assert.h>
-#include <page_slab.h>
 
 /* structure of the mode transition */
 extern int _mt_begin;
@@ -80,6 +80,11 @@ class Kernel::Lock
 namespace Kernel
 {
+	/**
+	 * Processor context of the kernel
+	 */
+	class Cpu_context;
+
 	/**
 	 * Controls the mode-transition page
 	 *
@@ -90,7 +95,7 @@ namespace Kernel
 	 * control provides a simple interface to access the code from within
 	 * the kernel.
 	 */
-	struct Mode_transition_control;
+	class Mode_transition_control;
 
 	/**
 	 * Return the system wide mode-transition control
@@ -111,16 +116,69 @@ namespace Kernel
 	Lock & data_lock();
 }
 
+class Kernel::Cpu_context : Cpu::Context
+{
+	private:
+
+		/**
+		 * Hook for environment specific initializations
+		 *
+		 * \param stack_size  size of kernel stack
+		 * \param table       base of transit translation table
+		 */
+		void _init(size_t const stack_size, addr_t const table);
+
+	public:
+
+		/**
+		 * Constructor
+		 *
+		 * \param table  mode-transition table
+		 */
+		Cpu_context(Genode::Translation_table * const table);
+};
+
 class Kernel::Mode_transition_control
 {
 	friend class Pd;
 
 	private:
 
-		typedef Genode::Cpu_state_modes Cpu_state_modes;
-		typedef Genode::Page_flags      Page_flags;
+		typedef Early_translations_allocator Allocator;
+		typedef Early_translations_slab      Slab;
+		typedef Genode::Translation_table    Table;
+		typedef Genode::Cpu_state_modes      Cpu_state_modes;
+		typedef Genode::Page_flags           Page_flags;
 
-		addr_t const _virt_user_entry;
+		Allocator   _allocator;
+		Slab        _slab;
+		Table       _table;
+		Cpu_context _master;
+
+		/**
+		 * Return size of the mode transition
+		 */
+		static size_t _size() { return (addr_t)&_mt_end - (addr_t)&_mt_begin; }
+
+		/**
+		 * Return size of master-context space in the mode transition
+		 */
+		static size_t _master_context_size()
+		{
+			addr_t const begin = (addr_t)&_mt_master_context_begin;
+			addr_t const end   = (addr_t)&_mt_master_context_end;
+			return end - begin;
+		}
+
+		/**
+		 * Return virtual address of the user entry-code
+		 */
+		static addr_t _virt_user_entry()
+		{
+			addr_t const phys      = (addr_t)&_mt_user_entry_pic;
+			addr_t const phys_base = (addr_t)&_mt_begin;
+			return VIRT_BASE + (phys - phys_base);
+		}
 
 		/**
 		 * Continue execution of client context
@@ -150,11 +208,11 @@ class Kernel::Mode_transition_control
 	public:
 
 		enum {
 			SIZE_LOG2 = Genode::Translation_table::MIN_PAGE_SIZE_LOG2,
 			SIZE = 1 << SIZE_LOG2,
 			VIRT_BASE = Processor::EXCEPTION_ENTRY,
-			VIRT_END = VIRT_BASE + SIZE,
-			ALIGNM_LOG2 = SIZE_LOG2,
+			ALIGN_LOG2 = Genode::Translation_table::ALIGNM_LOG2,
+			ALIGN = 1 << ALIGN_LOG2,
 		};
@@ -162,27 +220,7 @@ class Kernel::Mode_transition_control
 		/**
 		 * Constructor
 		 *
 		 * \param c  CPU context for kernel mode entry
 		 */
-		Mode_transition_control(Processor::Context * const c)
-		:
-			_virt_user_entry(VIRT_BASE + ((addr_t)&_mt_user_entry_pic -
-			                 (addr_t)&_mt_begin))
-		{
-			/* check if mode transition fits into aligned region */
-			addr_t const mt_begin = (addr_t)&_mt_begin;
-			addr_t const mt_end   = (addr_t)&_mt_end;
-			size_t const mt_size  = mt_end - mt_begin;
-			assert(mt_size <= SIZE);
-
-			/* check if kernel context fits into the mode transition */
-			addr_t const kc_begin = (addr_t)&_mt_master_context_begin;
-			addr_t const kc_end   = (addr_t)&_mt_master_context_end;
-			size_t const kc_size  = kc_end - kc_begin;
-			assert(sizeof(Processor::Context) <= kc_size);
-
-			/* fetch kernel-mode context */
-			Genode::memcpy(&_mt_master_context_begin, c,
-			               sizeof(Processor::Context));
-		}
+		Mode_transition_control();
 
 		/**
 		 * Map the mode transition page to a virtual address space
@@ -210,7 +248,7 @@ class Kernel::Mode_transition_control
 		void continue_user(Processor::Context * const context,
 		                   unsigned const processor_id)
 		{
-			_continue_client(context, processor_id, _virt_user_entry);
+			_continue_client(context, processor_id, _virt_user_entry());
 		}
 
 		/**
@@ -224,20 +262,22 @@ class Kernel::Mode_transition_control
 		{
 			_continue_client(context, processor_id, (addr_t)&_mt_vm_entry_pic);
 		}
-};
+
+} __attribute__((aligned(Mode_transition_control::ALIGN)));
 
 class Kernel::Pd : public Object<Pd, MAX_PDS, Pd_ids, pd_ids, pd_pool>
 {
+	public:
+
+		typedef Genode::Translation_table Table;
+
 	private:
 
-		Genode::Translation_table * const _tt;
+		Table * const _table;
 		Platform_pd * const _platform_pd;
 
 		/* keep ready memory for size-aligned extra costs at construction */
-		enum {
-			EXTRA_RAM_SIZE = 2 * Genode::Translation_table::MAX_COSTS_PER_TRANSLATION
-		};
+		enum { EXTRA_RAM_SIZE = 2 * Table::MAX_COSTS_PER_TRANSLATION };
+
 		char _extra_ram[EXTRA_RAM_SIZE];
 
 	public:
@@ -245,17 +285,11 @@ class Kernel::Pd : public Object<Pd, MAX_PDS, Pd_ids, pd_ids, pd_pool>
 		/**
 		 * Constructor
 		 *
-		 * \param tt           translation lookaside buffer of the PD
+		 * \param table        translation table of the PD
 		 * \param platform_pd  core object of the PD
 		 */
-		Pd(Genode::Translation_table * const tt,
-		   Platform_pd * const platform_pd)
-		: _tt(tt), _platform_pd(platform_pd) { }
-
-		/**
-		 * Destructor
-		 */
-		~Pd() { }
+		Pd(Table * const table, Platform_pd * const platform_pd)
+		: _table(table), _platform_pd(platform_pd) { }
 
 		/**
 		 * Let the CPU context 'c' join the PD
@@ -272,9 +306,7 @@ class Kernel::Pd : public Object<Pd, MAX_PDS, Pd_ids, pd_ids, pd_pool>
 		 ***************/
 
 		Platform_pd * platform_pd() const { return _platform_pd; }
-
-		Genode::Translation_table * translation_table() const {
-			return _tt; }
+		Table * translation_table() const { return _table; }
 };
 
 #endif /* _KERNEL__PD_H_ */


@@ -38,34 +38,8 @@ namespace Kernel
 	Thread_ids * thread_ids();
 	Thread_pool * thread_pool();
 
-	/**
-	 * Processor context of the kernel
-	 */
-	class Cpu_context;
 }
 
-struct Kernel::Cpu_context : Cpu::Context
-{
-	private:
-
-		/**
-		 * Hook for environment specific initializations
-		 *
-		 * \param stack_size  size of kernel stack
-		 */
-		void _init(size_t const stack_size);
-
-	public:
-
-		/**
-		 * Constructor
-		 */
-		Cpu_context();
-};
-
 class Kernel::Thread
 :
 	public Cpu::User_context,


@@ -80,70 +80,10 @@ class Genode::Arm
 		 */
 		struct Sctlr : Register<32>
 		{
 			struct M : Bitfield<0,1> { };  /* enable MMU */
-			struct A : Bitfield<1,1> { };  /* strict data addr. alignment on */
 			struct C : Bitfield<2,1> { };  /* enable data cache */
-			struct Z : Bitfield<11,1> { }; /* enable program flow prediction */
 			struct I : Bitfield<12,1> { }; /* enable instruction caches */
+			struct V : Bitfield<13,1> { }; /* select exception entry */
-
-			/*
-			 * These must be set all ones
-			 */
-			struct Static1 : Bitfield<3,4>  { };
-			struct Static2 : Bitfield<16,1> { };
-			struct Static3 : Bitfield<18,1> { };
-			struct Static4 : Bitfield<22,2> { };
-
-			struct V : Bitfield<13,1> /* select exception-entry base */
-			{
-				enum { XFFFF0000 = 1 };
-			};
-
-			struct Rr : Bitfield<14,1> /* replacement strategy */
-			{
-				enum { RANDOM = 0 };
-			};
-
-			struct Fi : Bitfield<21,1> { }; /* enable fast IRQ config */
-
-			struct Ve : Bitfield<24,1> /* interrupt vector config */
-			{
-				enum { FIXED = 0 };
-			};
-
-			struct Ee : Bitfield<25,1> { }; /* raise CPSR.E on exceptions */
-
-			/**
-			 * Common bitfield values for all modes
-			 */
-			static access_t common()
-			{
-				return Static1::bits(~0) |
-				       Static2::bits(~0) |
-				       Static3::bits(~0) |
-				       Static4::bits(~0) |
-				       A::bits(0) |
-				       C::bits(1) |
-				       Z::bits(0) |
-				       I::bits(1) |
-				       V::bits(V::XFFFF0000) |
-				       Rr::bits(Rr::RANDOM) |
-				       Fi::bits(0) |
-				       Ve::bits(Ve::FIXED) |
-				       Ee::bits(0);
-			}
-
-			/**
-			 * Value for the switch to virtual mode in kernel
-			 */
-			static access_t init_virt_kernel() {
-				return common() | M::bits(1); }
-
-			/**
-			 * Value for the initial kernel entry
-			 */
-			static access_t init_phys_kernel() {
-				return common() | M::bits(0); }
 
 			/**
 			 * Read register value
@@ -151,7 +91,7 @@ class Genode::Arm
 			static access_t read()
 			{
 				access_t v;
-				asm volatile ("mrc p15, 0, %[v], c1, c0, 0" : [v]"=r"(v) :: );
+				asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r" (v) :: );
 				return v;
 			}
@@ -159,7 +99,22 @@ class Genode::Arm
 			 * Write register value
 			 */
 			static void write(access_t const v) {
-				asm volatile ("mcr p15, 0, %[v], c1, c0, 0" :: [v]"r"(v) : ); }
+				asm volatile ("mcr p15, 0, %0, c1, c0, 0" :: "r" (v) : ); }
+
+			/**
+			 * Initialization that is common
+			 */
+			static void init_common(access_t & v)
+			{
+				C::set(v, 1);
+				I::set(v, 1);
+				V::set(v, 1);
+			}
+
+			/**
+			 * Initialization for virtual kernel stage
+			 */
+			static void init_virt_kernel(access_t & v) { M::set(v, 1); }
 		};
 
 		/**
@@ -196,15 +151,7 @@ class Genode::Arm
 		 */
 		struct Ttbr0 : Register<32>
 		{
-			struct S : Bitfield<1,1> { }; /* shareable */
-
-			struct Rgn : Bitfield<3, 2> /* outer cachable attributes */
-			{
-				enum { NON_CACHEABLE = 0, CACHEABLE = 1 };
-			};
-
-			struct Ba : Bitfield<14-TTBCR_N, 18+TTBCR_N> { }; /* translation
-			                                                   * table base */
+			struct Ba : Bitfield<14-TTBCR_N, 18+TTBCR_N> { };
 
 			/**
 			 * Write register, only in privileged CPU mode
@@ -221,18 +168,6 @@ class Genode::Arm
 				asm volatile ("mrc p15, 0, %[v], c2, c0, 0" : [v]"=r"(v) :: );
 				return v;
 			}
-
-			/**
-			 * Value for the switch to virtual mode in kernel
-			 *
-			 * \param sect_table  pointer to initial section table
-			 */
-			static access_t init_virt_kernel(addr_t const sect_table)
-			{
-				return S::bits(0) |
-				       Rgn::bits(Rgn::CACHEABLE) |
-				       Ba::masked((addr_t)sect_table);
-			}
 		};
 
 		/**
@@ -512,24 +447,19 @@ class Genode::Arm
 		 */
 		struct Context : Cpu_state
 		{
-			/**********************************************************
-			 ** The offset and width of any of these classmembers is **
-			 ** silently expected to be this way by several assembly **
-			 ** files. So take care if you attempt to change them.   **
-			 **********************************************************/
-
-			uint32_t cidr;    /* context ID register backup */
-			uint32_t t_table; /* base address of applied translation table */
+			Cidr::access_t  cidr;
+			Ttbr0::access_t ttbr0;
 
 			/**
-			 * Get base of assigned translation lookaside buffer
+			 * Return base of assigned translation table
 			 */
-			addr_t translation_table() const { return t_table; }
+			addr_t translation_table() const {
+				return Ttbr0::Ba::masked(ttbr0); }
 
 			/**
-			 * Assign translation lookaside buffer
+			 * Assign translation-table base 'table'
 			 */
-			void translation_table(addr_t const tt) { t_table = tt; }
+			void translation_table(addr_t const table);
 
 			/**
 			 * Assign protection domain
@@ -571,13 +501,13 @@ class Genode::Arm
 			/**
 			 * Initialize thread context
 			 *
-			 * \param tt     physical base of appropriate translation table
+			 * \param table  physical base of appropriate translation table
 			 * \param pd_id  kernel name of appropriate protection domain
 			 */
-			void init_thread(addr_t const tt, unsigned const pd_id)
+			void init_thread(addr_t const table, unsigned const pd_id)
 			{
-				cidr    = pd_id;
-				t_table = tt;
+				protection_domain(pd_id);
+				translation_table(table);
 			}
 
 			/**


@@ -39,8 +39,9 @@
 .set PC_OFFSET,             15 * 4
 .set PSR_OFFSET,            16 * 4
 .set EXCEPTION_TYPE_OFFSET, 17 * 4
-.set CONTEXTIDR_OFFSET,     18 * 4
-.set SECTION_TABLE_OFFSET,  19 * 4
+.set TRANSIT_TTBR0_OFFSET,  17 * 4
+.set CIDR_OFFSET,           18 * 4
+.set TTBR0_OFFSET,          19 * 4
 
 /* size of local variables */
 .set CONTEXT_PTR_SIZE, 1 * 4


@@ -56,91 +56,63 @@ class Genode::Cpu : public Arm
 		 */
 		struct Sctlr : Arm::Sctlr
 		{
 			struct W  : Bitfield<3,1>  { }; /* enable write buffer */
-			struct Unused_0 : Bitfield<4,3> { }; /* shall be ones */
-
-			struct B : Bitfield<7,1> /* Memory system endianess */
-			{
-				enum { LITTLE = 0 };
-			};
-
-			struct S  : Bitfield<8,1>  { }; /* enable MMU protection */
-			struct R  : Bitfield<9,1>  { }; /* enable ROM protection */
-			struct L4 : Bitfield<15,1> { }; /* raise T bit on LOAD-to-PC */
 			struct Dt : Bitfield<16,1> { }; /* global data TCM enable */
 			struct It : Bitfield<18,1> { }; /* global instruction TCM enable */
 			struct U  : Bitfield<22,1> { }; /* enable unaligned data access */
 			struct Xp : Bitfield<23,1> { }; /* disable subpage AP bits */
+
+			struct Unnamed_0 : Bitfield<4,3>  { }; /* shall be ones */
-			struct Unused_1 : Bitfield<26,6> { }; /* shall not be modified */
+			struct Unnamed_1 : Bitfield<26,6> { }; /* shall not be modified */
 
 			/**
-			 * Get static base value for writes
+			 * Initialization that is common
 			 */
-			static access_t base_value() {
-				return Unused_0::reg_mask() | Unused_1::masked(read()); }
-
-			/**
-			 * Value for the switch to virtual mode in kernel
-			 */
-			static access_t init_virt_kernel()
+			static void init_common(access_t & v)
 			{
-				return base_value() |
-				       Arm::Sctlr::init_virt_kernel() |
-				       W::bits(0) |
-				       B::bits(B::LITTLE) |
-				       S::bits(0) |
-				       R::bits(0) |
-				       L4::bits(0) |
-				       Dt::bits(0) |
-				       It::bits(0) |
-				       U::bits(0) |
-				       Xp::bits(1);
+				Arm::Sctlr::init_common(v);
+				W::set(v, 1);
+				Dt::set(v, 1);
+				It::set(v, 1);
+				U::set(v, 1);
+				Xp::set(v, 1);
+				Unnamed_0::set(v, ~0);
+				Unnamed_1::set(v, Unnamed_1::masked(read()));
 			}
 
 			/**
-			 * Value for the initial kernel entry
+			 * Initialization for virtual kernel stage
+			 */
+			static access_t init_virt_kernel()
+			{
+				access_t v = 0;
+				init_common(v);
+				Arm::Sctlr::init_virt_kernel(v);
+				return v;
+			}
+
+			/**
+			 * Initialization for physical kernel stage
 			 */
 			static access_t init_phys_kernel()
 			{
-				return base_value() |
-				       Arm::Sctlr::init_phys_kernel() |
-				       W::bits(0) |
-				       B::bits(B::LITTLE) |
-				       S::bits(0) |
-				       R::bits(0) |
-				       L4::bits(0) |
-				       Dt::bits(1) |
-				       It::bits(1) |
-				       U::bits(0) |
-				       Xp::bits(1);
+				access_t v = 0;
+				init_common(v);
+				return v;
 			}
 		};
 		/**
-		 * Translation table base control register 0
+		 * Translation table base register 0
 		 */
 		struct Ttbr0 : Arm::Ttbr0
 		{
-			struct C : Bitfield<0,1> /* inner cachable mode */
-			{
-				enum { NON_CACHEABLE = 0 };
-			};
-
-			struct P : Bitfield<2,1> { }; /* memory controller ECC enabled */
-
 			/**
-			 * Value for the switch to virtual mode in kernel
+			 * Return initialized value
 			 *
-			 * \param section_table  initial section table
+			 * \param table  base of targeted translation table
 			 */
-			static access_t init_virt_kernel(addr_t const sect_table)
-			{
-				return Arm::Ttbr0::init_virt_kernel(sect_table) |
-				       P::bits(0) |
-				       C::bits(C::NON_CACHEABLE);
-			}
+			static access_t init(addr_t const table) {
+				return Ba::masked(table); }
 		};
 
 		/**
@@ -165,16 +137,15 @@ class Genode::Cpu
 		/**
 		 * Switch to the virtual mode in kernel
 		 *
-		 * \param section_table  section translation table of the initial
-		 *                       address space this function switches to
-		 * \param process_id     process ID of the initial address space
+		 * \param table       base of targeted translation table
+		 * \param process_id  process ID of the initial address space
 		 */
-		static void init_virt_kernel(addr_t const section_table,
-		                             unsigned const process_id)
+		static void
+		init_virt_kernel(addr_t const table, unsigned const process_id)
 		{
 			Cidr::write(process_id);
 			Dacr::write(Dacr::init_virt_kernel());
-			Ttbr0::write(Ttbr0::init_virt_kernel(section_table));
+			Ttbr0::write(Ttbr0::init(table));
 			Ttbcr::write(Ttbcr::init_virt_kernel());
 			Sctlr::write(Sctlr::init_virt_kernel());
 		}


@@ -174,89 +174,6 @@ class Genode::Arm_v7 : public Arm
 			struct Cpnsae11 : Bitfield<11, 1> { };
 		};
 
-		/**
-		 * System control register
-		 */
-		struct Sctlr : Arm::Sctlr
-		{
-			struct Unused_0 : Bitfield<3,4>  { }; /* shall be ~0 */
-			struct Sw       : Bitfield<10,1> { }; /* support SWP and SWPB */
-			struct Unused_1 : Bitfield<16,1> { }; /* shall be ~0 */
-			struct Ha       : Bitfield<17,1> { }; /* enable HW access flag */
-			struct Unused_2 : Bitfield<18,1> { }; /* shall be ~0 */
-			struct Unused_3 : Bitfield<22,2> { }; /* shall be ~0 */
-			struct Nmfi     : Bitfield<27,1> { }; /* FIQs are non-maskable */
-			struct Tre      : Bitfield<28,1> { }; /* remap TEX[2:1] for OS */
-
-			struct Afe : Bitfield<29,1> /* translation access perm. mode */
-			{
-				enum { FULL_RANGE_OF_PERMISSIONS = 0 };
-			};
-
-			struct Te : Bitfield<30,1> { }; /* do exceptions in Thumb state */
-
-			/**
-			 * Static base value
-			 */
-			static access_t base_value()
-			{
-				return Unused_0::bits(~0) |
-				       Unused_1::bits(~0) |
-				       Unused_2::bits(~0) |
-				       Unused_3::bits(~0);
-			}
-
-			/**
-			 * Value for the first kernel run
-			 */
-			static access_t init_phys_kernel()
-			{
-				return base_value() |
-				       Arm::Sctlr::init_phys_kernel() |
-				       Sw::bits(0) |
-				       Ha::bits(0) |
-				       Nmfi::bits(0) |
-				       Tre::bits(0);
-			}
-
-			/**
-			 * Value for the switch to virtual mode in kernel
-			 */
-			static access_t init_virt_kernel()
-			{
-				return base_value() |
-				       Arm::Sctlr::init_virt_kernel() |
-				       Sw::bits(0) |
-				       Ha::bits(0) |
-				       Nmfi::bits(0) |
-				       Tre::bits(0);
-			}
-		};
-
-		/**
-		 * Translation table base register 0
-		 */
-		struct Ttbr0 : Arm::Ttbr0
-		{
-			struct Nos    : Bitfield<5,1> { }; /* not outer shareable */
-			struct Irgn_1 : Bitfield<0,1> { }; /* inner cachable mode */
-			struct Irgn_0 : Bitfield<6,1> { }; /* inner cachable mode */
-
-			/**
-			 * Value for the switch to virtual mode in kernel
-			 *
-			 * \param sect_table  pointer to initial section table
-			 */
-			static access_t init_virt_kernel(addr_t const sect_table)
-			{
-				return Arm::Ttbr0::init_virt_kernel(sect_table) |
-				       Nos::bits(0) |
-				       Irgn_1::bits(0) |
-				       Irgn_0::bits(1);
-			}
-		};
-
 		/**
 		 * Translation table base control register
 		 */
@@ -276,21 +193,89 @@ class Genode::Arm_v7 : public Arm
 			}
 		};
 
+		/**
+		 * System control register
+		 */
+		struct Sctlr : Arm::Sctlr
+		{
+			struct Z : Bitfield<11,1> { }; /* enable program flow prediction */
+
+			struct Unnamed_0 : Bitfield<3,4>  { }; /* shall be ones */
+			struct Unnamed_1 : Bitfield<16,1> { }; /* shall be ones */
+			struct Unnamed_2 : Bitfield<18,1> { }; /* shall be ones */
+			struct Unnamed_3 : Bitfield<22,2> { }; /* shall be ones */
+
+			/**
+			 * Initialization that is common
+			 */
+			static void init_common(access_t & v)
+			{
+				Arm::Sctlr::init_common(v);
+				Unnamed_0::set(v, ~0);
+				Unnamed_1::set(v, ~0);
+				Unnamed_2::set(v, ~0);
+				Unnamed_3::set(v, ~0);
+			}
+
+			/**
+			 * Initialization for virtual kernel stage
+			 */
+			static access_t init_virt_kernel()
+			{
+				access_t v = 0;
+				init_common(v);
+				Arm::Sctlr::init_virt_kernel(v);
+				return v;
+			}
+
+			/**
+			 * Initialization for physical kernel stage
+			 */
+			static access_t init_phys_kernel()
+			{
+				access_t v = 0;
+				init_common(v);
+				return v;
+			}
+		};
+
 	public:
 
+		/**
+		 * Translation table base register 0
+		 */
+		struct Ttbr0 : Arm::Ttbr0
+		{
+			struct Irgn_1 : Bitfield<0, 1> { };         /* inner cache attr */
+			struct Rgn    : Bitfield<3, 2> { };         /* outer cache attr */
+			struct Irgn_0 : Bitfield<6, 1> { };         /* inner cache attr */
+			struct Irgn : Bitset_2<Irgn_0, Irgn_1> { }; /* inner cache attr */
+
+			/**
+			 * Return initialized value
+			 *
+			 * \param table  base of targeted translation table
+			 */
+			static access_t init(addr_t const table)
+			{
+				access_t v = Ba::masked(table);
+				Irgn::set(v, 1);
+				Rgn::set(v, 1);
+				return v;
+			}
+		};
+
 		/**
 		 * Switch to the virtual mode in kernel
 		 *
-		 * \param section_table  section translation table of the initial
-		 *                       address space this function switches to
-		 * \param process_id     process ID of the initial address space
+		 * \param table       base of targeted translation table
+		 * \param process_id  process ID of the kernel address-space
 		 */
-		static void init_virt_kernel(addr_t const section_table,
-		                             unsigned const process_id)
+		static void
+		init_virt_kernel(addr_t const table, unsigned const process_id)
 		{
 			Cidr::write(process_id);
 			Dacr::write(Dacr::init_virt_kernel());
-			Ttbr0::write(Ttbr0::init_virt_kernel(section_table));
+			Ttbr0::write(Ttbr0::init(table));
 			Ttbcr::write(Ttbcr::init_virt_kernel());
 			Sctlr::write(Sctlr::init_virt_kernel());
 		}


@@ -33,7 +33,6 @@
 #include <map_local.h>
 
 /* base includes */
-#include <base/allocator_avl.h>
 #include <unmanaged_singleton.h>
 #include <base/native_types.h>
@@ -79,33 +78,11 @@ namespace Kernel
 	 */
 	Pd * core_pd()
 	{
-		using Ttable = Genode::Translation_table;
-		constexpr int tt_align = 1 << Ttable::ALIGNM_LOG2;
+		typedef Early_translations_slab      Slab;
+		typedef Early_translations_allocator Allocator;
+		typedef Genode::Translation_table    Table;
 
-		/**
-		 * Dummy page slab backend allocator for bootstrapping only
-		 */
-		struct Simple_allocator : Genode::Core_mem_translator
-		{
-			Simple_allocator() { }
-
-			int add_range(addr_t base, size_t size) { return -1; }
-			int remove_range(addr_t base, size_t size) { return -1; }
-			Alloc_return alloc_aligned(size_t size, void **out_addr, int align) {
-				return Alloc_return::RANGE_CONFLICT; }
-			Alloc_return alloc_addr(size_t size, addr_t addr) {
-				return Alloc_return::RANGE_CONFLICT; }
-			void free(void *addr) {}
-			size_t avail() { return 0; }
-			bool valid_addr(addr_t addr) { return false; }
-			bool alloc(size_t size, void **out_addr) { return false; }
-			void free(void *addr, size_t) { }
-			size_t overhead(size_t size) { return 0; }
-			bool need_size_for_free() const override { return false; }
-			void * phys_addr(void * addr) { return addr; }
-			void * virt_addr(void * addr) { return addr; }
-		};
+		constexpr addr_t table_align = 1 << Table::ALIGNM_LOG2;
 
 		struct Core_pd : Platform_pd, Pd
 		{
@@ -144,16 +121,18 @@ namespace Kernel
 				}
 			}
 
-			Core_pd(Ttable * tt, Genode::Page_slab * slab)
-			: Platform_pd(tt, slab),
-			  Pd(tt, this)
+			/**
+			 * Constructor
+			 */
+			Core_pd(Table * const table, Slab * const slab)
+			: Platform_pd(table, slab), Pd(table, this)
 			{
 				using namespace Genode;
 
 				Platform_pd::_id = Pd::id();
 
 				/* map exception vector for core */
-				Kernel::mtc()->map(tt, slab);
+				Kernel::mtc()->map(table, slab);
 
 				/* map core's program image */
 				map((addr_t)&_prog_img_beg, (addr_t)&_prog_img_end, false);
@@ -166,11 +145,10 @@ namespace Kernel
 			}
 		};
 
-		Simple_allocator * sa = unmanaged_singleton<Simple_allocator>();
-		Ttable * tt = unmanaged_singleton<Ttable, tt_align>();
-		Genode::Page_slab * slab = unmanaged_singleton<Genode::Page_slab,
-		                                               tt_align>(sa);
-		return unmanaged_singleton<Core_pd>(tt, slab);
+		Allocator * const alloc = unmanaged_singleton<Allocator>();
+		Table     * const table = unmanaged_singleton<Table, table_align>();
+		Slab      * const slab  = unmanaged_singleton<Slab, Slab::ALIGN>(alloc);
+		return unmanaged_singleton<Core_pd>(table, slab);
 	}
/** /**
@ -362,19 +340,9 @@ extern "C" void kernel()
} }
Kernel::Mode_transition_control * Kernel::mtc() Kernel::Cpu_context::Cpu_context(Genode::Translation_table * const table)
{ {
/* create singleton processor context for kernel */ _init(STACK_SIZE, (addr_t)table);
Cpu_context * const cpu_context = unmanaged_singleton<Cpu_context>();
/* initialize mode transition page */
return unmanaged_singleton<Mode_transition_control>(cpu_context);
}
Kernel::Cpu_context::Cpu_context()
{
_init(STACK_SIZE);
sp = (addr_t)kernel_stack; sp = (addr_t)kernel_stack;
ip = (addr_t)kernel; ip = (addr_t)kernel;
core_pd()->admit(this); core_pd()->admit(this);


@@ -0,0 +1,38 @@
+/*
+ * \brief   Kernel backend for protection domains
+ * \author  Martin Stein
+ * \author  Stefan Kalkowski
+ * \date    2012-11-30
+ */
+
+/*
+ * Copyright (C) 2012-2013 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU General Public License version 2.
+ */
+
+/* core includes */
+#include <kernel/pd.h>
+
+/* Genode includes */
+#include <unmanaged_singleton.h>
+
+using namespace Kernel;
+
+
+Mode_transition_control::Mode_transition_control()
+: _slab(&_allocator), _master(&_table)
+{
+	assert(Genode::aligned(this, ALIGN_LOG2));
+	assert(sizeof(_master) <= _master_context_size());
+	assert(_size() <= SIZE);
+
+	map(&_table, &_slab);
+	Genode::memcpy(&_mt_master_context_begin, &_master, sizeof(_master));
+}
+
+
+Mode_transition_control * Kernel::mtc()
+{
+	typedef Mode_transition_control Control;
+	return unmanaged_singleton<Control, Control::ALIGN>();
+}


@@ -93,7 +93,11 @@ void Thread::_mmu_exception()
 ** Kernel::Cpu_context **
 *************************/
 
-void Kernel::Cpu_context::_init(size_t const stack_size) { r12 = stack_size; }
+void Kernel::Cpu_context::_init(size_t const stack_size, addr_t const table)
+{
+	r12 = stack_size;
+	cpu_exception = Genode::Cpu::Ttbr0::init(table);
+}
 
 
 /*************************


@@ -0,0 +1,20 @@
+/*
+ * \brief  CPU driver for core
+ * \author Martin stein
+ * \date   2014-08-06
+ */
+
+/*
+ * Copyright (C) 2012-2013 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU General Public License version 2.
+ */
+
+/* core includes */
+#include <cpu.h>
+
+using namespace Genode;
+
+
+void Arm::Context::translation_table(addr_t const table) {
+	ttbr0 = Cpu::Ttbr0::init(table); }


@@ -64,13 +64,13 @@
 	/* load kernel cidr */
 	adr sp, _mt_master_context_begin
-	ldr sp, [sp, #CONTEXTIDR_OFFSET]
+	ldr sp, [sp, #CIDR_OFFSET]
 	mcr p15, 0, sp, c13, c0, 1
 	_flush_branch_predictor
 
-	/* load kernel section table */
+	/* load kernel ttbr0 */
 	adr sp, _mt_master_context_begin
-	ldr sp, [sp, #SECTION_TABLE_OFFSET]
+	ldr sp, [sp, #TTBR0_OFFSET]
 	mcr p15, 0, sp, c2, c0, 0
 	_flush_branch_predictor
 
@@ -212,9 +212,9 @@
 	add sp, lr, #SP_OFFSET
 	ldm sp, {sp,lr}^
 
-	/* get user cidr and section table */
-	ldr sp, [lr, #CONTEXTIDR_OFFSET]
-	ldr lr, [lr, #SECTION_TABLE_OFFSET]
+	/* get user cidr and ttbr0 */
+	ldr sp, [lr, #CIDR_OFFSET]
+	ldr lr, [lr, #TTBR0_OFFSET]
 
 	/********************************************************
 	 ** From now on, until we leave kernel mode, we must   **


@@ -0,0 +1,20 @@
+/*
+ * \brief  CPU driver for core
+ * \author Martin stein
+ * \date   2014-08-06
+ */
+
+/*
+ * Copyright (C) 2012-2013 Genode Labs GmbH
+ *
+ * This file is part of the Genode OS framework, which is distributed
+ * under the terms of the GNU General Public License version 2.
+ */
+
+/* core includes */
+#include <cpu.h>
+
+using namespace Genode;
+
+
+void Arm::Context::translation_table(addr_t const table) {
+	ttbr0 = Arm_v7::Ttbr0::init(table); }


@@ -30,7 +30,7 @@
 .set UND_MODE, 27
 
 /* size of local variables */
-.set BUFFER_SIZE, 2 * 4
+.set BUFFER_SIZE, 3 * 4
 
 
 /************
@@ -85,35 +85,38 @@
 /**
- * Compose a value for the translation-table-base register 0 and apply it
+ * Override the TTBR0 register
  *
- * \param section_table_reg  register that contains targeted section-table base
+ * \param val  new value, read reg
  */
-.macro _init_ttbr0 section_table_reg
-
-	/* IRGN bitfield is set to 1 to compose the TTBR0 value */
-	orr \section_table_reg, \section_table_reg, #0b1001000
-
-	/* write translation-table-base register 0 */
-	mcr p15, 0, \section_table_reg, c2, c0, 0
-
-	/* instruction and data synchronization barrier */
-	isb
-	dsb
+.macro _write_ttbr0 val
+	mcr p15, 0, \val, c2, c0, 0
 .endm
 
 /**
- * Apply a value to the CONTEXTIDR register
+ * Override the CIDR register
  *
- * \param contexidr_reg  register that contains the new CONTEXTIDR value
+ * \param val  new value, read reg
  */
-.macro _init_contextidr contextidr_reg
-
-	/* write CONTEXTIDR register */
-	mcr p15, 0, \contextidr_reg, c13, c0, 1
-
-	/* finish all previous instructions */
+.macro _write_cidr val
+	mcr p15, 0, \val, c13, c0, 1
+.endm
+
+/**
+ * Switch to a given protection domain
+ *
+ * \param transit_ttbr0  transitional TTBR0 value, read/write reg
+ * \param new_cidr       new CIDR value, read reg
+ * \param new_ttbr0      new TTBR0 value, read/write reg
+ */
+.macro _switch_protection_domain transit_ttbr0, new_cidr, new_ttbr0
+	_write_ttbr0 \transit_ttbr0
+	isb
+	_write_cidr \new_cidr
+	isb
+	_write_ttbr0 \new_ttbr0
 	isb
 .endm
@@ -126,26 +129,20 @@
  */
 .macro _user_to_kernel_pic exception_type, pc_adjust
 
-	/*************************************************************************
-	 ** Still in user protection domain, thus avoid access to kernel memory **
-	 *************************************************************************/
-
 	/* disable fast interrupts when not in fast-interrupt mode */
 	.if \exception_type != FIQ_TYPE
 		cpsid f
 	.endif
 
 	/*
-	 * The sp in svc mode still contains the base of the globally mapped
-	 * buffer of this processor. Hence go to svc mode and buffer user r0 and
-	 * user r1 to globally mapped memory to be able to pollute r0 and r1.
+	 * The sp in svc mode still contains the base of the globally mapped buffer
+	 * of this processor. Hence go to svc mode, buffer user r0-r2, and make
+	 * buffer pointer available to all modes
 	 */
 	.if \exception_type != RST_TYPE && \exception_type != SVC_TYPE
 		cps #SVC_MODE
 	.endif
-	stm sp, {r0, r1}^
-
-	/* make buffer pointer available to all modes */
+	stm sp, {r0-r2}^
 	mov r0, sp
 
 	/* switch back to previous privileged mode */
@@ -165,18 +162,11 @@
 		cps #FIQ_MODE
 	.endif
 
-	/* load kernel contextidr and base of the kernel section-table */
-	adr sp, _mt_master_context_begin
-	add sp, #CONTEXTIDR_OFFSET
-	ldm sp, {r1, sp}
-
 	/* switch to kernel protection-domain */
-	_init_contextidr r1
-	_init_ttbr0 sp
-
-	/*******************************************
-	 ** Now it's save to access kernel memory **
-	 *******************************************/
+	adr sp, _mt_master_context_begin
+	add sp, #TRANSIT_TTBR0_OFFSET
+	ldm sp, {r1, r2, sp}
+	_switch_protection_domain r1, r2, sp
 
 	/* get user context-pointer */
 	_get_client_context_ptr sp, r1
@@ -187,13 +177,9 @@
 	.endif
 	str lr, [sp, #PC_OFFSET]
 
-	/* move buffer pointer to lr to enable us to save user r0 - r12 via stm */
+	/* restore user r0-r2 from buffer and save user r0-r12 */
 	mov lr, r0
-
-	/* restore user r0 and user r1 */
-	ldm lr, {r0, r1}
-
-	/* save user r0 - r12 */
+	ldm lr, {r0-r2}
 	stm sp, {r0-r12}^
 
 	/* save user sp and user lr */
@@ -420,37 +406,29 @@
 	_get_client_context_ptr lr, r0
 	_get_buffer_ptr sp, r0
 
-	/* buffer user pc and base of user section-table globally mapped */
-	ldr r0, [lr, #PC_OFFSET]
-	ldr r1, [lr, #SECTION_TABLE_OFFSET]
-	stm sp, {r0, r1}
-
-	/* buffer user psr in spsr */
+	/* load user psr in spsr */
 	ldr r0, [lr, #PSR_OFFSET]
 	msr spsr, r0
 
-	/* setup banked user sp and banked user lr */
+	/* apply banked user sp, banked user lr, and user r0-r12 */
 	add r0, lr, #SP_OFFSET
 	ldm r0, {sp, lr}^
-
-	/* setup user r0 to r12 */
 	ldm lr, {r0-r12}^
 
-	/* load user contextidr */
-	ldr lr, [lr, #CONTEXTIDR_OFFSET]
-
-	/********************************************************
-	 ** From now on, until we leave kernel mode, we must   **
-	 ** avoid access to memory that is not mapped globally **
-	 ********************************************************/
+	/* buffer user r0-r1, and user pc */
+	stm sp, {r0, r1}
+	ldr r0, [lr, #PC_OFFSET]
+	str r0, [sp, #2*4]
 
 	/* switch to user protection-domain */
-	_init_contextidr lr
-	ldr lr, [sp, #4]
-	_init_ttbr0 lr
+	adr r0, _mt_master_context_begin
+	ldr r0, [r0, #TRANSIT_TTBR0_OFFSET]
+	add lr, lr, #CIDR_OFFSET
+	ldm lr, {r1, lr}
+	_switch_protection_domain r0, r1, lr
 
-	/* apply user pc which implies application of spsr as user psr */
-	ldm sp, {pc}^
+	/* apply user r0-r1 and user pc which implies application of spsr */
+	ldm sp, {r0, r1, pc}^
 
 /*
  * On vm exceptions the CPU has to jump to one of the following