hw: switch page-tables only when necessary

* Instead of always reloading page-tables when a thread context is switched,
  only do this when another user PD's thread is the next target;
  core-threads are always executed within the last PD's page-table set
* remove the concept of the mode transition
* instead map the exception vector once in bootstrap code into kernel's
  memory segment
* when a new page directory is constructed for a user PD, copy over the
  top-level kernel segment entries on RISCV and X86; on ARM we use a designated
  page directory register for the kernel segment
* transfer the current CPU id from bootstrap to core/kernel in a register
  to ease first stack address calculation
* align the CPU context member of threads and VMs, because of x86 constraints
  regarding stack-pointer loading
* introduce Align_at template for members with alignment constraints
* let the x86 hardware do part of the context saving in ISS, by passing
  the thread context into the TSS before leaving to user-land
* use one exception vector for all ARM platforms including Arm_v6

Fix #2091
This commit is contained in:
Stefan Kalkowski 2017-06-30 12:00:27 +02:00 committed by Christian Helmuth
parent ca60e24ad9
commit 4e97a6511b
97 changed files with 1162 additions and 1966 deletions

View File

@ -57,7 +57,6 @@ SRC_CC += kernel/thread.cc
SRC_CC += kernel/signal_receiver.cc
SRC_CC += kernel/ipc_node.cc
SRC_CC += kernel/irq.cc
SRC_CC += kernel/pd.cc
SRC_CC += kernel/cpu.cc
SRC_CC += kernel/timer.cc
SRC_CC += kernel/object.cc

View File

@ -16,6 +16,7 @@ SRC_CC += spec/arm/platform_support.cc
# add assembly sources
SRC_S += spec/arm/crt0.s
SRC_S += spec/arm/exception_vector.s
vpath spec/32bit/memory_map.cc $(BASE_DIR)/../base-hw/src/lib/hw

View File

@ -17,8 +17,5 @@ SRC_CC += spec/arm/kernel/thread_update_pd.cc
SRC_CC += kernel/vm_thread_off.cc
SRC_CC += kernel/kernel.cc
# add assembly sources
SRC_S += spec/arm_v6/mode_transition.s
# include less specific configuration
include $(BASE_DIR)/../base-hw/lib/mk/spec/arm/core-hw.inc

View File

@ -11,8 +11,5 @@ INC_DIR += $(BASE_DIR)/../base-hw/src/core/spec/arm_v7
SRC_CC += spec/arm_v7/cpu.cc
SRC_CC += spec/arm_v7/perf_counter.cc
# add assembly sources
SRC_S += spec/arm_v7/mode_transition.s
# include less specific configuration
include $(BASE_DIR)/../base-hw/lib/mk/spec/arm/core-hw.inc

View File

@ -18,7 +18,7 @@ SRC_CC += spec/arm_v7/vm_session_component.cc
SRC_CC += spec/arm_v7/virtualization/vm_session_component.cc
# add assembly sources
SRC_S += spec/arm_v7/virtualization/mode_transition.s
SRC_S += spec/arm_v7/virtualization/exception_vector.s
NR_OF_CPUS = 2

View File

@ -26,7 +26,7 @@ SRC_CC += spec/arm_v7/trustzone/kernel/vm.cc
SRC_CC += spec/arm_v7/vm_session_component.cc
SRC_CC += spec/arm_v7/trustzone/vm_session_component.cc
SRC_S += spec/arm_v7/trustzone/mode_transition.s
SRC_S += spec/arm_v7/trustzone/exception_vector.s
endif
# include less specific configuration

View File

@ -2,6 +2,7 @@ INC_DIR += $(BASE_DIR)/../base-hw/src/bootstrap/spec/x86_64
SRC_CC += bootstrap/spec/x86_64/platform_muen.cc
SRC_CC += lib/muen/sinfo.cc
SRC_CC += hw/spec/64bit/memory_map.cc
SRC_S += bootstrap/spec/x86_64/crt0.s
SRC_S += bootstrap/spec/x86_64/crt0_translation_table_muen.s

View File

@ -14,8 +14,8 @@ INC_DIR += $(REP_DIR)/src/core/spec/x86_64/muen
INC_DIR += $(BASE_DIR)/../base-hw/src/core/spec/x86_64
# add assembly sources
SRC_S += spec/x86_64/mode_transition.s
SRC_S += spec/x86_64/crt0.s
SRC_S += spec/x86_64/exception_vector.s
# add C++ sources
SRC_CC += spec/x86_64/muen/kernel/cpu_exception.cc
@ -40,5 +40,9 @@ SRC_CC += spec/x86_64/kernel/thread.cc
SRC_CC += spec/x86_64/kernel/thread.cc
SRC_CC += spec/x86_64/platform_support_common.cc
SRC_CC += spec/64bit/memory_map.cc
vpath spec/64bit/memory_map.cc $(BASE_DIR)/../base-hw/src/lib/hw
# include less specific configuration
include $(BASE_DIR)/../base-hw/lib/mk/core-hw.inc

View File

@ -21,7 +21,7 @@ SRC_CC += spec/imx53/timer.cc
SRC_CC += spec/arm/cpu_context_trustzone.cc
# add assembly sources
SRC_S += spec/arm_v7/trustzone/mode_transition.s
SRC_S += spec/arm_v7/trustzone/exception_vector.s
# include less specific configuration
include $(REP_DIR)/lib/mk/spec/cortex_a8/core-hw.inc

View File

@ -9,8 +9,8 @@
INC_DIR += $(BASE_DIR)/../base-hw/src/core/spec/x86_64
# add assembly sources
SRC_S += spec/x86_64/mode_transition.s
SRC_S += spec/x86_64/crt0.s
SRC_S += spec/x86_64/exception_vector.s
# add C++ sources
SRC_CC += kernel/vm_thread_off.cc

View File

@ -25,6 +25,6 @@ extern "C" void init() __attribute__ ((noreturn));
extern "C" void init()
{
Bootstrap::platform().enable_mmu();
Bootstrap::platform().start_core();
Bootstrap::Platform & p = Bootstrap::platform();
p.start_core(p.enable_mmu());
}

View File

@ -135,18 +135,24 @@ Mapping Platform::_load_elf()
core_pd->map_insert(m);
else
ret = m;
/* map start of the text segment as exception vector */
if (segment.flags().x && !segment.flags().w) {
Memory_region e = Hw::Mm::supervisor_exception_vector();
core_pd->map_insert(Mapping((addr_t)phys, e.base, e.size, flags));
}
};
core_elf.for_each_segment(lambda);
return ret;
}
void Platform::start_core()
void Platform::start_core(unsigned cpu_id)
{
typedef void (* Entry)();
typedef void (* Entry)(unsigned);
Entry __attribute__((noreturn)) const entry
= reinterpret_cast<Entry>(core_elf.entry());
entry();
entry(cpu_id);
}

View File

@ -135,8 +135,8 @@ class Bootstrap::Platform
Platform();
void enable_mmu();
void start_core() __attribute__((noreturn));
unsigned enable_mmu();
void start_core(unsigned) __attribute__((noreturn));
};
#endif /* _SRC__BOOTSTRAP__PLATFORM_H_ */

View File

@ -37,13 +37,20 @@ void Bootstrap::Cpu::enable_mmu_and_caches(Genode::addr_t table)
Dacr::write(Dacr::D0::bits(1));
Ttbr_64bit::access_t ttbr0 = Ttbr_64bit::Ba::masked(table);
Ttbr_64bit::access_t ttbr1 = Ttbr_64bit::Ba::masked(table);
Ttbr_64bit::Asid::set(ttbr0, 0);
Ttbr0_64bit::write(ttbr0);
Ttbr1_64bit::write(ttbr1);
Ttbcr::access_t ttbcr = 0;
Ttbcr::T0sz::set(ttbcr, 1);
Ttbcr::T1sz::set(ttbcr, 0);
Ttbcr::Irgn0::set(ttbcr, 1);
Ttbcr::Irgn1::set(ttbcr, 1);
Ttbcr::Orgn0::set(ttbcr, 1);
Ttbcr::Orgn1::set(ttbcr, 1);
Ttbcr::Sh0::set(ttbcr, 0b10);
Ttbcr::Sh1::set(ttbcr, 0b10);
Ttbcr::Eae::set(ttbcr, 1);
Ttbcr::write(ttbcr);

View File

@ -13,8 +13,10 @@
#include <platform.h>
void Bootstrap::Platform::enable_mmu()
unsigned Bootstrap::Platform::enable_mmu()
{
Cpu::Sctlr::init();
Cpu::enable_mmu_and_caches((addr_t)core_pd->table_base);
return 0;
}

View File

@ -95,7 +95,7 @@ struct Scu : Genode::Mmio
*
* See ARM's Cortex-A9 MPCore TRM r2p0 in section 5.3.5 for more details
*/
void Bootstrap::Platform::enable_mmu()
unsigned Bootstrap::Platform::enable_mmu()
{
using namespace Bootstrap;
@ -162,4 +162,6 @@ void Bootstrap::Platform::enable_mmu()
/* wait for other cores' coherency activation */
smp_coherency_enabled.wait_for(NR_OF_CPUS);
return Cpu::Mpidr::Aff_0::get(Cpu::Mpidr::read());
}

View File

@ -71,7 +71,7 @@ static inline void prepare_hypervisor(Genode::addr_t table)
using Cpu = Hw::Arm_cpu;
/* set hypervisor exception vector */
Cpu::Hvbar::write(0xfff00000); /* FIXME */
Cpu::Hvbar::write(Hw::Mm::hypervisor_exception_vector().base);
/* set hypervisor's translation table */
Cpu::Httbr_64bit::write(table);
@ -152,7 +152,7 @@ static inline void switch_to_supervisor_mode()
}
void Bootstrap::Platform::enable_mmu()
unsigned Bootstrap::Platform::enable_mmu()
{
static volatile bool primary_cpu = true;
pic.init_cpu_local();
@ -173,6 +173,8 @@ void Bootstrap::Platform::enable_mmu()
}
cpu.enable_mmu_and_caches((Genode::addr_t)core_pd->table_base);
return Cpu::Mpidr::Aff_0::get(Cpu::Mpidr::read());
}

View File

@ -44,7 +44,7 @@ Bootstrap::Platform::Board::Board()
Aipstz aipstz_2(AIPS_2_MMIO_BASE);
/* set exception vector entry */
Cpu::Mvbar::write(0xfff00000);
Cpu::Mvbar::write(Hw::Mm::system_exception_vector().base);
/* enable coprocessor 10 + 11 access for TZ VMs */
Cpu::Nsacr::access_t v = 0;

View File

@ -23,11 +23,12 @@ Bootstrap::Platform::Board::Board()
Memory_region { UART_2_MMIO_BASE, UART_2_MMIO_SIZE }) { }
void Bootstrap::Platform::enable_mmu()
unsigned Bootstrap::Platform::enable_mmu()
{
pic.init_cpu_local();
Cpu::Sctlr::init();
Cpu::Cpsr::init();
cpu.invalidate_data_cache();
cpu.enable_mmu_and_caches((Genode::addr_t)core_pd->table_base);
return 0;
}

View File

@ -20,8 +20,10 @@ Bootstrap::Platform::Board::Board()
: early_ram_regions(Memory_region { RAM_0_BASE, RAM_0_SIZE } ) {}
void Bootstrap::Platform::enable_mmu()
unsigned Bootstrap::Platform::enable_mmu()
{
using Sptbr = Hw::Riscv_cpu::Sptbr;
Sptbr::write(Sptbr::Ppn::masked((addr_t)core_pd->table_base >> 12));
return 0;
}

View File

@ -35,7 +35,7 @@ Bootstrap::Platform::Board::Board()
USB_DWC_OTG_SIZE }) {}
void Bootstrap::Platform::enable_mmu()
unsigned Bootstrap::Platform::enable_mmu()
{
struct Sctlr : Cpu::Sctlr
{
@ -73,13 +73,14 @@ void Bootstrap::Platform::enable_mmu()
/* do not use domains, but permission bits in table */
Cpu::Dacr::write(Cpu::Dacr::D0::bits(1));
Cpu::Ttbcr::write(0);
Cpu::Ttbcr::write(1);
Genode::addr_t table = (Genode::addr_t)core_pd->table_base;
Cpu::Ttbr::access_t ttbr0 = Cpu::Ttbr::Ba::masked(table);
Cpu::Ttbr::Rgn::set(ttbr0, Cpu::Ttbr::CACHEABLE);
Cpu::Ttbr::C::set(ttbr0, 1);
Cpu::Ttbr0::write(ttbr0);
Cpu::Ttbr::access_t ttbr = Cpu::Ttbr::Ba::masked(table);
Cpu::Ttbr::Rgn::set(ttbr, Cpu::Ttbr::CACHEABLE);
Cpu::Ttbr::C::set(ttbr, 1);
Cpu::Ttbr0::write(ttbr);
Cpu::Ttbr1::write(ttbr);
sctlr = Cpu::Sctlr::read();
Cpu::Sctlr::C::set(sctlr, 1);
@ -89,4 +90,6 @@ void Bootstrap::Platform::enable_mmu()
/* invalidate branch predictor */
Cpu::Bpiall::write(0);
return 0;
}

View File

@ -38,8 +38,8 @@ Bootstrap::Platform::Board::Board()
Aipstz aipstz_1(AIPS_1_MMIO_BASE);
Aipstz aipstz_2(AIPS_2_MMIO_BASE);
/* set exception vector entry */
Cpu::Mvbar::write(0xfff00000); //FIXME
/* set monitor mode exception vector entry */
Cpu::Mvbar::write(Hw::Mm::system_exception_vector().base);
/* enable coprocessor 10 + 11 access for TZ VMs */
Cpu::Nsacr::access_t v = 0;

View File

@ -20,6 +20,8 @@
#include <hw/spec/x86_64/cpu.h>
#include <hw/spec/x86_64/x86_64.h>
void Hw::Pml4_table::_invalidate_range(addr_t vo, size_t size) {}
namespace Bootstrap {
struct Pic {};
using Cpu = Hw::X86_64_cpu;

View File

@ -82,9 +82,9 @@
movl %eax, %cr0
/* Set up GDT */
movl $_mt_gdt_ptr+2, %eax
movl $_mt_gdt_start, (%eax)
lgdt _mt_gdt_ptr
movl $__gdt_ptr+2, %eax
movl $__gdt_start, (%eax)
lgdt __gdt_ptr
/* Indirect long jump to 64-bit code */
ljmp $8, $_start64

View File

@ -100,8 +100,11 @@ Bootstrap::Platform::Board::Board()
}
void Bootstrap::Platform::enable_mmu() {
Cpu::Cr3::write(Cpu::Cr3::Pdb::masked((addr_t)core_pd->table_base)); }
unsigned Bootstrap::Platform::enable_mmu()
{
Cpu::Cr3::write(Cpu::Cr3::Pdb::masked((addr_t)core_pd->table_base));
return 0;
}
addr_t Bios_data_area::_mmio_base_virt() { return 0x1ff000; }

View File

@ -42,8 +42,11 @@ Bootstrap::Platform::Board::Board()
}
void Bootstrap::Platform::enable_mmu() {
Cpu::Cr3::write(Cpu::Cr3::Pdb::masked((addr_t)core_pd->table_base)); }
unsigned Bootstrap::Platform::enable_mmu()
{
Cpu::Cr3::write(Cpu::Cr3::Pdb::masked((addr_t)core_pd->table_base));
return 0;
}
Board::Serial::Serial(Genode::addr_t, Genode::size_t, unsigned baudrate)

View File

@ -124,9 +124,6 @@ Cpu_job::~Cpu_job()
** Cpu_idle **
**************/
void Cpu_idle::proceed(unsigned const cpu) { mtc()->switch_to_user(this, cpu); }
void Cpu_idle::_main() { while (1) { Genode::Cpu::wait_for_interrupt(); } }
@ -239,9 +236,8 @@ Cpu_domain_update::Cpu_domain_update() {
* 3) The alignment that originates from 1) and 2) is assumed to be always
* less or equal to the minimum page size.
*/
enum { KERNEL_STACK_SIZE = 16 * 1024 * sizeof(Genode::addr_t) };
Genode::size_t kernel_stack_size = KERNEL_STACK_SIZE;
Genode::uint8_t kernel_stack[NR_OF_CPUS][KERNEL_STACK_SIZE]
Genode::size_t kernel_stack_size = Cpu::KERNEL_STACK_SIZE;
Genode::uint8_t kernel_stack[NR_OF_CPUS][Cpu::KERNEL_STACK_SIZE]
__attribute__((aligned(Genode::get_page_size())));
Cpu_context::Cpu_context(Hw::Page_table * const table)
@ -253,5 +249,5 @@ Cpu_context::Cpu_context(Hw::Page_table * const table)
* platform specific initialization, has to be done after
* setting the registers by now
*/
_init(KERNEL_STACK_SIZE, (addr_t)table);
_init(Cpu::KERNEL_STACK_SIZE, (addr_t)table);
}

View File

@ -282,6 +282,8 @@ class Kernel::Cpu : public Genode::Cpu, public Irq::Pool, private Timeout
public:
enum { KERNEL_STACK_SIZE = 16 * 1024 * sizeof(Genode::addr_t) };
/**
* Construct object for CPU 'id'
*/

View File

@ -34,12 +34,12 @@ Pd * Kernel::core_pd() {
Pic * Kernel::pic() { return unmanaged_singleton<Pic>(); }
extern "C" void _start();
extern "C" void kernel_init();
/**
* Setup kernel environment
*/
extern "C" void _start()
extern "C" void kernel_init()
{
static volatile bool initialized = false;
if (Cpu::executing_id()) while (!initialized) ;

View File

@ -1,77 +0,0 @@
/*
* \brief Kernel backend for protection domains
* \author Martin Stein
* \author Stefan Kalkowski
* \date 2012-11-30
*/
/*
* Copyright (C) 2012-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#include <base/internal/crt0.h>
#include <base/internal/unmanaged_singleton.h>
#include <base/log.h>
#include <hw/page_flags.h>
#include <hw/util.h>
#include <kernel/pd.h>
#include <util/construct_at.h>
#include <platform.h>
using namespace Kernel;
using Hw::Page_table;
using Genode::Platform;
/* structure of the mode transition */
extern int _mt_begin;
extern int _mt_user_entry_pic;
extern int _mt_client_context_ptr;
void Mode_transition_control::map(Page_table & tt,
Page_table::Allocator & alloc)
{
static addr_t const phys_base =
Platform::core_phys_addr((addr_t)&_mt_begin);
try {
tt.insert_translation(Cpu::exception_entry, phys_base, Cpu::mtc_size,
Hw::PAGE_FLAGS_KERN_EXCEP, alloc);
} catch(...) {
Genode::error("inserting exception vector in page table failed!"); }
}
void Mode_transition_control::switch_to(Cpu::Context * const context,
unsigned const cpu,
addr_t const entry_raw,
addr_t const context_ptr_base)
{
/* override client-context pointer of the executing CPU */
size_t const context_ptr_offset = cpu * sizeof(context);
addr_t const context_ptr = context_ptr_base + context_ptr_offset;
*(void * *)context_ptr = context;
/* call assembly code that applies the virtual-machine context */
typedef void (* Entry)();
Entry __attribute__((noreturn)) const entry = (Entry)entry_raw;
entry();
}
void Mode_transition_control::switch_to_user(Cpu::Context * const context,
unsigned const cpu)
{
static addr_t entry = (addr_t)Cpu::exception_entry
+ ((addr_t)&_mt_user_entry_pic
- (addr_t)&_mt_begin);
switch_to(context, cpu, entry, (addr_t)&_mt_client_context_ptr);
}
Mode_transition_control * Kernel::mtc() {
return unmanaged_singleton<Mode_transition_control>(); }

View File

@ -26,23 +26,6 @@ namespace Genode {
namespace Kernel
{
/**
* Controls the mode-transition page
*
* The mode transition page is a small memory region that is mapped by
* every PD to the same virtual address. It contains code that acts as a
* link between high privileged CPU mode (often called kernel) and low
* privileged CPU mode (often called userland). The mode transition
* control provides a simple interface to access the code from within
* the kernel.
*/
struct Mode_transition_control;
/**
* Return the system wide mode-transition control
*/
Mode_transition_control * mtc();
/**
* Kernel backend of protection domains
*/
@ -50,41 +33,6 @@ namespace Kernel
}
struct Kernel::Mode_transition_control
{
/**
* Map the mode transition page to a virtual address space
*
* \param tt translation buffer of the address space
* \param alloc translation table allocator used for the mapping
*/
void map(Hw::Page_table & tt,
Hw::Page_table::Allocator & alloc);
/**
* Continue execution of client context
*
* \param context targeted CPU context
* \param cpu kernel name of targeted CPU
* \param entry_raw raw pointer to assembly entry-code
* \param context_ptr_base base address of client-context pointer region
*/
void switch_to(Cpu::Context * const context,
unsigned const cpu,
addr_t const entry_raw,
addr_t const context_ptr_base);
/**
* Continue execution of user context
*
* \param context targeted CPU context
* \param cpu kernel name of targeted CPU
*/
void switch_to_user(Cpu::Context * const context,
unsigned const cpu);
};
class Kernel::Pd : public Cpu::Pd,
public Kernel::Object
{
@ -117,7 +65,7 @@ class Kernel::Pd : public Cpu::Pd,
/**
* Let the CPU context 'c' join the PD
*/
void admit(Cpu::Context * const c);
void admit(Cpu::Context & c);
static capid_t syscall_create(void * const dst,

View File

@ -144,9 +144,6 @@ Cpu_job * Thread::helping_sink() {
return static_cast<Thread *>(Ipc_node::helping_sink()); }
void Thread::proceed(unsigned const cpu) { mtc()->switch_to_user(this, cpu); }
size_t Thread::_core_to_kernel_quota(size_t const quota) const
{
using Genode::Cpu_session;
@ -203,7 +200,7 @@ void Thread::_call_start_thread()
/* join protection domain */
thread->_pd = (Pd *) user_arg_3();
thread->_pd->admit(thread);
thread->_pd->admit(*thread->regs);
thread->Ipc_node::_init((Native_utcb *)user_arg_4(), this);
thread->_become_active();
}
@ -666,13 +663,13 @@ Core_thread::Core_thread()
utcb->cap_add(cap_id_invalid());
/* start thread with stack pointer at the top of stack */
sp = (addr_t)&__initial_stack_base[0] + DEFAULT_STACK_SIZE;
ip = (addr_t)&_core_start;
regs->sp = (addr_t)&__initial_stack_base[0] + DEFAULT_STACK_SIZE;
regs->ip = (addr_t)&_core_start;
affinity(cpu_pool()->primary_cpu());
_utcb = utcb;
Thread::_pd = core_pd();
Thread::_pd->admit(this);
Thread::_pd->admit(*regs);
_become_active();
}

View File

@ -66,7 +66,7 @@ void Pager_object::unresolved_page_fault_occurred()
if (pt && pt->pd())
warning("page fault, pager_object: pd='", pt->pd()->label(),
"' thread='", pt->label(),
"' ip=", Hex(pt->kernel_object()->ip),
"' ip=", Hex(pt->kernel_object()->regs->ip),
" pf-addr=", Hex(pt->kernel_object()->fault_addr()));
}

View File

@ -85,16 +85,12 @@ Hw::Address_space::Address_space(Kernel::Pd & pd, Page_table & tt,
Hw::Address_space::Address_space(Kernel::Pd & pd)
: _tt(*construct_at<Page_table>(_table_alloc())),
: _tt(*construct_at<Page_table>(_table_alloc(), *((Page_table*)Hw::Mm::core_page_tables().base))),
_tt_phys((addr_t)_cma()->phys_addr(&_tt)),
_tt_array(new (_cma()) Array([this] (void * virt) {
return (addr_t)_cma()->phys_addr(virt);})),
_tt_alloc(_tt_array->alloc()),
_kernel_pd(pd)
{
Lock::Guard guard(_lock);
Kernel::mtc()->map(_tt, _tt_alloc);
}
_kernel_pd(pd) { }
Hw::Address_space::~Address_space()
@ -175,12 +171,6 @@ Platform_pd::~Platform_pd()
** Core_platform_pd implementation **
*************************************/
extern int _mt_master_context_begin;
Core_platform_pd::Core_platform_pd()
: Platform_pd(*(Hw::Page_table*)Hw::Mm::core_page_tables().base,
Platform::core_page_table_allocator())
{
Genode::construct_at<Kernel::Cpu_context>(&_mt_master_context_begin,
(Page_table*)translation_table_phys());
}
Platform::core_page_table_allocator()) { }

View File

@ -155,8 +155,8 @@ int Platform_thread::start(void * const ip, void * const sp)
}
/* initialize thread registers */
kernel_object()->ip = reinterpret_cast<addr_t>(ip);
kernel_object()->sp = reinterpret_cast<addr_t>(sp);
kernel_object()->regs->ip = reinterpret_cast<addr_t>(ip);
kernel_object()->regs->sp = reinterpret_cast<addr_t>(sp);
/* start executing new thread */
if (!_pd) {
@ -197,14 +197,14 @@ Genode::Pager_object * Platform_thread::pager() { return _pager; }
Thread_state Platform_thread::state()
{
Thread_state_base bstate(*kernel_object());
Thread_state_base bstate(*kernel_object()->regs);
return Thread_state(bstate);
}
void Platform_thread::state(Thread_state thread_state)
{
Cpu_state * cstate = static_cast<Cpu_state *>(kernel_object());
Cpu_state * cstate = static_cast<Cpu_state *>(&*kernel_object()->regs);
*cstate = static_cast<Cpu_state>(thread_state);
}

View File

@ -50,7 +50,7 @@ void Pager_entrypoint::entry()
continue;
}
_fault.ip = pt->kernel_object()->ip;
_fault.ip = pt->kernel_object()->regs->ip;
_fault.addr = pt->kernel_object()->fault_addr();
_fault.writes = pt->kernel_object()->fault_writes();

View File

@ -21,5 +21,5 @@ void Genode::Arm_cpu::User_context::init(bool privileged)
Psr::M::set(v, privileged ? Psr::M::SYS : Psr::M::USR);
Psr::F::set(v, 1);
Psr::A::set(v, 1);
cpsr = v;
regs->cpsr = v;
}

View File

@ -21,5 +21,5 @@ void Genode::Arm_cpu::User_context::init(bool privileged)
Psr::M::set(v, privileged ? Psr::M::SYS : Psr::M::USR);
Psr::I::set(v, 1);
Psr::A::set(v, 1);
cpsr = v;
regs->cpsr = v;
}

View File

@ -18,6 +18,7 @@
/* Genode includes */
#include <util/register.h>
#include <cpu/cpu_state.h>
#include <base/internal/align_at.h>
#include <hw/spec/arm/cpu.h>
@ -34,9 +35,6 @@ namespace Genode {
struct Genode::Arm_cpu : public Hw::Arm_cpu
{
static constexpr addr_t exception_entry = 0xffff0000;
static constexpr addr_t mtc_size = get_page_size();
/**
* Translation table base register 0
*/
@ -105,23 +103,25 @@ struct Genode::Arm_cpu : public Hw::Arm_cpu
/**
* An usermode execution state
*/
struct User_context : Context
struct User_context
{
Align_at<Context, 4> regs;
void init(bool privileged);
/**
* Support for kernel calls
*/
void user_arg_0(Kernel::Call_arg const arg) { r0 = arg; }
void user_arg_1(Kernel::Call_arg const arg) { r1 = arg; }
void user_arg_2(Kernel::Call_arg const arg) { r2 = arg; }
void user_arg_3(Kernel::Call_arg const arg) { r3 = arg; }
void user_arg_4(Kernel::Call_arg const arg) { r4 = arg; }
Kernel::Call_arg user_arg_0() const { return r0; }
Kernel::Call_arg user_arg_1() const { return r1; }
Kernel::Call_arg user_arg_2() const { return r2; }
Kernel::Call_arg user_arg_3() const { return r3; }
Kernel::Call_arg user_arg_4() const { return r4; }
void user_arg_0(Kernel::Call_arg const arg) { regs->r0 = arg; }
void user_arg_1(Kernel::Call_arg const arg) { regs->r1 = arg; }
void user_arg_2(Kernel::Call_arg const arg) { regs->r2 = arg; }
void user_arg_3(Kernel::Call_arg const arg) { regs->r3 = arg; }
void user_arg_4(Kernel::Call_arg const arg) { regs->r4 = arg; }
Kernel::Call_arg user_arg_0() const { return regs->r0; }
Kernel::Call_arg user_arg_1() const { return regs->r1; }
Kernel::Call_arg user_arg_2() const { return regs->r2; }
Kernel::Call_arg user_arg_3() const { return regs->r3; }
Kernel::Call_arg user_arg_4() const { return regs->r4; }
/**
* Initialize thread context
@ -131,8 +131,8 @@ struct Genode::Arm_cpu : public Hw::Arm_cpu
*/
void init_thread(addr_t const table, unsigned const pd_id)
{
protection_domain(pd_id);
translation_table(table);
regs->protection_domain(pd_id);
regs->translation_table(table);
}
/**
@ -150,9 +150,9 @@ struct Genode::Arm_cpu : public Hw::Arm_cpu
/* permission fault on page */
static constexpr Fsr::access_t permission = 0xf;
switch (cpu_exception) {
switch (regs->cpu_exception) {
case PREFETCH_ABORT:
case Context::PREFETCH_ABORT:
{
/* check if fault was caused by a translation miss */
Ifsr::access_t const fs = Fsr::Fs::get(Ifsr::read());
@ -161,10 +161,10 @@ struct Genode::Arm_cpu : public Hw::Arm_cpu
/* fetch fault data */
w = 0;
va = ip;
va = regs->ip;
return true;
}
case DATA_ABORT:
case Context::DATA_ABORT:
{
/* check if fault is of known type */
Dfsr::access_t const fs = Fsr::Fs::get(Dfsr::read());
@ -241,8 +241,18 @@ struct Genode::Arm_cpu : public Hw::Arm_cpu
** Dummies **
*************/
void switch_to(User_context&) { }
bool retry_undefined_instr(Context&) { return false; }
void switch_to(User_context & o)
{
if (o.regs->cidr == 0) return;
Cidr::access_t cidr = Cidr::read();
if (cidr != o.regs->cidr) {
Cidr::write(o.regs->cidr);
Ttbr0::write(o.regs->ttbr0);
}
}
bool retry_undefined_instr(User_context&) { return false; }
/**
* Return kernel name of the executing CPU

View File

@ -11,14 +11,36 @@
* under the terms of the GNU Affero General Public License version 3.
*/
/**************************
** .text (program code) **
**************************/
.section ".text"
/* program entry-point */
/***********************
** kernel entry code **
***********************/
.global _start
_start:
/* switch to cpu-specific kernel stack */
adr r1, _kernel_stack
adr r2, _kernel_stack_size
ldr r1, [r1]
ldr r2, [r2]
ldr r2, [r2]
add r0, #1
mul r0, r0, r2
add sp, r1, r0
/* jump into init C code */
b kernel_init
_kernel_stack: .long kernel_stack
_kernel_stack_size: .long kernel_stack_size
/*********************************
** core main thread entry code **
*********************************/
.global _core_start
_core_start:

View File

@ -0,0 +1,123 @@
/*
* \brief Transition between kernel/userland
* \author Martin Stein
* \author Stefan Kalkowski
* \date 2011-11-15
*/
/*
* Copyright (C) 2011-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/*********************
** Constant values **
*********************/
.set USR_MODE, 16
.set FIQ_MODE, 17
.set IRQ_MODE, 18
.set SVC_MODE, 19
.set ABT_MODE, 23
.set UND_MODE, 27
.set SYS_MODE, 31
.set RST_TYPE, 1
.set UND_TYPE, 2
.set SVC_TYPE, 3
.set PAB_TYPE, 4
.set DAB_TYPE, 5
.set IRQ_TYPE, 6
.set FIQ_TYPE, 7
.set RST_PC_ADJUST, 0
.set UND_PC_ADJUST, 4
.set SVC_PC_ADJUST, 0
.set PAB_PC_ADJUST, 4
.set DAB_PC_ADJUST, 8
.set IRQ_PC_ADJUST, 4
.set FIQ_PC_ADJUST, 4
/* offsets of CPU context members */
.set PC_OFFSET, 15 * 4
.set STACK_OFFSET, 17 * 4
/************
** Macros **
************/
/**
* Save an interrupted user context and switch to kernel context
*
* \param exception_type kernel name of exception type
* \param mode mode number of program status register
* \param pc_adjust value that gets subtracted from saved user PC
*/
.macro _user_to_kernel exception_type, mode, pc_adjust
cpsid f, #SVC_MODE /* disable interrupts and change to SVC mode */
stm sp, {r0-r14}^ /* the sp_svc contains the user context pointer */
add r0, sp, #PC_OFFSET
ldr sp, [sp, #STACK_OFFSET] /* restore kernel stack pointer */
cps #\mode
sub r1, lr, #\pc_adjust /* calculate user program counter */
mrs r2, spsr /* get user cpsr */
mov r3, #\exception_type
b _common_kernel_entry
.endm
.section .text.crt0
/***********************
** Exception entries **
***********************/
b _rst_entry /* 0x00: reset */
b _und_entry /* 0x04: undefined instruction */
b _svc_entry /* 0x08: supervisor call */
b _pab_entry /* 0x0c: prefetch abort */
b _dab_entry /* 0x10: data abort */
nop /* 0x14: reserved */
b _irq_entry /* 0x18: interrupt request */
/*
* Fast interrupt exception entry 0x1c.
*
* If the previous mode was not the user mode, it means a previous
* exception got interrupted by a fast interrupt.
* In that case, we disable fast interrupts and return to the
* previous exception handling routine.
*/
mrs r8, spsr
and r9, r8, #0b11111
cmp r9, #USR_MODE
cmpne r9, #SYS_MODE
beq _fiq_entry
orr r8, #0b1000000
msr spsr_cxsf, r8
subs pc, lr, #4
_rst_entry: _user_to_kernel RST_TYPE, SVC_MODE, RST_PC_ADJUST
_und_entry: _user_to_kernel UND_TYPE, UND_MODE, UND_PC_ADJUST
_svc_entry: _user_to_kernel SVC_TYPE, SVC_MODE, SVC_PC_ADJUST
_pab_entry: _user_to_kernel PAB_TYPE, ABT_MODE, PAB_PC_ADJUST
_dab_entry: _user_to_kernel DAB_TYPE, ABT_MODE, DAB_PC_ADJUST
_irq_entry: _user_to_kernel IRQ_TYPE, IRQ_MODE, IRQ_PC_ADJUST
_fiq_entry: _user_to_kernel FIQ_TYPE, FIQ_MODE, FIQ_PC_ADJUST
_common_kernel_entry:
stmia r0!, {r1-r3} /* save pc, cpsr and exception type */
clrex /* clear exclusive access needed for cmpxchg */
cps #SVC_MODE
adr lr, _kernel_entry
ldr lr, [lr]
bx lr
_kernel_entry:
.long kernel

View File

@ -24,9 +24,9 @@ using namespace Kernel;
Cpu_idle::Cpu_idle(Cpu * const cpu) : Cpu_job(Cpu_priority::MIN, 0)
{
Cpu_job::cpu(cpu);
cpu_exception = RESET;
ip = (addr_t)&_main;
sp = (addr_t)&_stack[stack_size];
regs->cpu_exception = Cpu::Context::RESET;
regs->ip = (addr_t)&_main;
regs->sp = (addr_t)&_stack[stack_size];
init_thread((addr_t)core_pd()->translation_table(), core_pd()->asid);
init(true);
}
@ -34,9 +34,25 @@ Cpu_idle::Cpu_idle(Cpu * const cpu) : Cpu_job(Cpu_priority::MIN, 0)
void Cpu_idle::exception(unsigned const cpu)
{
switch (cpu_exception) {
case INTERRUPT_REQUEST: _interrupt(cpu); return;
case FAST_INTERRUPT_REQUEST: _interrupt(cpu); return;
case RESET: return;
switch (regs->cpu_exception) {
case Cpu::Context::INTERRUPT_REQUEST: _interrupt(cpu); return;
case Cpu::Context::FAST_INTERRUPT_REQUEST: _interrupt(cpu); return;
case Cpu::Context::RESET: return;
default: Genode::raw("Unknown exception in idle thread"); }
}
extern void * kernel_stack;
void Cpu_idle::proceed(unsigned const cpu)
{
regs->cpu_exception = (addr_t)&kernel_stack + Cpu::KERNEL_STACK_SIZE * (cpu+1);
asm volatile("mov sp, %0 \n"
"msr spsr_cxsf, %1 \n"
"mov lr, %2 \n"
"ldm sp, {r0-r14}^ \n"
"subs pc, lr, #0 \n"
:: "r" (static_cast<Cpu::Context*>(&*regs)),
"r" (regs->cpsr), "r" (regs->ip));
}

View File

@ -48,8 +48,8 @@ Kernel::Pd::~Pd() {
}
void Kernel::Pd::admit(Kernel::Cpu::Context * const c)
void Kernel::Pd::admit(Kernel::Cpu::Context & c)
{
c->protection_domain(asid);
c->translation_table((addr_t)translation_table());
c.protection_domain(asid);
c.translation_table((addr_t)translation_table());
}

View File

@ -21,35 +21,35 @@ using namespace Kernel;
void Kernel::Thread::_init()
{
init(_core);
cpu_exception = RESET;
regs->cpu_exception = Cpu::Context::RESET;
}
void Thread::exception(unsigned const cpu)
{
switch (cpu_exception) {
case SUPERVISOR_CALL:
switch (regs->cpu_exception) {
case Cpu::Context::SUPERVISOR_CALL:
_call();
return;
case PREFETCH_ABORT:
case DATA_ABORT:
case Cpu::Context::PREFETCH_ABORT:
case Cpu::Context::DATA_ABORT:
_mmu_exception();
return;
case INTERRUPT_REQUEST:
case FAST_INTERRUPT_REQUEST:
case Cpu::Context::INTERRUPT_REQUEST:
case Cpu::Context::FAST_INTERRUPT_REQUEST:
_interrupt(cpu);
return;
case UNDEFINED_INSTRUCTION:
case Cpu::Context::UNDEFINED_INSTRUCTION:
if (_cpu->retry_undefined_instr(*this)) { return; }
Genode::warning(*this, ": undefined instruction at ip=",
Genode::Hex(ip));
Genode::Hex(regs->ip));
_die();
return;
case RESET:
case Cpu::Context::RESET:
return;
default:
Genode::warning(*this, ": triggered an unknown exception ",
cpu_exception);
regs->cpu_exception);
_die();
return;
}
@ -68,18 +68,19 @@ void Thread::_mmu_exception()
*/
if (_pd == Kernel::core_pd())
Genode::error("page fault in core thread (", label(), "): "
"ip=", Genode::Hex(ip), " fault=", Genode::Hex(_fault_addr));
"ip=", Genode::Hex(regs->ip), " fault=", Genode::Hex(_fault_addr));
if (_pager) _pager->submit(1);
return;
}
bool da = regs->cpu_exception == Cpu::Context::DATA_ABORT;
Genode::error(*this, ": raised unhandled ",
cpu_exception == DATA_ABORT ? "data abort" : "prefetch abort", " "
da ? "data abort" : "prefetch abort", " "
"DFSR=", Genode::Hex(Cpu::Dfsr::read()), " "
"ISFR=", Genode::Hex(Cpu::Ifsr::read()), " "
"DFAR=", Genode::Hex(Cpu::Dfar::read()), " "
"ip=", Genode::Hex(ip), " "
"sp=", Genode::Hex(sp));
"ip=", Genode::Hex(regs->ip), " "
"sp=", Genode::Hex(regs->sp));
}
@ -130,3 +131,19 @@ void Kernel::Thread::_call_update_instr_region()
cpu->clean_invalidate_data_cache_by_virt_region(base, size);
cpu->invalidate_instr_cache_by_virt_region(base, size);
}
extern void * kernel_stack;
void Thread::proceed(unsigned const cpu)
{
regs->cpu_exception = (addr_t)&kernel_stack + Cpu::KERNEL_STACK_SIZE * (cpu+1);
asm volatile("mov sp, %0 \n"
"msr spsr_cxsf, %1 \n"
"mov lr, %2 \n"
"ldm sp, {r0-r14}^ \n"
"subs pc, lr, #0 \n"
:: "r" (static_cast<Cpu::Context*>(&*regs)),
"r" (regs->cpsr), "r" (regs->ip));
}

View File

@ -1,140 +0,0 @@
/*
* \brief Macros that are used by multiple assembly files
* \author Martin Stein
* \date 2014-01-13
*/
/*
* Copyright (C) 2014-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/**
* Get base of the first kernel-stack and the common kernel-stack size
*
* \param base_dst_reg register that shall receive the stack-area base
* \param size_dst_reg register that shall receive the size of a kernel stack
*/
.macro _get_constraints_of_kernel_stacks base_dst_reg, size_dst_reg
ldr \base_dst_reg, =kernel_stack
ldr \size_dst_reg, =kernel_stack_size
ldr \size_dst_reg, [\size_dst_reg]
.endm
/**
* Calculate and apply kernel SP for a given kernel-stacks area
*
* \base_reg register that contains the base of the kernel-stacks area
* \size_reg register that contains the size of one kernel stack
*/
.macro _init_kernel_sp base_reg, size_reg
/* get kernel name of CPU */
_get_cpu_id sp
/* calculate top of the kernel-stack of this CPU and apply as SP */
add sp, #1
mul \size_reg, \size_reg, sp
add sp, \base_reg, \size_reg
.endm
/**
* Restore kernel SP from a given kernel context
*
* \context_reg register that contains the base of the kernel context
* \buf_reg_* registers that can be used as local buffers
*/
.macro _restore_kernel_sp context_reg, buf_reg_0, buf_reg_1
/* get base of the kernel-stacks area and the kernel-stack size */
add sp, \context_reg, #R12_OFFSET
ldm sp, {\buf_reg_0, \buf_reg_1}
/* calculate and apply kernel SP */
_init_kernel_sp \buf_reg_1, \buf_reg_0
.endm
/***************************************************
** Constant values that are pretty commonly used **
***************************************************/
/* alignment constraints */
.set MIN_PAGE_SIZE_LOG2, 12
.set DATA_ACCESS_ALIGNM_LOG2, 2
/***************************************************
** Constant values that the mode transition uses **
***************************************************/
/* kernel names of exceptions that can interrupt a user */
.set RST_TYPE, 1
.set UND_TYPE, 2
.set SVC_TYPE, 3
.set PAB_TYPE, 4
.set DAB_TYPE, 5
.set IRQ_TYPE, 6
.set FIQ_TYPE, 7
.set RST_PC_ADJUST, 0
.set UND_PC_ADJUST, 4
.set SVC_PC_ADJUST, 0
.set PAB_PC_ADJUST, 4
.set DAB_PC_ADJUST, 8
.set IRQ_PC_ADJUST, 4
.set FIQ_PC_ADJUST, 4
/* offsets of the member variables in a CPU context */
.set R12_OFFSET, 12 * 4
.set SP_OFFSET, 13 * 4
.set LR_OFFSET, 14 * 4
.set PC_OFFSET, 15 * 4
.set PSR_OFFSET, 16 * 4
.set EXCEPTION_TYPE_OFFSET, 17 * 4
.set TRANSIT_TTBR0_OFFSET, 17 * 4
.set CIDR_OFFSET, 18 * 4
.set TTBR0_OFFSET, 19 * 4
.set TTBCR_OFFSET, 20 * 4
.set MAIR0_OFFSET, 21 * 4
/* size of local variables */
.set CONTEXT_PTR_SIZE, 1 * 4
/*********************************************************
** Local data structures that the mode transition uses **
*********************************************************/
.macro _mt_local_variables
/* space for a copy of the kernel context */
.p2align 2
.global _mt_master_context_begin
_mt_master_context_begin:
.space 32 * 4
.global _mt_master_context_end
_mt_master_context_end:
/* space for a client context-pointer per CPU */
.p2align 2
.global _mt_client_context_ptr
_mt_client_context_ptr:
.rept NR_OF_CPUS
.space CONTEXT_PTR_SIZE
.endr
/* a globally mapped buffer per CPU */
.p2align 2
.global _mt_buffer
_mt_buffer:
.rept NR_OF_CPUS
.space BUFFER_SIZE
.endr
.endm /* _mt_local_variables */

View File

@ -1,24 +0,0 @@
/*
* \brief Macros that are used by multiple assembly files
* \author Martin Stein
* \date 2014-01-13
*/
/*
* Copyright (C) 2014-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* core includes */
.include "spec/arm/macros_support.s"
/**
* Load kernel name of the executing CPU into register 'r'
*/
.macro _get_cpu_id r
/* no multiprocessing supported for ARMv6 */
mov \r, #0
.endm

View File

@ -1,236 +0,0 @@
/*
* \brief Transition between kernel and userland
 * \author Martin Stein
* \date 2011-11-15
*/
/*
* Copyright (C) 2011-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* core includes */
.include "macros.s"
/***************
** Constants **
***************/
/* size of local variables */
.set BUFFER_SIZE, 1 * 4
/************
** Macros **
************/
/**
* Invalidate all entries of the branch prediction cache
*
* FIXME branch prediction shall not be activated for now because we have no
* support for instruction barriers. The manual says that one should
* implement this via 'swi 0xf00000', but when we do this in SVC mode it
* pollutes our SP and this is not acceptable with the current mode
* transition implementation
*/
.macro _flush_branch_predictor
mcr p15, 0, sp, c7, c5, 6
/*swi 0xf00000 */
.endm
/**
* Switch from an interrupted user context to a kernel context
*
* \param exception_type immediate exception type ID
* \param pc_adjust immediate value that gets subtracted from the
* user PC before it gets saved
*/
.macro _user_to_kernel_pic exception_type, pc_adjust
/*
* We expect that privileged modes are never interrupted by an
* exception. Thus we can assume that we always come from
* user mode at this point.
*/
/************************************************
** We're still in the user protection domain, **
** so we must avoid access to kernel memory **
************************************************/
/* load kernel cidr */
adr sp, _mt_master_context_begin
ldr sp, [sp, #CIDR_OFFSET]
mcr p15, 0, sp, c13, c0, 1
_flush_branch_predictor
/* load kernel ttbr0 */
adr sp, _mt_master_context_begin
ldr sp, [sp, #TTBR0_OFFSET]
mcr p15, 0, sp, c2, c0, 0
_flush_branch_predictor
/*******************************************
 ** Now it's safe to access kernel memory **
*******************************************/
/* get user context pointer */
ldr sp, _mt_client_context_ptr
/*
 * Save user r0 ... r12. We explicitly target user registers
* via '^' because we might be in FIQ exception-mode where
* some of them are banked. Doesn't affect other modes.
*/
stmia sp, {r0-r12}^
/* save user lr and sp */
add r0, sp, #SP_OFFSET
stmia r0, {sp,lr}^
/* adjust and save user pc */
.if \pc_adjust != 0
sub lr, lr, #\pc_adjust
.endif
str lr, [sp, #PC_OFFSET]
/* save user psr */
mrs r0, spsr
str r0, [sp, #PSR_OFFSET]
/* save type of exception that interrupted the user */
mov r0, #\exception_type
str r0, [sp, #EXCEPTION_TYPE_OFFSET]
/*
* Switch to supervisor mode
*
* FIXME This is done due to incorrect behavior when running the kernel
* high-level-code in FIQ-exception mode. Please debug this behavior
* and remove this switch.
*/
cps #19
/* apply kernel sp */
adr r0, _mt_master_context_begin
_restore_kernel_sp r0, r1, r2
/* load kernel context */
add r0, r0, #LR_OFFSET
ldm r0, {lr, pc}
.endm
/**********************************
** Linked into the text section **
**********************************/
.section .text
/*
* Page aligned base of mode transition code.
*
* This position independent code switches between a kernel context and a
* user context and thereby between their address spaces. Due to the latter
* it must be mapped executable to the same region in every address space.
* To enable such switching, the kernel context must be stored within this
 * region, thus one should map it solely accessible for privileged modes.
*/
.p2align MIN_PAGE_SIZE_LOG2
.global _mt_begin
_mt_begin:
/*
* On user exceptions the CPU has to jump to one of the following
* seven entry vectors to switch to a kernel context.
*/
.global _mt_kernel_entry_pic
_mt_kernel_entry_pic:
b _rst_entry /* 0x00: reset */
b _und_entry /* 0x04: undefined instruction */
b _swi_entry /* 0x08: software interrupt */
b _pab_entry /* 0x0c: prefetch abort */
b _dab_entry /* 0x10: data abort */
nop /* 0x14: reserved */
b _irq_entry /* 0x18: interrupt request */
b _fiq_entry /* 0x1c: fast interrupt request */
/* PICs that switch from a user exception to the kernel */
_rst_entry: _user_to_kernel_pic RST_TYPE, RST_PC_ADJUST
_und_entry: _user_to_kernel_pic UND_TYPE, UND_PC_ADJUST
_swi_entry:
/*
* FIXME fast SWI routines pollute the SVC SP but we have
* to call them especially in SVC mode
*/
/* check if SWI requests a fast service routine */
/*ldr sp, [r14, #-0x4]*/
/*and sp, sp, #0xffffff*/
/* fast "instruction barrier" service routine */
/*cmp sp, #0xf00000*/
/*bne _slow_swi_entry*/
/*movs pc, r14*/
/* slow high level service routine */
_slow_swi_entry: _user_to_kernel_pic SVC_TYPE, SVC_PC_ADJUST
_pab_entry: _user_to_kernel_pic PAB_TYPE, PAB_PC_ADJUST
_dab_entry: _user_to_kernel_pic DAB_TYPE, DAB_PC_ADJUST
_irq_entry: _user_to_kernel_pic IRQ_TYPE, IRQ_PC_ADJUST
_fiq_entry: _user_to_kernel_pic FIQ_TYPE, FIQ_PC_ADJUST
/* kernel must jump to this point to switch to a user context */
.p2align 2
.global _mt_user_entry_pic
_mt_user_entry_pic:
/* get user context pointer */
ldr lr, _mt_client_context_ptr
/* buffer user pc */
ldr r0, [lr, #PC_OFFSET]
adr r1, _mt_buffer
str r0, [r1]
/* buffer user psr */
ldr r0, [lr, #PSR_OFFSET]
msr spsr_cxsf, r0
/* load user r0 ... r12 */
ldm lr, {r0-r12}
/* load user sp and lr */
add sp, lr, #SP_OFFSET
ldm sp, {sp,lr}^
/* get user cidr and ttbr0 */
ldr sp, [lr, #CIDR_OFFSET]
ldr lr, [lr, #TTBR0_OFFSET]
/********************************************************
** From now on, until we leave kernel mode, we must **
** avoid access to memory that is not mapped globally **
********************************************************/
/* apply user contextidr and section table */
mcr p15, 0, sp, c13, c0, 1
mcr p15, 0, lr, c2, c0, 0
_flush_branch_predictor
/* load user pc (implies application of the user psr) */
adr lr, _mt_buffer
ldm lr, {pc}^
_mt_local_variables
.p2align 2
.global _mt_end
_mt_end:

View File

@ -1,86 +0,0 @@
/*
* \brief Macros that are used by multiple assembly files
* \author Martin Stein
* \date 2014-01-13
*/
/*
* Copyright (C) 2014-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* core includes */
.include "spec/arm/macros_support.s"
/**
* Load kernel name of the executing CPU into register 'r'
*/
.macro _get_cpu_id r
/* read the multiprocessor affinity register */
mrc p15, 0, \r, c0, c0, 5
/* get the affinity-0 bitfield from the read register value */
and \r, \r, #0xff
.endm
/**
* Determine the base of the client context of the executing CPU
*
* \param target_reg register that shall receive the base pointer
* \param buf_reg register that can be polluted by the macro
* \param client_context_ptr label of the client context pointer base
*/
.macro _get_client_context_ptr target_reg, buf_reg, client_context_ptr
/* get kernel name of CPU */
_get_cpu_id \buf_reg
/* multiply CPU name with pointer size to get offset of pointer */
mov \target_reg, #CONTEXT_PTR_SIZE
mul \buf_reg, \buf_reg, \target_reg
/* get base of the pointer array */
adr \target_reg, \client_context_ptr
/* add offset and base to get CPU-local pointer */
add \target_reg, \target_reg, \buf_reg
ldr \target_reg, [\target_reg]
.endm
/**
* Save sp, lr and spsr register banks of specified exception mode
*/
.macro _save_bank mode
cps #\mode /* switch to given mode */
mrs r1, spsr /* store mode-specific spsr */
stmia r0!, {r1,sp,lr} /* store mode-specific sp and lr */
.endm /* _save_bank mode */
/**
* Restore sp, lr and spsr register banks of specified exception mode
*/
.macro _restore_bank mode
cps #\mode /* switch to given mode */
ldmia r0!, {r1,sp,lr} /* load mode-specific sp, lr, and spsr into r1 */
msr spsr_cxfs, r1 /* load mode-specific spsr */
.endm
/***************
** Constants **
***************/
/* hardware names of CPU modes */
.set USR_MODE, 16
.set FIQ_MODE, 17
.set IRQ_MODE, 18
.set SVC_MODE, 19
.set ABT_MODE, 23
.set UND_MODE, 27

View File

@ -1,282 +0,0 @@
/*
* \brief Transition between kernel/userland, and secure/non-secure world
* \author Martin Stein
* \author Stefan Kalkowski
* \date 2011-11-15
*/
/*
* Copyright (C) 2011-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* core includes */
.include "macros.s"
/* size of local variables */
.set BUFFER_SIZE, 3 * 4
/************
** Macros **
************/
/**
* Determine the base of the globally mapped buffer of the executing CPU
*
* \param target_reg register that shall receive the base pointer
* \param buf_reg register that can be polluted by the macro
*/
.macro _get_buffer_ptr target_reg, buf_reg
/* get kernel name of CPU */
_get_cpu_id \buf_reg
/* multiply CPU name with buffer size to get offset of buffer */
mov \target_reg, #BUFFER_SIZE
mul \buf_reg, \buf_reg, \target_reg
/* get base of the buffer array */
adr \target_reg, _mt_buffer
/* add offset and base to get CPU-local buffer */
add \target_reg, \target_reg, \buf_reg
.endm
/**
* Save an interrupted user context and switch to the kernel context
*
* \param exception_type kernel name of exception type
* \param pc_adjust value that gets subtracted from saved user PC
*/
.macro _user_to_kernel_pic exception_type, pc_adjust
/* disable fast interrupts when not in fast-interrupt mode */
.if \exception_type != FIQ_TYPE
cpsid f
.endif
/*
* The sp in svc mode still contains the base of the globally mapped buffer
* of this CPU. Hence go to svc mode, buffer user r0-r2, and make
* buffer pointer available to all modes
*/
.if \exception_type != RST_TYPE && \exception_type != SVC_TYPE
cps #SVC_MODE
.endif
stm sp, {r0-r2}^
mov r0, sp
/* switch back to previous privileged mode */
.if \exception_type == UND_TYPE
cps #UND_MODE
.endif
.if \exception_type == PAB_TYPE
cps #ABT_MODE
.endif
.if \exception_type == DAB_TYPE
cps #ABT_MODE
.endif
.if \exception_type == IRQ_TYPE
cps #IRQ_MODE
.endif
.if \exception_type == FIQ_TYPE
cps #FIQ_MODE
.endif
/* switch to kernel protection-domain */
adr sp, _mt_master_context_begin
add sp, #TRANSIT_TTBR0_OFFSET
ldm sp, {r1, r2, sp}
_switch_protection_domain r1, r2, sp
/* get user context-pointer */
_get_client_context_ptr sp, r1, _mt_client_context_ptr
/* adjust and save user pc */
.if \pc_adjust != 0
sub lr, lr, #\pc_adjust
.endif
str lr, [sp, #PC_OFFSET]
/* restore user r0-r2 from buffer and save user r0-r12 */
mov lr, r0
ldm lr, {r0-r2}
stm sp, {r0-r12}^
/* save user sp and user lr */
add r0, sp, #SP_OFFSET
stm r0, {sp, lr}^
/* get user psr and type of exception that interrupted the user */
mrs r0, spsr
mov r1, #\exception_type
b _common_user_to_kernel_pic
.endm /* _user_to_kernel_pic */
/**********************************
** Linked into the text section **
**********************************/
.section .text
/*
* Page aligned base of mode transition code.
*
* This position independent code switches between a kernel context and a
* user context and thereby between their address spaces. Due to the latter
* it must be mapped executable to the same region in every address space.
* To enable such switching, the kernel context must be stored within this
 * region, thus one should map it solely accessible for privileged modes.
*/
.p2align MIN_PAGE_SIZE_LOG2
.global _mt_begin
_mt_begin:
/*
* On user exceptions the CPU has to jump to one of the following
* seven entry vectors to switch to a kernel context.
*/
.global _mt_kernel_entry_pic
_mt_kernel_entry_pic:
/***********************
** Exception entries **
***********************/
b _rst_entry /* 0x00: reset */
b _und_entry /* 0x04: undefined instruction */
b _svc_entry /* 0x08: supervisor call */
b _pab_entry /* 0x0c: prefetch abort */
b _dab_entry /* 0x10: data abort */
nop /* 0x14: reserved */
b _irq_entry /* 0x18: interrupt request */
/******************************************************
** Entry for fast interrupt requests at offset 0x1c **
******************************************************/
/* load the saved PSR of the previous mode */
mrs r8, spsr
/* get the M bitfield from the read PSR value */
and r9, r8, #0b11111
/* skip following instructions if previous mode was user mode */
cmp r9, #USR_MODE
beq 1f
/*
* If we reach this point, the previous mode was not the user
* mode, meaning an exception entry has been preempted by this
* fast interrupt before it could disable fast interrupts.
*/
/* disable fast interrupts in PSR value of previous mode */
orr r8, #0b1000000
/* apply PSR of previous mode */
msr spsr_cxsf, r8
/*
 * Resume execution of previous exception entry leaving the fast
* interrupt unhandled till fast interrupts get enabled again.
*/
subs pc, lr, #4
/* switch to kernel to handle the fast interrupt */
1:
_user_to_kernel_pic FIQ_TYPE, FIQ_PC_ADJUST
/***************************************************************
** Code that switches from a non-FIQ exception to the kernel **
***************************************************************/
_rst_entry: _user_to_kernel_pic RST_TYPE, RST_PC_ADJUST
_und_entry: _user_to_kernel_pic UND_TYPE, UND_PC_ADJUST
_svc_entry: _user_to_kernel_pic SVC_TYPE, SVC_PC_ADJUST
_pab_entry: _user_to_kernel_pic PAB_TYPE, PAB_PC_ADJUST
_dab_entry: _user_to_kernel_pic DAB_TYPE, DAB_PC_ADJUST
_irq_entry: _user_to_kernel_pic IRQ_TYPE, IRQ_PC_ADJUST
/**************************************************************
** Kernel-entry code that is common for all user exceptions **
**************************************************************/
_common_user_to_kernel_pic:
/* save user psr and type of exception that interrupted the user */
add sp, sp, #PSR_OFFSET
stm sp, {r0, r1}
/*
* Clear exclusive access in local monitor,
* as we use strex/ldrex in our cmpxchg method, we've to do this during
* a context switch (ARM Reference Manual paragraph 3.4.4.)
*/
clrex
/*
* Switch to supervisor mode to circumvent incorrect behavior of
* kernel high-level code in fast interrupt mode and to ensure that
* we're in svc mode at kernel exit. The latter is because kernel
* exit stores a buffer pointer into its banked sp that is also
* needed by the subsequent kernel entry.
*/
cps #SVC_MODE
/* apply kernel sp */
adr r0, _mt_master_context_begin
_restore_kernel_sp r0, r1, r2
/* apply kernel lr and kernel pc */
add r1, r0, #LR_OFFSET
ldm r1, {lr, pc}
_mt_local_variables
/****************************************************************
** Code that switches from a kernel context to a user context **
****************************************************************/
.p2align 2
.global _mt_user_entry_pic
_mt_user_entry_pic:
/* get user context and globally mapped buffer of this CPU */
_get_client_context_ptr lr, r0, _mt_client_context_ptr
_get_buffer_ptr sp, r0
/* load user psr in spsr */
ldr r0, [lr, #PSR_OFFSET]
msr spsr_cxsf, r0
/* apply banked user sp, banked user lr, and user r0-r12 */
add r0, lr, #SP_OFFSET
ldm r0, {sp, lr}^
ldm lr, {r0-r12}^
/* buffer user r0-r1, and user pc */
stm sp, {r0, r1}
ldr r0, [lr, #PC_OFFSET]
str r0, [sp, #2*4]
/* switch to user protection-domain */
adr r0, _mt_master_context_begin
ldr r0, [r0, #TRANSIT_TTBR0_OFFSET]
add lr, lr, #CIDR_OFFSET
ldm lr, {r1, lr}
_switch_protection_domain r0, r1, lr
/* apply user r0-r1 and user pc which implies application of spsr */
ldm sp, {r0, r1, pc}^
/* end of the mode transition code */
.global _mt_end
_mt_end:

View File

@ -0,0 +1,120 @@
/*
* \brief TrustZone monitor mode exception vector
* \author Stefan Kalkowski
* \date 2015-02-16
*/
/*
* Copyright (C) 2015-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/*********************
** Constant values **
*********************/
.set RST_TYPE, 1
.set UND_TYPE, 2
.set SVC_TYPE, 3
.set PAB_TYPE, 4
.set DAB_TYPE, 5
.set IRQ_TYPE, 6
.set FIQ_TYPE, 7
.set LR_OFFSET, 14 * 4
/**
* Switch from normal into secure world
*
* \param exception_type immediate exception type ID
* \param pc_adjust immediate value that gets subtracted from the
* vm's PC before it gets saved
*/
.macro _nonsecure_to_secure exception_type, pc_adjust
stmia sp, {r0-lr}^ /* save user regs r0-r12,sp,lr */
add r0, sp, #15*4
.if \pc_adjust != 0 /* adjust pc if necessary */
sub lr, lr, #\pc_adjust
.endif
stmia r0!, {lr} /* save pc */
mrs r1, spsr /* spsr to r0 */
mov r2, #\exception_type /* exception reason to r1 */
b _nonsecure_kernel_entry
.endm /* _non_to_secure */
.section .text
.p2align 12
.global monitor_mode_exception_vector
monitor_mode_exception_vector:
b _mon_rst_entry /* reset */
b _mon_und_entry /* undefined instruction */
b _mon_svc_entry /* supervisor call */
b _mon_pab_entry /* prefetch abort */
b _mon_dab_entry /* data abort */
nop /* reserved */
b _mon_irq_entry /* interrupt request */
_nonsecure_to_secure FIQ_TYPE, 4 /* fast interrupt request */
_mon_rst_entry: _nonsecure_to_secure RST_TYPE, 0
_mon_und_entry: _nonsecure_to_secure UND_TYPE, 4
_mon_svc_entry: _nonsecure_to_secure SVC_TYPE, 0
_mon_pab_entry: _nonsecure_to_secure PAB_TYPE, 4
_mon_dab_entry: _nonsecure_to_secure DAB_TYPE, 8
_mon_irq_entry: _nonsecure_to_secure IRQ_TYPE, 4
_nonsecure_kernel_entry:
ldr lr, [sp, #17*4] /* load kernel sp from vm context */
stmia r0!, {r1-r2} /* save spsr, and exception reason */
mrc p15, 0, r3, c6, c0, 0 /* move DFAR to r3 */
mrc p15, 0, r4, c2, c0, 0 /* move TTBR0 to r4 */
mrc p15, 0, r5, c2, c0, 1 /* move TTBR1 to r5 */
mrc p15, 0, r6, c2, c0, 2 /* move TTBRC to r6 */
mov r1, #0
mcr p15, 0, r1, c1, c1, 0 /* disable non-secure bit */
.irp mode,27,19,23,18,17 /* save mode specific registers */
cps #\mode /* switch to given mode */
mrs r1, spsr /* store mode-specific spsr */
stmia r0!, {r1,sp,lr} /* store mode-specific sp and lr */
.endr
stmia r0!, {r8-r12} /* save fiq r8-r12 */
stmia r0!, {r3-r6} /* save MMU registers */
cps #22 /* switch back to monitor mode */
mov r0, #0b111010011 /* spsr to SVC mode, irqs masked */
msr spsr_cxsf, r0
mov r1, lr
cps #19
mov sp, r1
cps #22
adr lr, _kernel_entry
ldr lr, [lr]
subs pc, lr, #0 /* jump back into kernel */
_kernel_entry: .long kernel
/* jump to this point to switch to TrustZone's normal world */
.global monitor_mode_enter_normal_world
monitor_mode_enter_normal_world:
cps #22 /* switch to monitor mode */
mov sp, r0 /* store vm context pointer */
mov lr, r1 /* store kernel sp temporarily */
add r0, r0, #18*4 /* add offset of banked modes */
.irp mode,27,19,23,18,17 /* save mode specific registers */
cps #\mode /* switch to given mode */
ldmia r0!, {r2,sp,lr} /* load mode's sp, lr, and spsr */
msr spsr_cxfs, r2 /* load mode's spsr */
.endr
ldmia r0!, {r8 - r12} /* load fiq r8-r12 */
cps #22 /* switch to monitor mode */
ldmia sp, {r0-lr}^ /* load user r0-r12,sp,lr */
str lr, [sp, #17*4] /* store kernel sp in vm context */
ldr lr, [sp, #16*4] /* load vm's cpsr to lr */
msr spsr_cxfs, lr /* save cpsr to be load */
mov lr, #13
mcr p15, 0, lr, c1, c1, 0 /* enable EA, FIQ, NS bit in SCTRL */
ldr lr, [sp, #15*4] /* load vm's ip */
subs pc, lr, #0

View File

@ -15,11 +15,6 @@
/* core includes */
#include <kernel/vm.h>
extern void * _mt_nonsecure_entry_pic;
extern Genode::addr_t _tz_client_context;
extern Genode::addr_t _mt_master_context_begin;
extern Genode::addr_t _tz_master_context;
using namespace Kernel;
@ -28,13 +23,8 @@ Kernel::Vm::Vm(void * const state,
void * const table)
: Cpu_job(Cpu_priority::MIN, 0),
_state((Genode::Vm_state * const)state),
_context(context), _table(0)
{
affinity(cpu_pool()->primary_cpu());
Genode::memcpy(&_tz_master_context, &_mt_master_context_begin,
sizeof(Cpu_context));
}
_context(context), _table(0) {
affinity(cpu_pool()->primary_cpu()); }
Kernel::Vm::~Vm() {}
@ -58,6 +48,11 @@ void Vm::exception(unsigned const cpu)
bool secure_irq(unsigned const i);
extern "C" void monitor_mode_enter_normal_world(Cpu::Context*, void*);
extern void * kernel_stack;
void Vm::proceed(unsigned const cpu)
{
unsigned const irq = _state->irq_injection;
@ -69,7 +64,7 @@ void Vm::proceed(unsigned const cpu)
_state->irq_injection = 0;
}
}
mtc()->switch_to(reinterpret_cast<Cpu::Context*>(_state), cpu,
(addr_t)&_mt_nonsecure_entry_pic,
(addr_t)&_tz_client_context);
void * stack = (void*)((addr_t)&kernel_stack + Cpu::KERNEL_STACK_SIZE * (cpu+1));
monitor_mode_enter_normal_world(reinterpret_cast<Cpu::Context*>(_state), stack);
}

View File

@ -1,124 +0,0 @@
/*
 * \brief Transition between secure/normal world
* \author Stefan Kalkowski
* \date 2015-02-16
*/
/*
* Copyright (C) 2015-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* core includes */
.include "macros.s"
/**
* Switch from nonsecure into secure world
*
* \param exception_type immediate exception type ID
* \param pc_adjust immediate value that gets subtracted from the
* vm's PC before it gets saved
*/
.macro _nonsecure_to_secure exception_type, pc_adjust
ldr sp, _tz_client_context /* load context pointer */
stmia sp, {r0-lr}^ /* save user regs r0-r12,sp,lr */
add r0, sp, #15*4
.if \pc_adjust != 0 /* adjust pc if necessary */
sub lr, lr, #\pc_adjust
.endif
stmia r0!, {lr} /* save pc */
mrs r1, spsr /* spsr to r0 */
mov r2, #\exception_type /* exception reason to r1 */
b _nonsecure_kernel_entry
.endm /* _non_to_secure */
/**
* Switch from secure into nonsecure world
*/
.macro _secure_to_nonsecure
ldr r0, _tz_client_context /* get vm context pointer */
add r0, r0, #18*4 /* add offset of banked modes */
_restore_bank 27 /* load undefined banks */
_restore_bank 19 /* load supervisor banks */
_restore_bank 23 /* load abort banks */
_restore_bank 18 /* load irq banks */
_restore_bank 17 /* load fiq banks */
ldmia r0!, {r8 - r12} /* load fiq r8-r12 */
cps #22 /* switch to monitor mode */
ldr sp, _tz_client_context /* get vm context pointer */
ldmia sp, {r0-lr}^ /* load user r0-r12,sp,lr */
ldr lr, [sp, #16*4] /* load vm's cpsr to lr */
msr spsr_cxfs, lr /* save cpsr to be load when switching */
mov lr, #13
mcr p15, 0, lr, c1, c1, 0 /* enable EA, FIQ, and NS bit in SCTRL */
ldr lr, [sp, #15*4] /* load vm's ip */
subs pc, lr, #0
.endm /* _secure_to_nonsecure */
.section .text
/*
* On TrustZone exceptions the CPU has to jump to one of the following
* 7 entry vectors to switch to a kernel context.
*/
.p2align MIN_PAGE_SIZE_LOG2
.global _mon_kernel_entry
_mon_kernel_entry:
b _mon_rst_entry /* reset */
b _mon_und_entry /* undefined instruction */
b _mon_svc_entry /* supervisor call */
b _mon_pab_entry /* prefetch abort */
b _mon_dab_entry /* data abort */
nop /* reserved */
b _mon_irq_entry /* interrupt request */
_nonsecure_to_secure FIQ_TYPE, 4 /* fast interrupt request */
/* PICs that switch from a vm exception to the kernel */
_mon_rst_entry: _nonsecure_to_secure RST_TYPE, 0
_mon_und_entry: _nonsecure_to_secure UND_TYPE, 4
_mon_svc_entry: _nonsecure_to_secure SVC_TYPE, 0
_mon_pab_entry: _nonsecure_to_secure PAB_TYPE, 4
_mon_dab_entry: _nonsecure_to_secure DAB_TYPE, 8
_mon_irq_entry: _nonsecure_to_secure IRQ_TYPE, 4
/* space for a copy of the kernel context */
.p2align 2
.global _tz_master_context
_tz_master_context:
.space 32 * 4
/* space for a client context-pointer */
.p2align 2
.global _tz_client_context
_tz_client_context:
.space CONTEXT_PTR_SIZE
_nonsecure_kernel_entry:
stmia r0!, {r1-r2} /* save spsr, and exception reason */
mrc p15, 0, r3, c6, c0, 0 /* move DFAR to r3 */
mrc p15, 0, r4, c2, c0, 0 /* move TTBR0 to r4 */
mrc p15, 0, r5, c2, c0, 1 /* move TTBR1 to r5 */
mrc p15, 0, r6, c2, c0, 2 /* move TTBRC to r6 */
mov r1, #0
mcr p15, 0, r1, c1, c1, 0 /* disable non-secure bit */
_save_bank 27 /* save undefined banks */
_save_bank 19 /* save supervisor banks */
_save_bank 23 /* save abort banks */
_save_bank 18 /* save irq banks */
_save_bank 17 /* save fiq banks */
stmia r0!, {r8-r12} /* save fiq r8-r12 */
stmia r0!, {r3-r6} /* save MMU registers */
cps #SVC_MODE
adr r0, _tz_master_context
_restore_kernel_sp r0, r1, r2 /* apply kernel sp */
add r1, r0, #LR_OFFSET
ldm r1, {lr, pc}
/* kernel must jump to this point to switch to a vm */
.global _mt_nonsecure_entry_pic
_mt_nonsecure_entry_pic:
_secure_to_nonsecure

View File

@ -11,8 +11,25 @@
* under the terms of the GNU Affero General Public License version 3.
*/
/* core includes */
.include "macros.s"
.set USR_MODE, 16
.set FIQ_MODE, 17
.set IRQ_MODE, 18
.set SVC_MODE, 19
.set ABT_MODE, 23
.set UND_MODE, 27
.set SYS_MODE, 31
.macro _save_bank mode
cps #\mode /* switch to given mode */
mrs r1, spsr /* store mode-specific spsr */
stmia r0!, {r1,sp,lr} /* store mode-specific sp and lr */
.endm /* _save_bank mode */
.macro _restore_bank mode
cps #\mode /* switch to given mode */
ldmia r0!, {r1,sp,lr} /* load mode-specific sp, lr, and spsr into r1 */
msr spsr_cxfs, r1 /* load mode-specific spsr */
.endm
.macro _vm_exit exception_type
str r0, [sp]
@ -33,8 +50,8 @@
* 7 entry vectors to switch to a kernel context.
*/
.p2align 12
.global _vt_host_entry
_vt_host_entry:
.global hypervisor_exception_vector
hypervisor_exception_vector:
b _vt_rst_entry
b _vt_und_entry /* undefined instruction */
b _vt_svc_entry /* hypervisor call */
@ -42,7 +59,7 @@ _vt_host_entry:
b _vt_dab_entry /* data abort */
b _vt_trp_entry /* hypervisor trap */
b _vt_irq_entry /* interrupt request */
_vm_exit 7 /* fast interrupt request */
_vm_exit 7 /* fast interrupt request */
_vt_rst_entry: _vm_exit 1
_vt_und_entry: _vm_exit 2
@ -52,20 +69,6 @@ _vt_dab_entry: _vm_exit 5
_vt_irq_entry: _vm_exit 6
_vt_trp_entry: _vm_exit 8
/* space for a copy of the host context */
.p2align 2
.global _vt_host_context_ptr
_vt_host_context_ptr:
.space CONTEXT_PTR_SIZE
/* space for a vm context-pointer per CPU */
.p2align 2
.global _vt_vm_context_ptr
_vt_vm_context_ptr:
.rept NR_OF_CPUS
.space CONTEXT_PTR_SIZE
.endr
_host_to_vm:
msr elr_hyp, r2
msr spsr_cxfs, r3 /* load cpsr */
@ -130,14 +133,15 @@ _vm_to_host:
stm r0, {r3-r12}
add r0, sp, #13*4
ldr r3, _vt_host_context_ptr
_restore_kernel_sp r3, r4, r5
add r3, r3, #CIDR_OFFSET
ldmia r3, {r4-r9}
_switch_protection_domain r0, r4, r5
mcr p15, 0, r6, c1, c0, 0 /* write SCTRL */
mcr p15, 0, r7, c2, c0, 2 /* write TTBRC */
mcr p15, 0, r8, c10, c2, 0 /* write MAIR0 */
mcr p15, 0, r9, c3, c0, 0 /* write DACR */
ldr sp, [r3]
add r3, r3, #2*4
ldm r3, {r4-r11}
mcrr p15, 0, r6, r7, c2
mcrr p15, 1, r6, r7, c2
mcr p15, 0, r8, c1, c0, 0 /* write SCTRL */
mcr p15, 0, r9, c2, c0, 2 /* write TTBRC */
mcr p15, 0, r10, c10, c2, 0 /* write MAIR0 */
mcr p15, 0, r11, c3, c0, 0 /* write DACR */
cps #SVC_MODE
stmia r0, {r13-r14}^ /* save user regs sp,lr */
add r0, r0, #2*4
@ -151,15 +155,12 @@ _vm_to_host:
stmia r0!, {r8-r12} /* save fiq r8-r12 */
cps #SVC_MODE
ldr r0, _vt_host_context_ptr
_restore_kernel_sp r0, r1, r2 /* apply host kernel sp */
add r1, r0, #LR_OFFSET /* apply host kernel lr */
ldm r1, {lr, pc}
ldm r0, {sp,pc}
/* host kernel must jump to this point to switch to a vm */
.global _vt_vm_entry
_vt_vm_entry:
_get_client_context_ptr r0, lr, _vt_vm_context_ptr
add r0, r0, #SP_OFFSET
.global hypervisor_enter_vm
hypervisor_enter_vm:
add r0, r0, #13*4
ldm r0, {r13 - r14}^
add r0, r0, #2*4
ldmia r0!, {r2 - r4}
@ -172,3 +173,5 @@ _vt_vm_entry:
cps #SVC_MODE
ldm r0!, {r5 - r12}
hvc #0
_vt_host_context_ptr: .long vt_host_context

View File

@ -37,9 +37,20 @@ namespace Kernel
using namespace Kernel;
extern void * _vt_vm_entry;
extern void * _vt_host_entry;
extern Genode::addr_t _vt_vm_context_ptr;
extern "C" void kernel();
extern void * kernel_stack;
extern "C" void hypervisor_enter_vm(Cpu::Context*);
struct Host_context {
addr_t sp;
addr_t ip;
Cpu::Ttbr_64bit::access_t ttbr0;
Cpu::Ttbr_64bit::access_t ttbr1;
Cpu::Sctlr::access_t sctlr;
Cpu::Ttbcr::access_t ttbcr;
Cpu::Mair0::access_t mair0;
Cpu::Dacr::access_t dacr;
} vt_host_context;
struct Kernel::Vm_irq : Kernel::Irq
@ -200,6 +211,15 @@ Kernel::Vm::Vm(void * const state,
{
affinity(cpu_pool()->primary_cpu());
Virtual_pic::pic().irq.enable();
vt_host_context.sp = (addr_t)&kernel_stack + Cpu::KERNEL_STACK_SIZE;
vt_host_context.ttbr0 = Cpu::Ttbr0_64bit::read();
vt_host_context.ttbr1 = Cpu::Ttbr1_64bit::read();
vt_host_context.sctlr = Cpu::Sctlr::read();
vt_host_context.ttbcr = Cpu::Ttbcr::read();
vt_host_context.mair0 = Cpu::Mair0::read();
vt_host_context.dacr = Cpu::Dacr::read();
vt_host_context.ip = (addr_t) &kernel;
}
@ -244,8 +264,7 @@ void Kernel::Vm::proceed(unsigned const cpu_id)
Virtual_pic::load(_state);
Virtual_timer::load(_state, cpu_id);
mtc()->switch_to(reinterpret_cast<Cpu::Context*>(_state), cpu_id,
(addr_t) &_vt_vm_entry, (addr_t)&_vt_vm_context_ptr);
hypervisor_enter_vm(reinterpret_cast<Cpu::Context*>(_state));
}

View File

@ -23,9 +23,7 @@
#include <vm_root.h>
#include <platform.h>
extern Genode::addr_t _vt_host_context_ptr;
extern Genode::addr_t _vt_host_entry;
extern Genode::addr_t _mt_master_context_begin;
extern Genode::addr_t hypervisor_exception_vector;
/*
* Add ARM virtualization specific vm service
@ -36,11 +34,9 @@ void Genode::platform_add_local_services(Rpc_entrypoint *ep,
{
using namespace Genode;
/* initialize host context used in virtualization world switch */
*((void**)&_vt_host_context_ptr) = &_mt_master_context_begin;
map_local(Platform::core_phys_addr((addr_t)&_vt_host_entry),
0xfff00000, 1, Hw::PAGE_FLAGS_KERN_TEXT);
map_local(Platform::core_phys_addr((addr_t)&hypervisor_exception_vector),
Hw::Mm::hypervisor_exception_vector().base, 1,
Hw::PAGE_FLAGS_KERN_TEXT);
static Vm_root vm_root(ep, sh);
static Core_service<Vm_session_component> vm_service(*services, vm_root);

View File

@ -137,7 +137,7 @@ class Genode::Cpu : public Arm_v7_cpu
* Assign translation-table base 'table'
*/
void translation_table(addr_t const table) {
Ttbr_64bit::Ba::set(ttbr0, (Ttbr_64bit::access_t)(table >> 5)); }
Ttbr_64bit::Ba::set(ttbr0, Ttbr_64bit::Ba::get(table)); }
/**
* Assign protection domain
@ -152,30 +152,32 @@ class Genode::Cpu : public Arm_v7_cpu
*
* FIXME: this class largely overlaps with Genode::Arm::User_context
*/
struct User_context : Context
struct User_context
{
Align_at<Context, 8> regs;
void init(bool privileged)
{
Psr::access_t v = 0;
Psr::M::set(v, privileged ? Psr::M::SYS : Psr::M::USR);
Psr::F::set(v, 1);
Psr::A::set(v, 1);
cpsr = v;
regs->cpsr = v;
}
/**
* Support for kernel calls
*/
void user_arg_0(Kernel::Call_arg const arg) { r0 = arg; }
void user_arg_1(Kernel::Call_arg const arg) { r1 = arg; }
void user_arg_2(Kernel::Call_arg const arg) { r2 = arg; }
void user_arg_3(Kernel::Call_arg const arg) { r3 = arg; }
void user_arg_4(Kernel::Call_arg const arg) { r4 = arg; }
Kernel::Call_arg user_arg_0() const { return r0; }
Kernel::Call_arg user_arg_1() const { return r1; }
Kernel::Call_arg user_arg_2() const { return r2; }
Kernel::Call_arg user_arg_3() const { return r3; }
Kernel::Call_arg user_arg_4() const { return r4; }
void user_arg_0(Kernel::Call_arg const arg) { regs->r0 = arg; }
void user_arg_1(Kernel::Call_arg const arg) { regs->r1 = arg; }
void user_arg_2(Kernel::Call_arg const arg) { regs->r2 = arg; }
void user_arg_3(Kernel::Call_arg const arg) { regs->r3 = arg; }
void user_arg_4(Kernel::Call_arg const arg) { regs->r4 = arg; }
Kernel::Call_arg user_arg_0() const { return regs->r0; }
Kernel::Call_arg user_arg_1() const { return regs->r1; }
Kernel::Call_arg user_arg_2() const { return regs->r2; }
Kernel::Call_arg user_arg_3() const { return regs->r3; }
Kernel::Call_arg user_arg_4() const { return regs->r4; }
/**
* Initialize thread context
@ -185,8 +187,8 @@ class Genode::Cpu : public Arm_v7_cpu
*/
void init_thread(addr_t const table, unsigned const pd_id)
{
protection_domain(pd_id);
translation_table(table);
regs->protection_domain(pd_id);
regs->translation_table(table);
}
/**
@ -200,9 +202,9 @@ class Genode::Cpu : public Arm_v7_cpu
/* permission fault on page, 2nd level */
static constexpr Fsr::access_t permission = 0b1111;
switch (cpu_exception) {
switch (regs->cpu_exception) {
case PREFETCH_ABORT:
case Context::PREFETCH_ABORT:
{
/* check if fault was caused by a translation miss */
Fsr::access_t const fs = Fsr::Fs::get(Ifsr::read());
@ -210,11 +212,11 @@ class Genode::Cpu : public Arm_v7_cpu
/* fetch fault data */
w = 0;
va = ip;
va = regs->ip;
return true;
}
case DATA_ABORT:
case Context::DATA_ABORT:
{
/* check if fault was caused by translation miss */
Fsr::access_t const fs = Fsr::Fs::get(Dfsr::read());
@ -257,13 +259,19 @@ class Genode::Cpu : public Arm_v7_cpu
void invalidate_data_cache() {
invalidate_inner_data_cache(); }
/*
 * Make the given user context the current one
 *
 * Re-loads the page tables only when necessary: contexts whose
 * TTBR0 carries no ASID are skipped (core/kernel contexts keep
 * running on the currently active tables), and a TTBR0 value that
 * already matches the active one needs no reload either.
 */
void switch_to(User_context& o)
{
	if (Ttbr_64bit::Asid::get(o.regs->ttbr0) &&
	    (Ttbr0_64bit::read() != o.regs->ttbr0))
		Ttbr0_64bit::write(o.regs->ttbr0);
}
/*************
** Dummies **
*************/
void switch_to(User_context&) { }
bool retry_undefined_instr(Context&) { return false; }
bool retry_undefined_instr(User_context&) { return false; }
};
#endif /* _CORE__SPEC__CORTEX_A15__CPU_H_ */

View File

@ -1,37 +0,0 @@
/*
* \brief Macros that are used by multiple assembly files
* \author Stefan Kalkowski
* \date 2015-01-30
*/
/*
* Copyright (C) 2015-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* core includes */
.include "spec/arm_v7/macros_support.s"
/**
* Switch to a given protection domain
*
* There is no atomicity problem when setting the ASID and translation table
* base address in the one 64-bit TTBR0 register, like in Armv7 cpus without
* LPAE extensions. Therefore, we don't have to use a transition table.
*
* \param ignored ignored parameter
* \param ttbr0_low low word of TTBR0 64-bit register
* \param ttbr0_high high word of TTBR0 64-bit register
*/
.macro _switch_protection_domain ignored, ttbr0_low, ttbr0_high
/* write translation-table-base register 0 */
mcrr p15, 0, \ttbr0_low, \ttbr0_high, c2
/* instruction and data synchronization barrier */
isb
dsb
.endm

View File

@ -1,31 +0,0 @@
/*
* \brief Macros that are used by multiple assembly files
* \author Stefan Kalkowski
* \date 2015-01-30
*/
/*
* Copyright (C) 2015-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* core includes */
.include "spec/arm_v7/macros_support.s"
/**
* Switch to a given protection domain
*
* \param transit_ttbr0 transitional TTBR0 value, read/write reg
* \param new_cidr new CIDR value, read reg
* \param new_ttbr0 new TTBR0 value, read/write reg
*/
.macro _switch_protection_domain transit_ttbr0, new_cidr, new_ttbr0
mcr p15, 0, \transit_ttbr0, c2, c0, 0
isb
mcr p15, 0, \new_cidr, c13, c0, 1
isb
mcr p15, 0, \new_ttbr0, c2, c0, 0
isb
.endm

View File

@ -64,7 +64,11 @@ class Genode::Cpu : public Arm_v7_cpu
*
* \param context context to switch to
*/
void switch_to(User_context & context) { _fpu.switch_to(context); }
/*
 * Switch to the given user context: the base class decides whether
 * the page tables must be re-loaded, then the FPU driver takes over
 * the (lazily switched) floating-point state.
 */
void switch_to(User_context & context)
{
	Arm_cpu::switch_to(context);
	_fpu.switch_to(context);
}
/**
* Return wether to retry an undefined user instruction after this call

View File

@ -19,17 +19,9 @@
#include <platform_pd.h>
#include <platform.h>
extern int _mt_begin;
extern int _mt_master_context_begin;
void Kernel::Cpu::init(Kernel::Pic &pic)
{
Cpu_context * c = (Cpu_context*)
(Cpu::exception_entry + ((addr_t)&_mt_master_context_begin -
(addr_t)&_mt_begin));
c->cpu_exception = Genode::Cpu::Ttbr0::read();
_fpu.init();
{

View File

@ -1,40 +0,0 @@
/*
* \brief Macros that are used by multiple assembly files
* \author Stefan Kalkowski
* \date 2015-01-30
*/
/*
* Copyright (C) 2015-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
/* core includes */
.include "spec/arm_v7/macros_support.s"
/**
* Switch to a given protection domain
*
* \param transit_ttbr0 transitional TTBR0 value, read/write reg
* \param new_cidr new CIDR value, read reg
* \param new_ttbr0 new TTBR0 value, read/write reg
*/
.macro _switch_protection_domain transit_ttbr0, new_cidr, new_ttbr0
/*
* FIXME: Fixes instability problems that were observed on the
* PandaBoard only. We neither know why invalidating predictions
* at PD switches is a fix nor wether not doing so is the real
* cause of this instability.
*/
mcr p15, 0, r0, c7, c5, 6
mcr p15, 0, \transit_ttbr0, c2, c0, 0
isb
mcr p15, 0, \new_cidr, c13, c0, 1
isb
mcr p15, 0, \new_ttbr0, c2, c0, 0
isb
.endm

View File

@ -22,7 +22,7 @@
#include <vm_root.h>
#include <map_local.h>
extern int _mon_kernel_entry;
extern int monitor_mode_exception_vector;
/*
* Add TrustZone specific vm service
@ -32,8 +32,9 @@ void Genode::platform_add_local_services(Rpc_entrypoint *ep,
Registry<Service> *local_services)
{
static addr_t const phys_base =
Platform::core_phys_addr((addr_t)&_mon_kernel_entry);
map_local(phys_base, 0xfff00000, 1); // FIXME
Platform::core_phys_addr((addr_t)&monitor_mode_exception_vector);
map_local(phys_base, Hw::Mm::system_exception_vector().base, 1,
Hw::PAGE_FLAGS_KERN_TEXT);
static Vm_root vm_root(ep, sliced_heap);
static Core_service<Vm_session_component> vm_service(*local_services, vm_root);
}

View File

@ -19,6 +19,8 @@
#include <cpu/cpu_state.h>
#include <util/register.h>
#include <base/internal/align_at.h>
#include <kernel/interface.h>
#include <hw/spec/riscv/cpu.h>
@ -38,9 +40,6 @@ class Genode::Cpu : public Hw::Riscv_cpu
{
public:
static constexpr addr_t mtc_size = 0x1000;
static constexpr addr_t exception_entry = 0xffffffc000000000;
/**
* Extend basic CPU state by members relevant for 'base-hw' only
*/
@ -77,8 +76,10 @@ class Genode::Cpu : public Hw::Riscv_cpu
/**
* A usermode execution state
*/
struct User_context : Context
struct User_context
{
Align_at<Context, 8> regs;
/**
* Constructor
*/
@ -87,16 +88,16 @@ class Genode::Cpu : public Hw::Riscv_cpu
/**
* Support for kernel calls
*/
void user_arg_0(Kernel::Call_arg const arg) { a0 = arg; }
void user_arg_1(Kernel::Call_arg const arg) { a1 = arg; }
void user_arg_2(Kernel::Call_arg const arg) { a2 = arg; }
void user_arg_3(Kernel::Call_arg const arg) { a3 = arg; }
void user_arg_4(Kernel::Call_arg const arg) { a4 = arg; }
Kernel::Call_arg user_arg_0() const { return a0; }
Kernel::Call_arg user_arg_1() const { return a1; }
Kernel::Call_arg user_arg_2() const { return a2; }
Kernel::Call_arg user_arg_3() const { return a3; }
Kernel::Call_arg user_arg_4() const { return a4; }
void user_arg_0(Kernel::Call_arg const arg) { regs->a0 = arg; }
void user_arg_1(Kernel::Call_arg const arg) { regs->a1 = arg; }
void user_arg_2(Kernel::Call_arg const arg) { regs->a2 = arg; }
void user_arg_3(Kernel::Call_arg const arg) { regs->a3 = arg; }
void user_arg_4(Kernel::Call_arg const arg) { regs->a4 = arg; }
Kernel::Call_arg user_arg_0() const { return regs->a0; }
Kernel::Call_arg user_arg_1() const { return regs->a1; }
Kernel::Call_arg user_arg_2() const { return regs->a2; }
Kernel::Call_arg user_arg_3() const { return regs->a3; }
Kernel::Call_arg user_arg_4() const { return regs->a4; }
/**
* Initialize thread context
@ -106,8 +107,8 @@ class Genode::Cpu : public Hw::Riscv_cpu
*/
void init_thread(addr_t const table, unsigned const pd_id)
{
protection_domain(pd_id);
translation_table(table);
regs->protection_domain(pd_id);
regs->translation_table(table);
}
};
@ -158,10 +159,11 @@ class Genode::Cpu : public Hw::Riscv_cpu
void switch_to(User_context& context)
{
bool user = Sptbr::Asid::get(context.sptbr);
bool user = Sptbr::Asid::get(context.regs->sptbr);
Sstatus::access_t v = Sstatus::read();
Sstatus::Spp::set(v, user ? 0 : 1);
Sstatus::write(v);
if (user) Sptbr::write(context.regs->sptbr);
}
};

View File

@ -13,8 +13,28 @@
.section ".text"
.global _core_start
_core_start:
/***********************
** kernel entry code **
***********************/
.global _start
_start:

	/* set up the kernel stack: sp = kernel_stack + kernel_stack_size */
	la x29, kernel_stack
	la x30, kernel_stack_size
	ld x30, (x30)
	add sp, x29, x30

	/* enter the C++ kernel initialization (does not return here) */
	la x30, kernel_init
	jalr x30
/*********************************
** core main thread entry code **
*********************************/
.global _core_start
_core_start:
/* create environment for main thread */
jal init_main_thread

View File

@ -15,150 +15,40 @@
.set CPU_IP, 0
.set CPU_EXCEPTION, 8
.set CPU_X1, 2*8
.set CPU_SP, 3*8
.set CPU_SPTBR, 33*8
.section ".text.crt0"
.p2align 12
.global _mt_begin
_mt_begin:
# 0x100 user mode
j _mt_kernel_entry_pic
.space 0x3c
# 0x140 supervisor
1: j 1b
.space 0x3c
# 0x180 hypervisor
1: j 1b
.space 0x3c
# 0x1c0 machine
1: j 1b
.space 0x38
# 0x1fc non-maksable interrupt
1: j 1b
j _kernel_entry
/* space for a client context-pointer per CPU */
.p2align 3
.global _mt_client_context_ptr
_mt_client_context_ptr:
.space 8
.p2align 8
/* space for a copy of the kernel context */
.global _mt_master_context_begin
_mt_master_context_begin:
_kernel_entry:
/* space must be at least as large as 'Context' */
.space 35*8
.global _mt_master_context_end
_mt_master_context_end:
.global _mt_kernel_entry_pic
_mt_kernel_entry_pic:
# master context
# client context
csrrw x31, sscratch, x31
addi x31, x31, 8
# save x30 in master
sd x29, CPU_X1 + 8 * 28(x31)
sd x30, CPU_X1 + 8 * 29(x31)
# load kernel page table
ld x30, CPU_SPTBR(x31)
csrw sptbr, x30
#
# FIXME
# A TLB flush. Might be necessary to remove this in the near future again
# because on real hardware we currently get problems without.
#
sfence.vm x0
# save x29 - x31 in user context
mv x29, x31
addi x29, x29, -8
ld x29, (x29)
.irp reg,29,30
ld x30, CPU_X1 + 8 * (\reg - 1)(x31)
sd x30, CPU_X1 + 8 * (\reg - 1)(x29)
.endr
csrr x30, sscratch /* x31 */
sd x30, CPU_X1 + 8 * 30(x29)
# save x1 - x28
.irp reg,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28
sd x\reg, CPU_X1 + 8 * (\reg - 1)(x29)
# save x1 - x30
.irp reg,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
sd x\reg, CPU_X1 + 8 * (\reg - 1)(x31)
.endr
# trap reason
csrr x30, scause
sd x30, CPU_EXCEPTION(x29)
sd x30, CPU_EXCEPTION(x31)
# ip
csrr x30, sepc
sd x30, CPU_IP(x29)
sd x30, CPU_IP(x31)
# load kernel stack and ip
ld sp, CPU_SP(x31)
ld x30, CPU_IP(x31)
# x31
csrr x30, sscratch
sd x30, CPU_X1 + 8 * 30(x31)
# restore scratch
addi x31, x31, -8
csrw sscratch, x31
la x29, kernel_stack
la x30, kernel_stack_size
ld x30, (x30)
add sp, x29, x30
la x30, kernel
jalr x30
.global _mt_user_entry_pic
_mt_user_entry_pic:
# client context pointer
csrr x30, sscratch
ld x30, (x30)
# set return IP
ld x31, CPU_IP(x30)
csrw sepc, x31
# restore x1-x28
.irp reg,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28
ld x\reg, CPU_X1 + 8 * (\reg - 1)(x30)
.endr
# save x29, x30, x31 to master context
csrr x29, sscratch
addi x29, x29, 8 # master context
.irp reg,29,30,31
ld x31, CPU_X1 + 8 * (\reg - 1)(x30)
sd x31, CPU_X1 + 8 * (\reg - 1)(x29)
.endr
# switch page table
ld x31, CPU_SPTBR(x30)
csrw sptbr, x31
#
# FIXME
# A TLB flush. Might be necessary to remove this in the near future again
# because on real hardware we currently get problems without.
#
sfence.vm x0
# restore x29 - x31 from master context
.irp reg,31,30,29
ld x\reg, CPU_X1 + 8 * (\reg - 1)(x29)
.endr
sret
# end of the mode transition code
.global _mt_end
_mt_end:

View File

@ -16,37 +16,46 @@
#include <kernel/cpu.h>
#include <kernel/pd.h>
#include <hw/memory_map.h>
using namespace Kernel;
extern Genode::addr_t _mt_client_context_ptr;
void Kernel::Cpu::init(Kernel::Pic &pic/*, Kernel::Pd & core_pd,
Genode::Board & board*/)
{
addr_t client_context_ptr_off = (addr_t)&_mt_client_context_ptr & 0xfff;
addr_t client_context_ptr = exception_entry | client_context_ptr_off;
asm volatile ( "csrw sscratch,%0\n" /* master conext ptr */
:: "r" (client_context_ptr) : "memory");
Stvec::write(exception_entry);
}
void Kernel::Cpu::init(Kernel::Pic &pic) {
Stvec::write(Hw::Mm::supervisor_exception_vector().base); }
Cpu_idle::Cpu_idle(Cpu * const cpu) : Cpu_job(Cpu_priority::MIN, 0)
{
Cpu_job::cpu(cpu);
cpu_exception = RESET;
ip = (addr_t)&_main;
sp = (addr_t)&_stack[stack_size];
regs->cpu_exception = Cpu::Context::RESET;
regs->ip = (addr_t)&_main;
regs->sp = (addr_t)&_stack[stack_size];
init_thread((addr_t)core_pd()->translation_table(), core_pd()->asid);
}
void Cpu_idle::exception(unsigned const cpu)
{
if (is_irq()) {
if (regs->is_irq()) {
_interrupt(cpu);
return;
} else if (cpu_exception == RESET) return;
} else if (regs->cpu_exception == Cpu::Context::RESET) return;
ASSERT_NEVER_CALLED;
}
/*
 * Return to the idle context via 'sret'
 *
 * The first context word (ip) is written to 'sepc', then x1-x30 are
 * reloaded from the context (register xN lives at offset 8*(N+1)).
 * 'sscratch' temporarily holds the saved x31 value (t6 is x31 in the
 * RISC-V ABI); the final 'csrrw' swap restores x31 and leaves the
 * context pointer in 'sscratch' for the next trap entry.
 */
void Cpu_idle::proceed(unsigned const)
{
	asm volatile("csrw sscratch, %1 \n"
	             "mv x31, %0 \n"
	             "ld x30, (x31) \n"
	             "csrw sepc, x30 \n"
	             ".irp reg,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,"
	             "18,19,20,21,22,23,24,25,26,27,28,29,30 \n"
	             " ld x\\reg, 8 * (\\reg + 1)(x31) \n"
	             ".endr \n"
	             "csrrw x31, sscratch, x31 \n"
	             "sret \n"
	             :: "r" (&*regs), "r" (regs->t6) : "x30", "x31");
}

View File

@ -43,8 +43,8 @@ Kernel::Pd::~Pd()
}
void Kernel::Pd::admit(Kernel::Cpu::Context * const c)
void Kernel::Pd::admit(Kernel::Cpu::Context & c)
{
c->protection_domain(asid);
c->translation_table((addr_t)translation_table());
c.protection_domain(asid);
c.translation_table((addr_t)translation_table());
}

View File

@ -21,23 +21,24 @@ void Kernel::Thread::_init() { }
void Thread::exception(unsigned const cpu)
{
if (is_irq())
if (regs->is_irq())
return;
switch(cpu_exception) {
case ECALL_FROM_USER:
case ECALL_FROM_SUPERVISOR:
switch(regs->cpu_exception) {
case Cpu::Context::ECALL_FROM_USER:
case Cpu::Context::ECALL_FROM_SUPERVISOR:
_call();
ip += 4; /* set to next instruction */
regs->ip += 4; /* set to next instruction */
break;
case INSTRUCTION_PAGE_FAULT:
case STORE_PAGE_FAULT:
case LOAD_PAGE_FAULT:
case Cpu::Context::INSTRUCTION_PAGE_FAULT:
case Cpu::Context::STORE_PAGE_FAULT:
case Cpu::Context::LOAD_PAGE_FAULT:
_mmu_exception();
break;
default:
Genode::error(*this, ": unhandled exception ", cpu_exception,
" at ip=", (void*)ip, " addr=", Genode::Hex(Cpu::Sbadaddr::read()));
Genode::error(*this, ": unhandled exception ", regs->cpu_exception,
" at ip=", (void*)regs->ip,
" addr=", Genode::Hex(Cpu::Sbadaddr::read()));
_die();
}
}
@ -66,3 +67,19 @@ void Thread::_call_update_data_region()
void Thread::_call_update_instr_region() { }
/*
 * Return to the user thread via 'sret'
 *
 * Mirrors 'Cpu_idle::proceed': ip goes to 'sepc', x1-x30 are reloaded
 * from the context (register xN at offset 8*(N+1)), and the final
 * 'csrrw' swap restores x31 (= t6) while parking the context pointer
 * in 'sscratch' for the next kernel entry.
 */
void Kernel::Thread::proceed(unsigned const)
{
	asm volatile("csrw sscratch, %1 \n"
	             "mv x31, %0 \n"
	             "ld x30, (x31) \n"
	             "csrw sepc, x30 \n"
	             ".irp reg,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,"
	             "18,19,20,21,22,23,24,25,26,27,28,29,30 \n"
	             " ld x\\reg, 8 * (\\reg + 1)(x31) \n"
	             ".endr \n"
	             "csrrw x31, sscratch, x31 \n"
	             "sret \n"
	             :: "r" (&*regs), "r" (regs->t6) : "x30", "x31");
}

View File

@ -15,10 +15,10 @@
#include <cpu.h>
#include <kernel/pd.h>
extern int _mt_tss;
extern int _mt_idt;
extern int _mt_gdt_start;
extern int _mt_gdt_end;
extern int __tss;
extern int __idt;
extern int __gdt_start;
extern int __gdt_end;
void Genode::Cpu::Context::init(addr_t const table, bool core)
@ -48,16 +48,16 @@ void Genode::Cpu::Tss::init()
void Genode::Cpu::Idt::init()
{
Pseudo_descriptor descriptor {
(uint16_t)((addr_t)&_mt_tss - (addr_t)&_mt_idt),
(uint64_t)(&_mt_idt) };
(uint16_t)((addr_t)&__tss - (addr_t)&__idt),
(uint64_t)(&__idt) };
asm volatile ("lidt %0" : : "m" (descriptor));
}
void Genode::Cpu::Gdt::init()
{
addr_t const start = (addr_t)&_mt_gdt_start;
uint16_t const limit = _mt_gdt_end - _mt_gdt_start - 1;
addr_t const start = (addr_t)&__gdt_start;
uint16_t const limit = __gdt_end - __gdt_start - 1;
uint64_t const base = start;
asm volatile ("lgdt %0" :: "m" (Pseudo_descriptor(limit, base)));
}

View File

@ -23,6 +23,7 @@
#include <cpu/cpu_state.h>
/* base includes */
#include <base/internal/align_at.h>
#include <base/internal/unmanaged_singleton.h>
/* core includes */
@ -74,7 +75,7 @@ class Genode::Cpu
/**
* Extend basic CPU state by members relevant for 'base-hw' only
*/
struct Context : Cpu_state
struct alignas(16) Context : Cpu_state
{
/**
* Address of top-level paging structure.
@ -99,21 +100,24 @@ class Genode::Cpu
/**
* An usermode execution state
*/
struct User_context : Context, Fpu::Context
struct User_context
{
Align_at<Context, 16> regs;
Fpu::Context fpu_regs;
/**
* Support for kernel calls
*/
void user_arg_0(Kernel::Call_arg const arg) { rdi = arg; }
void user_arg_1(Kernel::Call_arg const arg) { rsi = arg; }
void user_arg_2(Kernel::Call_arg const arg) { rdx = arg; }
void user_arg_3(Kernel::Call_arg const arg) { rcx = arg; }
void user_arg_4(Kernel::Call_arg const arg) { r8 = arg; }
Kernel::Call_arg user_arg_0() const { return rdi; }
Kernel::Call_arg user_arg_1() const { return rsi; }
Kernel::Call_arg user_arg_2() const { return rdx; }
Kernel::Call_arg user_arg_3() const { return rcx; }
Kernel::Call_arg user_arg_4() const { return r8; }
void user_arg_0(Kernel::Call_arg const arg) { regs->rdi = arg; }
void user_arg_1(Kernel::Call_arg const arg) { regs->rsi = arg; }
void user_arg_2(Kernel::Call_arg const arg) { regs->rdx = arg; }
void user_arg_3(Kernel::Call_arg const arg) { regs->rcx = arg; }
void user_arg_4(Kernel::Call_arg const arg) { regs->r8 = arg; }
Kernel::Call_arg user_arg_0() const { return regs->rdi; }
Kernel::Call_arg user_arg_1() const { return regs->rsi; }
Kernel::Call_arg user_arg_2() const { return regs->rdx; }
Kernel::Call_arg user_arg_3() const { return regs->rcx; }
Kernel::Call_arg user_arg_4() const { return regs->r8; }
};
protected:
@ -124,11 +128,6 @@ class Genode::Cpu
Fpu & fpu() { return _fpu; }
static constexpr addr_t exception_entry = 0xffffffc000000000;
static constexpr addr_t mtc_size = 1 << 13;
static addr_t virt_mtc_addr(addr_t virt_base, addr_t label);
/**
* Wait for the next interrupt as cheap as possible
*/
@ -155,7 +154,7 @@ class Genode::Cpu
*
* \param context next CPU context
*/
void switch_to(User_context &context) { _fpu.switch_to(context); }
inline void switch_to(User_context &context);
};
@ -284,4 +283,13 @@ struct Genode::Cpu::Cr4 : Register<64>
}
};
/*
 * Make the given user context the current one
 *
 * Hands the floating-point state over to the lazy-switching FPU
 * driver, and re-loads the page tables (CR3) only when necessary:
 * contexts running on the kernel code segment (cs == 0x8, as set up
 * for core contexts) stay on the active tables, and a user context
 * whose CR3 already matches the active one needs no reload either.
 */
void Genode::Cpu::switch_to(User_context &context)
{
	_fpu.switch_to(context.fpu_regs);

	/* skip the costly CR3 write for core contexts and unchanged CR3 */
	if ((context.regs->cs != 0x8) && (context.regs->cr3 != Cr3::read()))
		Cr3::write(context.regs->cr3);
}
#endif /* _CORE__SPEC__X86_64__CPU_H_ */

View File

@ -12,13 +12,29 @@
* under the terms of the GNU Affero General Public License version 3.
*/
/**************************
** .text (program code) **
**************************/
.section ".text"
/* program entry-point */
/***********************
** kernel entry code **
***********************/
.global _start
_start:

	/* switch to kernel stack: rsp = kernel_stack + kernel_stack_size */
	mov kernel_stack@GOTPCREL(%rip), %rax
	mov kernel_stack_size@GOTPCREL(%rip), %rbx
	add (%rbx), %rax
	mov %rax, %rsp

	/* jump to C entry code (does not return here) */
	jmp kernel_init
/*********************************
** core main thread entry code **
*********************************/
.global _core_start
_core_start:

View File

@ -0,0 +1,169 @@
/*
* \brief Transition between kernel/userland
* \author Adrian-Ken Rueegsegger
* \author Martin Stein
* \author Reto Buerki
* \author Stefan Kalkowski
* \date 2011-11-15
*/
/*
* Copyright (C) 2011-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
.include "hw/spec/x86_64/gdt.s"
/* offsets of member variables in a CPU context */
.set IP_OFFSET, 17 * 8
.set SP_OFFSET, 20 * 8
/* tss segment constants */
.set TSS_LIMIT, 0x68
.set TSS_TYPE, 0x8900
/* virtual addresses */
.set BASE, 0xffffffc000000000
.set TSS, BASE + (__tss - _begin)
.set ISR, BASE
.set ISR_ENTRY_SIZE, 12
.set IDT_FLAGS_PRIVILEGED, 0x8e01
.set IDT_FLAGS_UNPRIVILEGED, 0xee01
/* align every ISR stub, padding with nops (0x90) */
.macro _isr_entry
	.align 4, 0x90
.endm

/*
 * ISR stub for exceptions without a CPU-pushed error code: a dummy
 * error code is pushed so all kernel-entry frames share one layout
 */
.macro _exception vector
	_isr_entry
	push $0
	push $\vector
	jmp _kernel_entry
.endm

/*
 * ISR stub for exceptions where the CPU pushes the error code
 * itself; the two nops keep this stub the same size as '_exception'
 * (the table is indexed in ISR_ENTRY_SIZE steps)
 */
.macro _exception_with_code vector
	_isr_entry
	nop
	nop
	push $\vector
	jmp _kernel_entry
.endm
.macro _idt_entry addr flags
.word \addr & 0xffff
.word 0x0008
.word \flags
.word (\addr >> 16) & 0xffff
.long \addr >> 32
.long 0
.endm
.macro _load_address label reg
mov \label@GOTPCREL(%rip), %\reg
.endm
.section ".text.crt0"
_begin:
/*
* On user exceptions the CPU has to jump to one of the following
* Interrupt Service Routines (ISRs) to switch to a kernel context.
*/
_exception 0
_exception 1
_exception 2
_exception 3
_exception 4
_exception 5
_exception 6
_exception 7
_exception_with_code 8
_exception 9
_exception_with_code 10
_exception_with_code 11
_exception_with_code 12
_exception_with_code 13
_exception_with_code 14
_exception 15
_exception 16
_exception_with_code 17
_exception 18
_exception 19
.set vec, 20
.rept 236
_exception vec
.set vec, vec + 1
.endr
_kernel_entry:

	/*
	 * The CPU pushed the iret frame and the ISR stub pushed the error
	 * code and vector number already; complete the context by saving
	 * the remaining general-purpose registers (presumably onto the
	 * client context installed via '__tss_client_context_ptr' — the
	 * 'proceed' implementations pop them back in reverse order).
	 */
	pushq %rbp
	pushq %rsi
	pushq %rdi
	pushq %rdx
	pushq %rcx
	pushq %rbx
	pushq %rax
	pushq %r15
	pushq %r14
	pushq %r13
	pushq %r12
	pushq %r11
	pushq %r10
	pushq %r9
	pushq %r8

	/* Restore kernel stack and continue kernel execution */
	_load_address kernel_stack rax
	_load_address kernel_stack_size rbx
	_load_address kernel rcx
	add (%rbx), %rax
	mov %rax, %rsp
	jmp *%rcx
/*****************************************
** Interrupt Descriptor Table (IDT) **
** See Intel SDM Vol. 3A, section 6.10 **
*****************************************/
.global __idt
.align 8
__idt:

	/* entries 0x00-0x7f: exceptions and interrupts, privileged gates */
	.set isr_addr, ISR
	.rept 0x80
	_idt_entry isr_addr IDT_FLAGS_PRIVILEGED
	.set isr_addr, isr_addr + ISR_ENTRY_SIZE
	.endr

	/* entry 0x80: the syscall gate, callable from user mode */
	_idt_entry isr_addr IDT_FLAGS_UNPRIVILEGED
	.set isr_addr, isr_addr + ISR_ENTRY_SIZE

	/* remaining entries 0x81-0xff, privileged again */
	.rept 127
	_idt_entry isr_addr IDT_FLAGS_PRIVILEGED
	.set isr_addr, isr_addr + ISR_ENTRY_SIZE
	.endr
/****************************************
** Task State Segment (TSS) **
** See Intel SDM Vol. 3A, section 7.7 **
****************************************/
.global __tss
.align 8
__tss:

	/* leading TSS fields (reserved word and RSP0-RSP2 slots) */
	.space 36

	/*
	 * Slot that receives the pointer to the client context; it is
	 * re-written before every return to user land (see the 'proceed'
	 * implementations). NOTE(review): offset 36 corresponds to IST1
	 * in a 64-bit TSS — confirm against Intel SDM Vol. 3A, sec. 7.7.
	 */
	.global __tss_client_context_ptr
	__tss_client_context_ptr:
	.space 64

	/* emit the GDT with a TSS descriptor (macro from gdt.s) */
	_define_gdt TSS

View File

@ -27,11 +27,39 @@ Cpu_idle::Cpu_idle(Cpu * const cpu) : Cpu_job(Cpu_priority::MIN, 0)
{
Cpu::Gdt::init();
Cpu_job::cpu(cpu);
ip = (addr_t)&_main;
sp = (addr_t)&_stack[stack_size];
cs = 0x8;
ss = 0x10;
init((addr_t)core_pd()->translation_table(), true);
regs->ip = (addr_t)&_main;
regs->sp = (addr_t)&_stack[stack_size];
regs->cs = 0x8;
regs->ss = 0x10;
regs->init((addr_t)core_pd()->translation_table(), true);
}
extern void * __tss_client_context_ptr;
/*
 * Return to the idle context via 'iretq'
 *
 * The address of the context's 'cr3' slot is published through the
 * TSS so that the CPU saves the next exception frame directly into
 * the context. The register pops mirror the pushes in
 * '_kernel_entry'; 'add $16, %rsp' skips two context words
 * (presumably trapno/errcode) before 'iretq' consumes ip, cs, flags,
 * sp, and ss from the context.
 */
void Cpu_idle::proceed(unsigned const)
{
	/* let the hardware save the interrupt frame into this context */
	void * * tss_stack_ptr = (&__tss_client_context_ptr);
	*tss_stack_ptr = &regs->cr3;

	/* point rsp at the saved registers and reload them */
	asm volatile("mov %0, %%rsp \n"
	             "popq %%r8 \n"
	             "popq %%r9 \n"
	             "popq %%r10 \n"
	             "popq %%r11 \n"
	             "popq %%r12 \n"
	             "popq %%r13 \n"
	             "popq %%r14 \n"
	             "popq %%r15 \n"
	             "popq %%rax \n"
	             "popq %%rbx \n"
	             "popq %%rcx \n"
	             "popq %%rdx \n"
	             "popq %%rdi \n"
	             "popq %%rsi \n"
	             "popq %%rbp \n"
	             "add $16, %%rsp \n"
	             "iretq \n"
	             :: "r" (&regs->r8));
}

View File

@ -17,15 +17,16 @@
void Kernel::Cpu_idle::exception(unsigned const cpu)
{
if (trapno == RESET) return;
if (regs->trapno == Cpu::Context::RESET) return;
if (trapno >= INTERRUPTS_START && trapno <= INTERRUPTS_END) {
if (regs->trapno >= Cpu::Context::INTERRUPTS_START &&
regs->trapno <= Cpu::Context::INTERRUPTS_END) {
_interrupt(cpu);
return;
}
Genode::warning("Unknown exception ", trapno, " with error code ",
errcode, " at ip=", (void *)ip);
Genode::warning("Unknown exception ", regs->trapno, " with error code ",
regs->errcode, " at ip=", (void *)regs->ip);
ASSERT_NEVER_CALLED;
}

View File

@ -32,5 +32,5 @@ Kernel::Pd::~Pd()
}
void Kernel::Pd::admit(Kernel::Cpu::Context * const c) {
c->init((addr_t)translation_table(), this == Kernel::core_pd()); }
void Kernel::Pd::admit(Kernel::Cpu::Context & c) {
c.init((addr_t)translation_table(), this == Kernel::core_pd()); }

View File

@ -42,7 +42,7 @@ void Kernel::Thread::_mmu_exception()
_become_inactive(AWAITS_RESTART);
_fault_pd = (addr_t)_pd->platform_pd();
_fault_addr = Cpu::Cr2::read();
_fault_writes = (errcode & ERR_P) && (errcode & ERR_W);
_fault_writes = (regs->errcode & ERR_P) && (regs->errcode & ERR_W);
/*
* Core should never raise a page-fault. If this happens, print out an
@ -50,7 +50,7 @@ void Kernel::Thread::_mmu_exception()
*/
if (_pd == Kernel::core_pd())
Genode::error("page fault in core thread (", label(), "): "
"ip=", Genode::Hex(ip), " fault=", Genode::Hex(_fault_addr));
"ip=", Genode::Hex(regs->ip), " fault=", Genode::Hex(_fault_addr));
if (_pager) _pager->submit(1);
return;
@ -61,3 +61,31 @@ void Kernel::Thread::_init() { }
void Kernel::Thread::_call_update_pd() { }
extern void * __tss_client_context_ptr;
/*
 * Return to the user thread via 'iretq'
 *
 * Mirrors 'Cpu_idle::proceed': the address of the context's 'cr3'
 * slot is published through the TSS so the CPU saves the next
 * exception frame directly into the context, the general-purpose
 * registers are popped in the reverse order of the '_kernel_entry'
 * pushes, two context words (presumably trapno/errcode) are skipped,
 * and 'iretq' restores ip, cs, flags, sp, and ss.
 */
void Kernel::Thread::proceed(unsigned const)
{
	/* let the hardware save the interrupt frame into this context */
	void * * tss_stack_ptr = (&__tss_client_context_ptr);
	*tss_stack_ptr = &regs->cr3;

	/* point rsp at the saved registers and reload them */
	asm volatile("mov %0, %%rsp \n"
	             "popq %%r8 \n"
	             "popq %%r9 \n"
	             "popq %%r10 \n"
	             "popq %%r11 \n"
	             "popq %%r12 \n"
	             "popq %%r13 \n"
	             "popq %%r14 \n"
	             "popq %%r15 \n"
	             "popq %%rax \n"
	             "popq %%rbx \n"
	             "popq %%rcx \n"
	             "popq %%rdx \n"
	             "popq %%rdi \n"
	             "popq %%rsi \n"
	             "popq %%rbp \n"
	             "add $16, %%rsp \n"
	             "iretq \n" :: "r" (&regs->r8));
}

View File

@ -20,28 +20,31 @@ using namespace Kernel;
void Thread::exception(unsigned const cpu)
{
switch (trapno) {
case PAGE_FAULT:
using Genode::Cpu_state;
switch (regs->trapno) {
case Cpu_state::PAGE_FAULT:
_mmu_exception();
return;
case NO_MATH_COPROC:
if (_cpu->fpu().fault(*this)) { return; }
case Cpu_state::NO_MATH_COPROC:
if (_cpu->fpu().fault(fpu_regs)) { return; }
Genode::warning(*this, ": FPU error");
_die();
return;
case UNDEFINED_INSTRUCTION:
Genode::warning(*this, ": undefined instruction at ip=", (void*)ip);
case Cpu_state::UNDEFINED_INSTRUCTION:
Genode::warning(*this, ": undefined instruction at ip=", (void*)regs->ip);
_die();
return;
case SUPERVISOR_CALL:
case Cpu_state::SUPERVISOR_CALL:
_call();
return;
}
if (trapno >= INTERRUPTS_START && trapno <= INTERRUPTS_END) {
if (regs->trapno >= Cpu_state::INTERRUPTS_START &&
regs->trapno <= Cpu_state::INTERRUPTS_END) {
_interrupt(cpu);
return;
}
Genode::warning(*this, ": triggered unknown exception ", trapno,
" with error code ", errcode, " at ip=", (void*)ip);
Genode::warning(*this, ": triggered unknown exception ", regs->trapno,
" with error code ", regs->errcode, " at ip=", (void*)regs->ip);
_die();
}

View File

@ -1,23 +0,0 @@
/*
* \brief Macros that are used by multiple assembly files
* \author Martin Stein
* \author Stefan Kalkowski
* \date 2014-01-13
*/
/*
* Copyright (C) 2014-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
.include "hw/spec/x86_64/gdt.s"
/***************************************************
** Constant values that are pretty commonly used **
***************************************************/
/* alignment constraints */
.set MIN_PAGE_SIZE_LOG2, 12
.set DATA_ACCESS_ALIGNM_LOG2, 2

View File

@ -1,305 +0,0 @@
/*
* \brief Transition between kernel/userland
* \author Adrian-Ken Rueegsegger
* \author Martin Stein
* \author Reto Buerki
* \author Stefan Kalkowski
* \date 2011-11-15
*/
/*
* Copyright (C) 2011-2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
.include "macros.s"
/* size of pointer to CPU context */
.set CONTEXT_PTR_SIZE, 1 * 8
/* globally mapped buffer storage */
.set BUFFER_SIZE, 6 * 8
/* offsets of the member variables in a CPU context */
.set SP_OFFSET, 1 * 8
.set R8_OFFSET, 2 * 8
.set RAX_OFFSET, 10 * 8
.set ERRCODE_OFFSET, 17 * 8
.set FLAGS_OFFSET, 18 * 8
.set TRAPNO_OFFSET, 19 * 8
.set CS_OFFSET, 20 * 8
.set SS_OFFSET, 22 * 8
.set CR3_OFFSET, 23 * 8
/* tss segment constants */
.set TSS_LIMIT, 0x68
.set TSS_TYPE, 0x8900
/* mtc virt addresses */
.set MT_BASE, 0xffffffc000000000
.set MT_TSS, MT_BASE + (_mt_tss - _mt_begin)
.set MT_ISR, MT_BASE
.set MT_IRQ_STACK, MT_BASE + (_mt_kernel_interrupt_stack - _mt_begin)
.set MT_ISR_ENTRY_SIZE, 12
.set IDT_FLAGS_PRIVILEGED, 0x8e01
.set IDT_FLAGS_UNPRIVILEGED, 0xee01
.macro _isr_entry
.align 4, 0x90
.endm
.macro _exception vector
_isr_entry
push $0
push $\vector
jmp _mt_kernel_entry_pic
.endm
.macro _exception_with_code vector
_isr_entry
nop
nop
push $\vector
jmp _mt_kernel_entry_pic
.endm
.macro _idt_entry addr flags
.word \addr & 0xffff
.word 0x0008
.word \flags
.word (\addr >> 16) & 0xffff
.long \addr >> 32
.long 0
.endm
.macro _load_address label reg
mov \label@GOTPCREL(%rip), %\reg
.endm
.section ".text.crt0"
/*
* Page aligned base of mode transition code.
*
* This position independent code switches between a kernel context and a
* user context and thereby between their address spaces. Due to the latter
* it must be mapped executable to the same region in every address space.
* To enable such switching, the kernel context must be stored within this
 * region, thus one should map it solely accessible for privileged modes.
*/
.p2align MIN_PAGE_SIZE_LOG2
.global _mt_begin
_mt_begin:
/*
* On user exceptions the CPU has to jump to one of the following
* Interrupt Service Routines (ISRs) to switch to a kernel context.
*/
_exception 0
_exception 1
_exception 2
_exception 3
_exception 4
_exception 5
_exception 6
_exception 7
_exception_with_code 8
_exception 9
_exception_with_code 10
_exception_with_code 11
_exception_with_code 12
_exception_with_code 13
_exception_with_code 14
_exception 15
_exception 16
_exception_with_code 17
_exception 18
_exception 19
.set vec, 20
.rept 236
_exception vec
.set vec, vec + 1
.endr
/* space for a copy of the kernel context */
.p2align 2
.global _mt_master_context_begin
_mt_master_context_begin:
/* space must be at least as large as 'Cpu_state' */
.space 22*8
.global _mt_master_context_end
_mt_master_context_end:
/* space for a client context-pointer per CPU */
.p2align 2
.global _mt_client_context_ptr
_mt_client_context_ptr:
.space CONTEXT_PTR_SIZE
/************************************************
** Temporary interrupt stack **
** Set as RSP for privilege levels 0-2 and as **
** IST1 in TSS, used by all IDT entries **
** See Intel SDM Vol. 3A, section 7.7 **
************************************************/
/* a globally mapped buffer per CPU */
.p2align 4
.global _mt_buffer
_mt_buffer:
.space 6 * BUFFER_SIZE
.global _mt_kernel_interrupt_stack
_mt_kernel_interrupt_stack:
.global _mt_kernel_entry_pic
_mt_kernel_entry_pic:
/* Copy client context RAX to buffer */
pushq %rax
/* Switch to kernel page tables */
_load_address _mt_master_context_begin rax
mov CR3_OFFSET(%rax), %rax
mov %rax, %cr3
/* Save information on interrupt stack frame in client context */
_load_address _mt_client_context_ptr rax
mov (%rax), %rax
popq RAX_OFFSET(%rax)
popq TRAPNO_OFFSET(%rax)
popq ERRCODE_OFFSET(%rax)
popq (%rax)
popq FLAGS_OFFSET(%rax) /* Discard cs */
popq FLAGS_OFFSET(%rax)
popq SP_OFFSET(%rax)
/* Save register values to client context */
lea ERRCODE_OFFSET(%rax), %rsp
pushq %rbp
pushq %rsi
pushq %rdi
pushq %rdx
pushq %rcx
pushq %rbx
sub $8, %rsp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %r11
pushq %r10
pushq %r9
pushq %r8
/* Restore kernel stack and continue kernel execution */
_load_address _mt_master_context_begin rsp
mov (%rsp), %rax
mov SP_OFFSET(%rsp), %rsp
jmp *%rax
.global _mt_user_entry_pic
_mt_user_entry_pic:
/* Prepare stack frame in mt buffer (Intel SDM Vol. 3A, figure 6-8) */
_load_address _mt_client_context_ptr rax
mov (%rax), %rax
_load_address _mt_buffer rsp
add $BUFFER_SIZE, %rsp
pushq SS_OFFSET(%rax)
pushq SP_OFFSET(%rax)
pushq FLAGS_OFFSET(%rax)
pushq CS_OFFSET(%rax)
pushq (%rax)
/* Restore register values from client context */
lea R8_OFFSET(%rax), %rsp
_load_address _mt_buffer rbx
popq %r8
popq %r9
popq %r10
popq %r11
popq %r12
popq %r13
popq %r14
popq %r15
popq (%rbx)
popq %rbx
popq %rcx
popq %rdx
popq %rdi
popq %rsi
popq %rbp
/* Switch page tables */
mov CR3_OFFSET(%rax), %rax
mov %rax, %cr3
/* Set stack back to mt buffer and restore client RAX */
_load_address _mt_buffer rsp
popq %rax
iretq
/* VM entry: Switch to guest VM */
.global _vt_vm_entry
_vt_vm_entry:
sti
mov $1, %rax
vmcall
/*****************************************
** Interrupt Descriptor Table (IDT) **
** See Intel SDM Vol. 3A, section 6.10 **
*****************************************/
.global _mt_idt
.align 8
_mt_idt:
/* first 128 entries */
.set isr_addr, MT_ISR
.rept 0x80
_idt_entry isr_addr IDT_FLAGS_PRIVILEGED
.set isr_addr, isr_addr + MT_ISR_ENTRY_SIZE
.endr
/* syscall entry 0x80 */
_idt_entry isr_addr IDT_FLAGS_UNPRIVILEGED
.set isr_addr, isr_addr + MT_ISR_ENTRY_SIZE
/* remaining entries */
.rept 127
_idt_entry isr_addr IDT_FLAGS_PRIVILEGED
.set isr_addr, isr_addr + MT_ISR_ENTRY_SIZE
.endr
/****************************************
** Task State Segment (TSS) **
** See Intel SDM Vol. 3A, section 7.7 **
****************************************/
.global _mt_tss
.align 8
_mt_tss:
.space 4
.quad MT_IRQ_STACK
.quad MT_IRQ_STACK
.quad MT_IRQ_STACK
.space 8
.quad MT_IRQ_STACK
.space 48
_define_gdt MT_TSS
/* end of the mode transition code */
.global _mt_end
_mt_end:

View File

@ -17,16 +17,17 @@
void Kernel::Cpu_idle::exception(unsigned const cpu)
{
if (trapno == RESET) return;
if (regs->trapno == Cpu::Context::RESET) return;
if (trapno >= INTERRUPTS_START && trapno <= INTERRUPTS_END) {
pic()->irq_occurred(trapno);
if (regs->trapno >= Cpu::Context::INTERRUPTS_START &&
regs->trapno <= Cpu::Context::INTERRUPTS_END) {
pic()->irq_occurred(regs->trapno);
_interrupt(cpu);
return;
}
Genode::warning("Unknown exception ", trapno, " with error code ", errcode,
" at ip=", (void *)ip);
Genode::warning("Unknown exception ", regs->trapno, " with error code ",
regs->errcode, " at ip=", (void *)regs->ip);
ASSERT_NEVER_CALLED;
}

View File

@ -20,29 +20,30 @@ using namespace Kernel;
void Thread::exception(unsigned const cpu)
{
switch (trapno) {
case PAGE_FAULT:
switch (regs->trapno) {
case Cpu::Context::PAGE_FAULT:
_mmu_exception();
return;
case NO_MATH_COPROC:
if (_cpu->fpu().fault(*this)) { return; }
case Cpu::Context::NO_MATH_COPROC:
if (_cpu->fpu().fault(fpu_regs)) { return; }
Genode::warning(*this, ": FPU error");
_die();
return;
case UNDEFINED_INSTRUCTION:
Genode::warning(*this, ": undefined instruction at ip=", (void*)ip);
case Cpu::Context::UNDEFINED_INSTRUCTION:
Genode::warning(*this, ": undefined instruction at ip=", (void*)regs->ip);
_die();
return;
case SUPERVISOR_CALL:
case Cpu::Context::SUPERVISOR_CALL:
_call();
return;
}
if (trapno >= INTERRUPTS_START && trapno <= INTERRUPTS_END) {
pic()->irq_occurred(trapno);
if (regs->trapno >= Cpu::Context::INTERRUPTS_START &&
regs->trapno <= Cpu::Context::INTERRUPTS_END) {
pic()->irq_occurred(regs->trapno);
_interrupt(cpu);
return;
}
Genode::warning(*this, ": triggered unknown exception ", trapno,
" with error code ", errcode, " at ip=", (void*)ip);
Genode::warning(*this, ": triggered unknown exception ", regs->trapno,
" with error code ", regs->errcode, " at ip=", (void*)regs->ip);
_die();
}

View File

@ -19,9 +19,6 @@
#include <cpu/cpu_state.h>
#include <pic.h>
extern void * _vt_vm_entry;
extern void * _mt_client_context_ptr;
Kernel::Vm::Vm(void * const state, Kernel::Signal_context * const context,
void * const)
: Cpu_job(Cpu_priority::MIN, 0),
@ -30,6 +27,12 @@ Kernel::Vm::Vm(void * const state, Kernel::Signal_context * const context,
_table(nullptr)
{
affinity(cpu_pool()->primary_cpu());
/*
* Initialize VM context as a core/kernel context to prevent
* page-table switching before doing the world switch
*/
regs->init((addr_t)core_pd()->translation_table(), true);
}
@ -39,29 +42,35 @@ Kernel::Vm::~Vm() { }
void Kernel::Vm::exception(unsigned const cpu_id)
{
pause();
if (_state->trapno == 200) {
if (regs->trapno == 200) {
_context->submit(1);
return;
}
if (_state->trapno >= Genode::Cpu_state::INTERRUPTS_START &&
_state->trapno <= Genode::Cpu_state::INTERRUPTS_END) {
pic()->irq_occurred(_state->trapno);
if (regs->trapno >= Genode::Cpu_state::INTERRUPTS_START &&
regs->trapno <= Genode::Cpu_state::INTERRUPTS_END) {
pic()->irq_occurred(regs->trapno);
_interrupt(cpu_id);
_context->submit(1);
return;
}
Genode::warning("VM: triggered unknown exception ", _state->trapno,
" with error code ", _state->errcode);
Genode::warning("VM: triggered unknown exception ", regs->trapno,
" with error code ", regs->errcode);
ASSERT_NEVER_CALLED;
}
extern void * __tss_client_context_ptr;
void Kernel::Vm::proceed(unsigned const cpu_id)
{
mtc()->switch_to(reinterpret_cast<Cpu::Context*>(_state), cpu_id,
(addr_t) &_vt_vm_entry, (addr_t) &_mt_client_context_ptr);
void * * tss_stack_ptr = (&__tss_client_context_ptr);
*tss_stack_ptr = &regs->cr3;
asm volatile("sti \n"
"mov $1, %rax \n"
"vmcall");
}

View File

@ -15,5 +15,13 @@
#define _CORE__SPEC__X86_64__TRANSLATION_TABLE_H_
#include <hw/spec/x86_64/page_table.h>
#include <cpu.h>
void Hw::Pml4_table::_invalidate_range(addr_t vo, size_t size)
{
	/* FIXME: do not necessarily flush the whole TLB */

	/* re-write the current CR3 value to trigger the flush */
	auto const cr3_value = Genode::Cpu::Cr3::read();
	Genode::Cpu::Cr3::write(cr3_value);
}
#endif /* _CORE__SPEC__X86_64__TRANSLATION_TABLE_H_ */

View File

@ -0,0 +1,54 @@
/*
* \brief Utility for aligned object members
* \author Stefan Kalkowski
* \date 2017-09-01
*/
/*
* Copyright (C) 2017 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU Affero General Public License version 3.
*/
#ifndef _INCLUDE__BASE__INTERNAL__ALIGN_AT_H_
#define _INCLUDE__BASE__INTERNAL__ALIGN_AT_H_
#include <util/construct_at.h>
#include <base/stdint.h>
namespace Genode {
template<typename, size_t> class Align_at;
}
template <typename T, Genode::size_t ALIGN>
class Genode::Align_at
{
	private:

		/*
		 * The mask arithmetic in '_start_addr' is only valid for
		 * power-of-two alignments, catch misuse at compile time
		 */
		static_assert(ALIGN && !(ALIGN & (ALIGN - 1)),
		              "ALIGN must be a power of two");

		/* raw storage, over-sized so an aligned slot always fits */
		char _space[sizeof(T) + ALIGN - 1];

		/* reference to the in-place constructed object within '_space' */
		T & _obj;

		/**
		 * Return first address within '_space' that satisfies ALIGN
		 */
		void * _start_addr()
		{
			bool const aligned = !((addr_t)_space & (ALIGN - 1));
			return aligned ? (void *)_space
			               : (void *)(((addr_t)_space & ~(ALIGN - 1)) + ALIGN);
		}

	public:

		/**
		 * Construct the wrapped object in-place
		 *
		 * \param args  constructor arguments, perfectly forwarded to T
		 *
		 * Forwarding via 'static_cast<ARGS&&>' preserves the value
		 * category of each argument (rvalues stay movable) without
		 * depending on a standard-library header.
		 */
		template <typename... ARGS>
		Align_at(ARGS &&... args)
		: _obj(*construct_at<T>(_start_addr(), static_cast<ARGS&&>(args)...)) { }

		~Align_at() { _obj.~T(); }

		T       * operator -> ()       { return &_obj; }
		T const * operator -> () const { return &_obj; }
		T       & operator *  ()       { return  _obj; }
		T const & operator *  () const { return  _obj; }
};
#endif /* _INCLUDE__BASE__INTERNAL__ALIGN_AT_H_ */

View File

@ -28,7 +28,9 @@ namespace Hw {
Memory_region const core_page_tables();
Memory_region const core_mmio();
Memory_region const core_heap();
Memory_region const exception_vector();
Memory_region const system_exception_vector();
Memory_region const hypervisor_exception_vector();
Memory_region const supervisor_exception_vector();
Memory_region const boot_info();
}
}

View File

@ -36,14 +36,20 @@ Memory_region const Hw::Mm::core_stack_area() {
Memory_region const Hw::Mm::core_page_tables() {
return Memory_region(0xc0000000UL, 0x10000000UL); }
Memory_region const Hw::Mm::core_utcb_main_thread() {
return Memory_region(0xfffef000, sizeof(Native_utcb)); }
Memory_region const Hw::Mm::core_mmio() {
return Memory_region(0xd0000000UL, 0x10000000UL); }
Memory_region const Hw::Mm::system_exception_vector() {
return Memory_region(0xfff00000UL, 0x1000UL); }
Memory_region const Hw::Mm::hypervisor_exception_vector() {
return Memory_region(0xfff10000UL, 0x1000UL); }
Memory_region const Hw::Mm::boot_info() {
return Memory_region(0xfffe0000UL, 0x1000UL); }
Memory_region const Hw::Mm::exception_vector() {
Memory_region const Hw::Mm::core_utcb_main_thread() {
return Memory_region(0xfffef000, sizeof(Native_utcb)); }
Memory_region const Hw::Mm::supervisor_exception_vector() {
return Memory_region(0xffff0000UL, 0x1000UL); }

View File

@ -45,5 +45,5 @@ Memory_region const Hw::Mm::core_mmio() {
Memory_region const Hw::Mm::boot_info() {
return Memory_region(0xffffffe040000000UL, 0x1000UL); }
Memory_region const Hw::Mm::exception_vector() {
return Memory_region(0xfffffff000000000UL, 0x1000UL); }
Memory_region const Hw::Mm::supervisor_exception_vector() {
return Memory_region(KERNEL_START, 0x1000UL); }

View File

@ -110,7 +110,7 @@ struct Hw::Arm_cpu
struct Ttbr_64bit : Genode::Register<64>
{
struct Ba : Bitfield<5, 34> { }; /* translation table base */
struct Ba : Bitfield<4, 35> { }; /* translation table base */
struct Asid : Bitfield<48,8> { };
};
@ -121,9 +121,14 @@ struct Hw::Arm_cpu
** Large Physical Address Extensions **
***************************************/
struct T0sz : Bitfield<0, 3> { };
struct Irgn0 : Bitfield<8, 2> { };
struct Orgn0 : Bitfield<10, 2> { };
struct Sh0 : Bitfield<12, 2> { };
struct T1sz : Bitfield<16, 3> { };
struct Irgn1 : Bitfield<24, 2> { };
struct Orgn1 : Bitfield<26, 2> { };
struct Sh1 : Bitfield<28, 2> { };
struct Eae : Bitfield<31, 1> { }; /* extended address enable */
);

View File

@ -530,5 +530,13 @@ struct Hw::Page_table : Level_1_stage_1_translation_table
CORE_LEVEL_2_TT_COUNT *
TABLE_LEVEL_X_ENTRIES,
};
Page_table() : Level_1_stage_1_translation_table() { }
/**
* On ARM we do not need to copy top-level kernel entries
* because the virtual-memory kernel part is held in a separate table
*/
explicit Page_table(Page_table &o) : Level_1_stage_1_translation_table() { }
};
#endif /* _SRC__LIB__HW__SPEC__ARM__LPAE_H_ */

View File

@ -456,6 +456,12 @@ class Hw::Page_table
Genode::memset(&_entries, 0, sizeof(_entries));
}
/**
* On ARM we do not need to copy top-level kernel entries
* because the virtual-memory kernel part is held in a separate table
*/
explicit Page_table(Page_table &o) : Page_table() { }
/**
* Maximum virtual offset that can be translated by this table
*/

View File

@ -26,7 +26,7 @@ namespace Hw {
/**
* SBI calls to machine mode.
*
* Keep in sync with mode_transition.s.
* Keep in sync with exception_vector.s.
*/
constexpr Call_arg call_id_set_sys_timer() { return 200; }
constexpr Call_arg call_id_get_sys_timer() { return 201; }

View File

@ -222,7 +222,7 @@ class Sv39::Level_x_translation_table
typename Descriptor::access_t blk_desc =
Block_descriptor::create(flags, pa);
if (Descriptor::valid(desc) && desc == blk_desc)
if (Descriptor::valid(desc) && desc != blk_desc)
throw Double_insertion();
desc = blk_desc;
@ -362,7 +362,7 @@ namespace Sv39 {
Descriptor::access_t blk_desc =
Block_descriptor::create(flags, pa);
if (Descriptor::valid(desc) && desc == blk_desc)
if (Descriptor::valid(desc) && desc != blk_desc)
throw Double_insertion();
desc = blk_desc;
@ -394,6 +394,13 @@ namespace Hw {
_count(CORE_VM_AREA_SIZE, Sv39::SIZE_LOG2_2M),
};
Page_table() : Sv39::Level_1_translation_table() {}
Page_table(Page_table & kernel_table)
: Sv39::Level_1_translation_table() {
static unsigned first = (0xffffffc000000000UL & VM_MASK) >> Sv39::SIZE_LOG2_1G;
for (unsigned i = first; i < MAX_ENTRIES; i++)
_entries[i] = kernel_table._entries[i]; }
};
}

View File

@ -20,8 +20,7 @@
.macro _define_gdt tss_address
.align 4
.space 2
.global _mt_gdt_ptr
_mt_gdt_ptr:
__gdt_ptr:
.word 55 /* limit */
.long 0 /* base address */
@ -29,8 +28,8 @@
.set TSS_TYPE, 0x8900
.align 8
.global _mt_gdt_start
_mt_gdt_start:
.global __gdt_start
__gdt_start:
/* Null descriptor */
.quad 0
/* 64-bit code segment descriptor */
@ -55,6 +54,6 @@
.long ((\tss_address >> 24) & 0xff) << 24 | ((\tss_address >> 16) & 0xff) | TSS_TYPE
.long \tss_address >> 32
.long 0
.global _mt_gdt_end
_mt_gdt_end:
.global __gdt_end
__gdt_end:
.endm

View File

@ -622,6 +622,8 @@ class Hw::Pml4_table
/ (1UL << alignment);
}
inline void _invalidate_range(addr_t vo, size_t size);
public:
static constexpr size_t MIN_PAGE_SIZE_LOG2 = SIZE_LOG2_4KB;
@ -633,6 +635,13 @@ class Hw::Pml4_table
Genode::memset(&_entries, 0, sizeof(_entries));
}
explicit Pml4_table(Pml4_table & kernel_table) : Pml4_table()
{
static size_t first = (0xffffffc000000000 & SIZE_MASK) >> PAGE_SIZE_LOG2;
for (size_t i = first; i < MAX_ENTRIES; i++)
_entries[i] = kernel_table._entries[i];
}
/**
* Returns True if table does not contain any page mappings.
*
@ -668,8 +677,11 @@ class Hw::Pml4_table
* \param size region size
* \param alloc second level translation table allocator
*/
void remove_translation(addr_t vo, size_t size, Allocator & alloc) {
_range_op(vo, 0, size, Remove_func(alloc)); }
void remove_translation(addr_t vo, size_t size, Allocator & alloc)
{
_range_op(vo, 0, size, Remove_func(alloc));
_invalidate_range(vo, size);
}
} __attribute__((aligned(1 << ALIGNM_LOG2)));
@ -677,6 +689,8 @@ class Hw::Page_table : public Pml4_table
{
public:
using Pml4_table::Pml4_table;
enum {
TABLE_LEVEL_X_SIZE_LOG2 = SIZE_LOG2_4KB,
CORE_VM_AREA_SIZE = 1024 * 1024 * 1024,

View File

@ -35,8 +35,6 @@ struct Genode::Cpu_state
INTERRUPTS_END = 0xff,
};
addr_t ip = 0;
addr_t sp = 0;
addr_t r8 = 0;
addr_t r9 = 0;
addr_t r10 = 0;
@ -52,11 +50,12 @@ struct Genode::Cpu_state
addr_t rdi = 0;
addr_t rsi = 0;
addr_t rbp = 0;
addr_t errcode = 0;
addr_t eflags = 0;
addr_t trapno = RESET;
addr_t errcode = 0;
addr_t ip = 0;
addr_t cs = 0;
addr_t ds = 0;
addr_t eflags = 0;
addr_t sp = 0;
addr_t ss = 0;
};

View File

@ -1,6 +1,6 @@
SPECS += arm
CC_MARCH += -march=armv6
CC_MARCH += -march=armv6k
REP_INC_DIR += include/spec/arm_v6

View File

@ -27,7 +27,7 @@ SECTIONS
_prog_img_beg = .;
/* put entry code at the start of the text segment / raw binary */
*(.text.crt0)
KEEP (*(.text.crt0))
*(.init)
*(.text .text.* .gnu.linkonce.t.*)