hw_arndale: idle threads on secondary processors

fix #1006
This commit is contained in:
Martin Stein 2013-12-17 18:10:02 +01:00 committed by Norman Feske
parent e83849cf99
commit 6a3368ee27
67 changed files with 2115 additions and 872 deletions

8
base-hw/mk/spec-hw.mk Normal file
View File

@ -0,0 +1,8 @@
#
# \brief Offer build configurations that are specific to base-hw
# \author Martin Stein
# \date 2014-02-26
#
# configure multiprocessor mode
CC_OPT += -Wa,--defsym -Wa,PROCESSORS=$(PROCESSORS) -DPROCESSORS=$(PROCESSORS)

View File

@ -7,6 +7,9 @@
# denote which specs are also fulfilled by this spec
SPECS += hw platform_arndale
# configure multiprocessor mode
PROCESSORS = 2
# add repository relative paths
REP_INC_DIR += include/exynos5_uart

View File

@ -7,6 +7,9 @@
# denote which specs are also fulfilled by this spec
SPECS += hw platform_imx31 epit
# configure multiprocessor mode
PROCESSORS = 1
# set address where to link the text segment at
LD_TEXT_ADDR ?= 0x82000000

View File

@ -7,6 +7,9 @@
# denote which specs are also fulfilled by this spec
SPECS += hw platform_imx53 epit
# configure multiprocessor mode
PROCESSORS = 1
# set address where to link the text segment at
LD_TEXT_ADDR ?= 0x70010000

View File

@ -7,6 +7,9 @@
# denote which specs are also fulfilled by this spec
SPECS += hw platform_odroid_xu
# configure multiprocessor mode
PROCESSORS = 1
# add repository relative paths
REP_INC_DIR += include/exynos5_uart

View File

@ -7,6 +7,9 @@
# denote which specs are also fulfilled by this spec
SPECS += hw platform_panda
# configure multiprocessor mode
PROCESSORS = 1
# set address where to link the text segment at
LD_TEXT_ADDR ?= 0x81000000

View File

@ -7,6 +7,9 @@
# denote which specs are also fulfilled by this spec
SPECS += hw platform_pbxa9
# configure multiprocessor mode
PROCESSORS = 1
# set address where to link text segment at
LD_TEXT_ADDR ?= 0x70000000

View File

@ -7,6 +7,9 @@
# denote which specs are also fulfilled by this spec
SPECS += hw platform_rpi
# configure multiprocessor mode
PROCESSORS = 1
# set address where to link the text segment at
LD_TEXT_ADDR ?= 0x800000

View File

@ -7,6 +7,9 @@
# denote which specs are also fulfilled by this spec
SPECS += hw platform_vea9x4
# configure multiprocessor mode
PROCESSORS = 1
# set address where to link text segment at
LD_TEXT_ADDR ?= 0x01000000

View File

@ -77,8 +77,7 @@ Thread_event Thread::* Thread::_event(unsigned const id) const
void Thread::_mmu_exception()
{
cpu_scheduler()->remove(this);
_state = AWAITS_RESUME;
_unschedule(AWAITS_RESUME);
if (in_fault(_fault_addr, _fault_writes)) {
_fault_tlb = (addr_t)_pd->tlb();
_fault_signal = _fault.signal_context_id();
@ -87,3 +86,10 @@ void Thread::_mmu_exception()
}
PERR("unknown MMU exception");
}
/*************************
** Kernel::Cpu_context **
*************************/
void Kernel::Cpu_context::_init(size_t const stack_size) { r12 = stack_size; }

View File

@ -12,9 +12,33 @@
* under the terms of the GNU General Public License version 2.
*/
/************
** Macros **
************/
.include "macros.s"
/**
* Get base of the first kernel-stack and the common kernel-stack size
*
* \param base_dst_reg register that shall receive the stack-area base
* \param size_dst_reg register that shall receive the size of a kernel stack
*/
.macro _get_constraints_of_kernel_stacks base_dst_reg, size_dst_reg
/* load address of the kernel-stack area (symbol defined elsewhere, presumably in kernel C++ code or the linker script — confirm) */
ldr \base_dst_reg, =kernel_stack
/* load address of the stack-size variable ... */
ldr \size_dst_reg, =kernel_stack_size
/* ... and dereference it to obtain the size of one kernel stack */
ldr \size_dst_reg, [\size_dst_reg]
.endm
.section ".text.crt0"
/* program entry-point */
/****************************************
** Startup code for primary processor **
****************************************/
.global _start
_start:
@ -40,22 +64,29 @@
b 1b
2:
/* prepare the first call of the kernel main-routine */
ldr sp, =_kernel_stack_high
bl setup_kernel
/* setup temporary stack pointer for uniprocessor mode */
_get_constraints_of_kernel_stacks r0, r1
add sp, r0, r1
/* uniprocessor kernel-initialization which activates multiprocessor */
bl init_kernel_uniprocessor
/***************************************************
** Startup code that is common to all processors **
***************************************************/
.global _start_secondary_processors
_start_secondary_processors:
/* setup multiprocessor-aware kernel stack-pointer */
_get_constraints_of_kernel_stacks r0, r1
_init_kernel_sp r0, r1
/* do multiprocessor kernel-initialization */
bl init_kernel_multiprocessor
/* call the kernel main-routine */
ldr sp, =_kernel_stack_high
bl kernel
/* catch erroneous return of the kernel main-routine */
3: b 3b
.section .bss
/* kernel stack, must be aligned to an 8-byte boundary */
.align 3
.space 64 * 1024
.global _kernel_stack_high
_kernel_stack_high:
1: b 1b

View File

@ -0,0 +1,110 @@
/*
* \brief Macros that are used by multiple assembly files
* \author Martin Stein
* \date 2014-01-13
*/
/*
* Copyright (C) 2014 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
.include "macros_arm.s"
/*******************
** Common macros **
*******************/
/**
* Determine the top of the kernel stack of this processor and apply it as SP
*
* \base_reg register that contains the base of the kernel-stacks area
* \size_reg register that contains the size of one kernel stack
*/
/* NOTE(review): \buf_reg is declared but never used by this macro — confirm whether a caller relies on the three-parameter signature */
.macro _init_kernel_sp base_reg, size_reg, buf_reg
/* get kernel name of processor (SP is used as scratch, so no valid stack is needed while this macro runs) */
_get_processor_id sp
/* calculate top of the kernel-stack of this processor and apply as SP */
add sp, #1
/* NOTE: \size_reg is clobbered here */
mul \size_reg, \size_reg, sp
add sp, \base_reg, \size_reg
.endm
/******************************************
** Macros regarding the mode transition **
******************************************/
/**
* Constant values that the mode transition uses
*/
.macro _mt_constants
/* kernel names of exceptions that can interrupt a user */
.set rst_type, 1
.set und_type, 2
.set svc_type, 3
.set pab_type, 4
.set dab_type, 5
.set irq_type, 6
.set fiq_type, 7
/* values that get subtracted from the interrupted user PC before it is saved, per exception type */
.set rst_pc_adjust, 0
.set und_pc_adjust, 4
.set svc_pc_adjust, 0
.set pab_pc_adjust, 4
.set dab_pc_adjust, 8
.set irq_pc_adjust, 4
.set fiq_pc_adjust, 4
/* offsets of the member variables in a processor context (one machine word, i.e. 4 bytes, per register slot) */
.set r12_offset, 12 * 4
.set sp_offset, 13 * 4
.set lr_offset, 14 * 4
.set pc_offset, 15 * 4
.set psr_offset, 16 * 4
.set exception_type_offset, 17 * 4
.set contextidr_offset, 18 * 4
.set section_table_offset, 19 * 4
/* alignment constraints */
.set min_page_size_log2, 12
/* size of local variables */
.set context_ptr_size, 1 * 4
.endm
/**
* Local data structures that the mode transition uses
*/
/*
 * NOTE(review): 'PROCESSORS' is an assembler symbol that the build system
 * hands in via '-Wa,--defsym', and 'buffer_size' must be defined by the
 * including file before this macro is expanded.
 */
.macro _mt_local_variables
/* space for a copy of the kernel context */
.p2align 2
.global _mt_master_context_begin
_mt_master_context_begin:
.space 32 * 4
.global _mt_master_context_end
_mt_master_context_end:
/* space for a client context-pointer per processor */
.p2align 2
.global _mt_client_context_ptr
_mt_client_context_ptr:
.rept PROCESSORS
.space context_ptr_size
.endr
/* a globally mapped buffer per processor */
.p2align 2
.global _mt_buffer
_mt_buffer:
.rept PROCESSORS
.space buffer_size
.endr
.endm

View File

@ -0,0 +1,24 @@
/*
* \brief Macros that are used by multiple assembly files
* \author Martin Stein
* \date 2014-01-13
*/
/*
* Copyright (C) 2014 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/**
* Determine the kernel name of the executing processor
*
* \param target_reg register that shall receive the processor name
*/
.macro _get_processor_id target_reg
/* no multiprocessing supported for ARMv6 */
/* the one and only processor therefore always has kernel name 0 */
mov \target_reg, #0
.endm

View File

@ -11,6 +11,24 @@
* under the terms of the GNU General Public License version 2.
*/
.include "macros.s"
/***************
** Constants **
***************/
/* size of local variables */
.set buffer_size, 1 * 4
/* common constants */
_mt_constants
/************
** Macros **
************/
/**
* Invalidate all entries of the branch prediction cache
*
@ -22,7 +40,7 @@
*/
.macro _flush_branch_predictor
mcr p15, 0, sp, c7, c5, 6
/* swi 0xf00000 */
/*swi 0xf00000 */
.endm
/**
@ -47,13 +65,13 @@
/* load kernel cidr */
adr sp, _mt_master_context_begin
ldr sp, [sp, #18*4]
ldr sp, [sp, #contextidr_offset]
mcr p15, 0, sp, c13, c0, 1
_flush_branch_predictor
/* load kernel section table */
adr sp, _mt_master_context_begin
ldr sp, [sp, #19*4]
ldr sp, [sp, #section_table_offset]
mcr p15, 0, sp, c2, c0, 0
_flush_branch_predictor
@ -72,22 +90,22 @@
stmia sp, {r0-r12}^
/* save user lr and sp */
add r0, sp, #13*4
add r0, sp, #sp_offset
stmia r0, {sp,lr}^
/* adjust and save user pc */
.if \pc_adjust != 0
sub lr, lr, #\pc_adjust
.endif
str lr, [sp, #15*4]
str lr, [sp, #pc_offset]
/* save user psr */
mrs r0, spsr
str r0, [sp, #16*4]
str r0, [sp, #psr_offset]
/* save type of exception that interrupted the user */
mov r0, #\exception_type
str r0, [sp, #17*4]
str r0, [sp, #exception_type_offset]
/*
* Switch to supervisor mode
@ -102,133 +120,123 @@
adr r0, _mt_master_context_begin
/* load kernel context */
add r0, r0, #13*4
ldmia r0, {sp, lr, pc}
add r0, r0, #sp_offset
ldm r0, {sp, lr, pc}
.endm
/**********************************
** Linked into the text section **
**********************************/
.section .text
/*
* The mode transition PIC switches between a kernel context and a user
* context and thereby between their address spaces. Due to the latter
* Page aligned base of mode transition code.
*
* This position independent code switches between a kernel context and a
* user context and thereby between their address spaces. Due to the latter
* it must be mapped executable to the same region in every address space.
* To enable such switching, the kernel context must be stored within this
region, thus one should map it solely accessible for privileged modes.
*/
.p2align 12
.p2align min_page_size_log2
.global _mt_begin
_mt_begin:
/*
* On user exceptions the CPU has to jump to one of the following
* 7 entry vectors to switch to a kernel context.
*/
.global _mt_kernel_entry_pic
_mt_kernel_entry_pic:
/*
* On user exceptions the CPU has to jump to one of the following
* seven entry vectors to switch to a kernel context.
*/
.global _mt_kernel_entry_pic
_mt_kernel_entry_pic:
b _rst_entry /* 0x00: reset */
b _und_entry /* 0x04: undefined instruction */
b _swi_entry /* 0x08: software interrupt */
b _pab_entry /* 0x0c: prefetch abort */
b _dab_entry /* 0x10: data abort */
nop /* 0x14: reserved */
b _irq_entry /* 0x18: interrupt request */
b _fiq_entry /* 0x1c: fast interrupt request */
b _rst_entry /* 0x00: reset */
b _und_entry /* 0x04: undefined instruction */
b _swi_entry /* 0x08: software interrupt */
b _pab_entry /* 0x0c: prefetch abort */
b _dab_entry /* 0x10: data abort */
nop /* 0x14: reserved */
b _irq_entry /* 0x18: interrupt request */
b _fiq_entry /* 0x1c: fast interrupt request */
/* PICs that switch from an user exception to the kernel */
_rst_entry: _user_to_kernel_pic 1, 0
_und_entry: _user_to_kernel_pic 2, 4
_swi_entry:
/* PICs that switch from a user exception to the kernel */
_rst_entry: _user_to_kernel_pic rst_type, rst_pc_adjust
_und_entry: _user_to_kernel_pic und_type, und_pc_adjust
_swi_entry:
/*
* FIXME fast SWI routines pollute the SVC SP but we have
* to call them especially in SVC mode
*/
/*
* FIXME fast SWI routines pollute the SVC SP but we have
* to call them especially in SVC mode
*/
/* check if SWI requests a fast service routine */
/* ldr sp, [r14, #-0x4]
and sp, sp, #0xffffff
*/
/* fast "instruction barrier" service routine */
/* cmp sp, #0xf00000
bne _mt_slow_swi
movs pc, r14
*/
/* slow high level service routine */
_mt_slow_swi:
_user_to_kernel_pic 3, 0
/* check if SWI requests a fast service routine */
/*ldr sp, [r14, #-0x4]*/
/*and sp, sp, #0xffffff*/
_pab_entry: _user_to_kernel_pic 4, 4
_dab_entry: _user_to_kernel_pic 5, 8
_irq_entry: _user_to_kernel_pic 6, 4
_fiq_entry: _user_to_kernel_pic 7, 4
/* fast "instruction barrier" service routine */
/*cmp sp, #0xf00000*/
/*bne _slow_swi_entry*/
/*movs pc, r14*/
/* kernel must jump to this point to switch to a user context */
.p2align 2
.global _mt_user_entry_pic
_mt_user_entry_pic:
/* slow high level service routine */
_slow_swi_entry: _user_to_kernel_pic svc_type, svc_pc_adjust
/* get user context pointer */
ldr lr, _mt_client_context_ptr
_pab_entry: _user_to_kernel_pic pab_type, pab_pc_adjust
_dab_entry: _user_to_kernel_pic dab_type, dab_pc_adjust
_irq_entry: _user_to_kernel_pic irq_type, irq_pc_adjust
_fiq_entry: _user_to_kernel_pic fiq_type, fiq_pc_adjust
/* buffer user pc */
ldr r0, [lr, #15*4]
adr r1, _mt_buffer
str r0, [r1]
/* kernel must jump to this point to switch to a user context */
.p2align 2
.global _mt_user_entry_pic
_mt_user_entry_pic:
/* buffer user psr */
ldr r0, [lr, #16*4]
msr spsr, r0
/* get user context pointer */
ldr lr, _mt_client_context_ptr
/* load user r0 ... r12 */
ldmia lr, {r0-r12}
/* buffer user pc */
ldr r0, [lr, #pc_offset]
adr r1, _mt_buffer
str r0, [r1]
/* load user sp and lr */
add sp, lr, #13*4
ldmia sp, {sp,lr}^
/* buffer user psr */
ldr r0, [lr, #psr_offset]
msr spsr, r0
/* get user cidr and section table */
ldr sp, [lr, #18*4]
ldr lr, [lr, #19*4]
/* load user r0 ... r12 */
ldm lr, {r0-r12}
/********************************************************
** From now on, until we leave kernel mode, we must **
** avoid access to memory that is not mapped globally **
********************************************************/
/* load user sp and lr */
add sp, lr, #sp_offset
ldm sp, {sp,lr}^
/* apply user contextidr and section table */
mcr p15, 0, sp, c13, c0, 1
mcr p15, 0, lr, c2, c0, 0
_flush_branch_predictor
/* get user cidr and section table */
ldr sp, [lr, #contextidr_offset]
ldr lr, [lr, #section_table_offset]
/* load user pc (implies application of the user psr) */
adr lr, _mt_buffer
ldmia lr, {pc}^
/********************************************************
** From now on, until we leave kernel mode, we must **
** avoid access to memory that is not mapped globally **
********************************************************/
/* leave some space for the kernel context */
.p2align 2
.global _mt_master_context_begin
_mt_master_context_begin: .space 32*4
.global _mt_master_context_end
_mt_master_context_end:
/* apply user contextidr and section table */
mcr p15, 0, sp, c13, c0, 1
mcr p15, 0, lr, c2, c0, 0
_flush_branch_predictor
/* pointer to the context backup space */
.p2align 2
.global _mt_client_context_ptr
_mt_client_context_ptr: .long 0
/* load user pc (implies application of the user psr) */
adr lr, _mt_buffer
ldm lr, {pc}^
/* a local word-sized buffer */
.p2align 2
.global _mt_buffer
_mt_buffer: .long 0
_mt_local_variables
.p2align 2
.global _mt_end
_mt_end:
/* FIXME this exists only because _vm_mon_entry pollutes kernel.cc */
/* FIXME exists only because _vm_mon_entry pollutes generic kernel code */
.global _mt_vm_entry_pic
_mt_vm_entry_pic:
1: b 1b

View File

@ -0,0 +1,27 @@
/*
* \brief Macros that are used by multiple assembly files
* \author Martin Stein
* \date 2014-01-13
*/
/*
* Copyright (C) 2014 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/**
* Determine the kernel name of the executing processor
*
* \param target_reg register that shall receive the processor name
*/
.macro _get_processor_id target_reg
/* read the multiprocessor affinity register */
mrc p15, 0, \target_reg, c0, c0, 5
/* get the affinity-0 bitfield from the read register value */
/* NOTE(review): only affinity level 0 is evaluated, which assumes all processors reside in a single cluster — confirm for the targeted SoCs */
and \target_reg, \target_reg, #0xff
.endm

View File

@ -1,6 +1,6 @@
/*
* \brief Transition between kernel/userland, and secure/non-secure world
* \author Martin stein
* \author Martin Stein
* \author Stefan Kalkowski
* \date 2011-11-15
*/
@ -12,155 +12,204 @@
* under the terms of the GNU General Public License version 2.
*/
.include "macros.s"
/***************
** Constants **
***************/
/* hardware names of processor modes */
.set usr_mode, 16
.set fiq_mode, 17
.set irq_mode, 18
.set svc_mode, 19
.set abt_mode, 23
.set und_mode, 27
/* size of local variables */
.set buffer_size, 2 * 4
/* common constants */
_mt_constants
/************
** Macros **
************/
/**
* Switch from an interrupted user context to a kernel context
* Determine the base of the client context of the executing processor
*
* \param exception_type immediate exception type ID
* \param pc_adjust immediate value that gets subtracted from the
* user PC before it gets saved
* \param target_reg register that shall receive the base pointer
* \param buf_reg register that can be polluted by the macro
*/
.macro _get_client_context_ptr target_reg, buf_reg
/* get kernel name of processor */
_get_processor_id \buf_reg
/* multiply processor name with pointer size to get offset of pointer */
mov \target_reg, #context_ptr_size
mul \buf_reg, \buf_reg, \target_reg
/* get base of the pointer array */
adr \target_reg, _mt_client_context_ptr
/* add offset and base to get processor-local pointer */
add \target_reg, \target_reg, \buf_reg
/* dereference the slot — \target_reg receives the client-context base itself, not the slot address */
ldr \target_reg, [\target_reg]
.endm
/**
* Determine the base of the globally mapped buffer of the executing processor
*
* \param target_reg register that shall receive the base pointer
* \param buf_reg register that can be polluted by the macro
*/
.macro _get_buffer_ptr target_reg, buf_reg
/* get kernel name of processor */
_get_processor_id \buf_reg
/* multiply processor name with buffer size to get offset of buffer */
mov \target_reg, #buffer_size
mul \buf_reg, \buf_reg, \target_reg
/* get base of the buffer array */
adr \target_reg, _mt_buffer
/* add offset and base to get processor-local buffer */
/* (unlike _get_client_context_ptr, the result is the buffer address itself — no dereference) */
add \target_reg, \target_reg, \buf_reg
.endm
/**
* Compose a value for the translation-table-base register 0 and apply it
*
* \param section_table_reg register that contains targeted section-table base
*/
/* NOTE(review): clobbers \section_table_reg — the TTBR0 flags are OR-ed into it in place */
.macro _init_ttbr0 section_table_reg
/* IRGN bitfield is set to 1 to compose the TTBR0 value */
/* (bit 6 — presumably selects inner cacheability for translation-table walks; confirm against the ARMv7 reference manual) */
orr \section_table_reg, \section_table_reg, #0b1000000
/* write translation-table-base register 0 */
mcr p15, 0, \section_table_reg, c2, c0, 0
/* instruction and data synchronization barrier */
isb
dsb
.endm
/**
* Apply a value to the CONTEXTIDR register
*
* \param contexidr_reg register that contains the new CONTEXTIDR value
*/
.macro _init_contextidr contextidr_reg
/* write CONTEXTIDR register */
mcr p15, 0, \contextidr_reg, c13, c0, 1
/* finish all previous instructions so the new context ID is in effect before any further memory access */
isb
.endm
/**
* Save an interrupted user context and switch to the kernel context
*
* \param exception_type kernel name of exception type
* \param pc_adjust value that gets subtracted from saved user PC
*/
.macro _user_to_kernel_pic exception_type, pc_adjust
/*
* We expect that privileged modes are never interrupted by an
* exception. Thus we can assume that we always come from
* user mode at this point.
*/
/*************************************************************************
** Still in user protection domain, thus avoid access to kernel memory **
*************************************************************************/
/* when not in FIQ mode disable FIQs */
.if \exception_type != 6
/* disable fast interrupts when not in fast-interrupt mode */
.if \exception_type != fiq_type
cpsid f
.endif
/************************************************
** We're still in the user protection domain, **
** so we must avoid access to kernel memory **
************************************************/
/*
* The sp in svc mode still contains the base of the globally mapped
* buffer of this processor. Hence go to svc mode and buffer user r0 and
* user r1 to globally mapped memory to be able to pollute r0 and r1.
*/
.if \exception_type != rst_type && \exception_type != svc_type
cps #svc_mode
.endif
stm sp, {r0, r1}^
/* load kernel cidr */
adr sp, _mt_master_context_begin
ldr sp, [sp, #18*4]
mcr p15, 0, sp, c13, c0, 1
isb
/* make buffer pointer available to all modes */
mov r0, sp
/* load kernel section table */
/* switch back to previous privileged mode */
.if \exception_type == und_type
cps #und_mode
.endif
.if \exception_type == pab_type
cps #abt_mode
.endif
.if \exception_type == dab_type
cps #abt_mode
.endif
.if \exception_type == irq_type
cps #irq_mode
.endif
.if \exception_type == fiq_type
cps #fiq_mode
.endif
/* load kernel contextidr and base of the kernel section-table */
adr sp, _mt_master_context_begin
ldr sp, [sp, #19*4]
orr sp, sp, #0b1000000 /* set TTBR0 flags */
mcr p15, 0, sp, c2, c0, 0
isb
dsb
add sp, #contextidr_offset
ldm sp, {r1, sp}
/* switch to kernel protection-domain */
_init_contextidr r1
_init_ttbr0 sp
/*******************************************
** Now it's safe to access kernel memory **
*******************************************/
/* get user context pointer */
ldr sp, _mt_client_context_ptr
/*
* Save user r0 ... r12. We explicitly target user registers
* via '^' because we might be in FIQ exception-mode where
* some of them are banked. Doesn't affect other modes.
*/
stmia sp, {r0-r12}^
/* save user lr and sp */
add r0, sp, #13*4
stmia r0, {sp,lr}^
/* get user context-pointer */
_get_client_context_ptr sp, r1
/* adjust and save user pc */
.if \pc_adjust != 0
sub lr, lr, #\pc_adjust
.endif
str lr, [sp, #15*4]
str lr, [sp, #pc_offset]
/* save user psr */
/* move buffer pointer to lr to enable us to save user r0 - r12 via stm */
mov lr, r0
/* restore user r0 and user r1 */
ldm lr, {r0, r1}
/* save user r0 - r12 */
stm sp, {r0-r12}^
/* save user sp and user lr */
add r0, sp, #sp_offset
stm r0, {sp, lr}^
/* get user psr and type of exception that interrupted the user */
mrs r0, spsr
str r0, [sp, #16*4]
mov r1, #\exception_type
/* save type of exception that interrupted the user */
mov r0, #\exception_type
str r0, [sp, #17*4]
/*
* Switch to supervisor mode
* FIXME This is done due to incorrect behavior when running the kernel
* high-level-code in FIQ-exception mode. Please debug this behavior
* and remove this switch.
*/
cps #19
/* get kernel context pointer */
adr r0, _mt_master_context_begin
/* load kernel context */
add r0, r0, #13*4
ldmia r0, {sp, lr, pc}
b _common_user_to_kernel_pic
.endm /* _user_to_kernel_pic */
/**
* Switch from kernel context to a user context
*/
.macro _kernel_to_user_pic
/* get user context pointer */
ldr lr, _mt_client_context_ptr
/* buffer user pc */
ldr r0, [lr, #15*4]
adr r1, _mt_buffer
str r0, [r1]
/* buffer user psr */
ldr r0, [lr, #16*4]
msr spsr, r0
/* load user r0 ... r12 */
ldmia lr, {r0-r12}
/* load user sp and lr */
add sp, lr, #13*4
ldmia sp, {sp,lr}^
/* get user contextidr and section table */
ldr sp, [lr, #18*4]
ldr lr, [lr, #19*4]
orr lr, lr, #0b1000000 /* set TTBR0 flags */
/********************************************************
** From now on, until we leave kernel mode, we must **
** avoid access to memory that is not mapped globally **
********************************************************/
/* apply user contextidr and section table */
mcr p15, 0, sp, c13, c0, 1
mcr p15, 0, lr, c2, c0, 0
isb
dsb
/* load user pc (implies application of the user psr) */
adr lr, _mt_buffer
ldmia lr, {pc}^
.endm /* _kernel_to_user_pic */
.macro _fiq_check_prior_mode
mrs r8, spsr /* load fiq-spsr */
and r8, #31
cmp r8, #16 /* check whether we come from user-mode */
beq 1f
mrs r8, spsr /* enable fiq-ignore bit */
orr r8, #64
msr spsr, r8
subs pc, lr, #4 /* resume previous exception */
1:
.endm /* _fiq_check_prior_mode */
/**
* Save sp, lr and spsr register banks of specified exception mode
*/
@ -179,7 +228,7 @@
* vm's PC before it gets saved
*/
.macro _vm_to_kernel exception_type, pc_adjust
ldr sp, _mt_client_context_ptr /* load context pointer */
ldr sp, _mt_client_context_ptr /* load context pointer */
stmia sp, {r0-lr}^ /* save user regs r0-r12,sp,lr */
add r0, sp, #15*4
.if \pc_adjust != 0 /* adjust pc if necessary */
@ -202,10 +251,7 @@
_save_bank 17 /* save fiq banks */
stmia r0!, {r8-r12} /* save fiq r8-r12 */
stmia r0!, {r3-r6} /* save MMU registers */
cps #19 /* switch to supervisor mode */
adr r0, _mt_master_context_begin /* get kernel context pointer */
add r0, r0, #13*4 /* load kernel context */
ldmia r0, {sp,lr,pc}
b _common_client_to_kernel_pic
.endm /* _vm_to_kernel */
@ -243,67 +289,175 @@
.endm /* _kernel_to_vm */
/**********************************
** Linked into the text section **
**********************************/
.section .text
/*
* The mode transition PIC switches between a kernel context and a user
* context and thereby between their address spaces. Due to the latter
* Page aligned base of mode transition code.
*
* This position independent code switches between a kernel context and a
* user context and thereby between their address spaces. Due to the latter
* it must be mapped executable to the same region in every address space.
* To enable such switching, the kernel context must be stored within this
region, thus one should map it solely accessible for privileged modes.
*/
.p2align 12 /* page-aligned */
.p2align min_page_size_log2
.global _mt_begin
_mt_begin:
/*
* On user exceptions the CPU has to jump to one of the following
* 7 entry vectors to switch to a kernel context.
*/
.global _mt_kernel_entry_pic
_mt_kernel_entry_pic:
/*
* On user exceptions the CPU has to jump to one of the following
* seven entry vectors to switch to a kernel context.
*/
.global _mt_kernel_entry_pic
_mt_kernel_entry_pic:
b _rst_entry /* 0x00: reset */
b _und_entry /* 0x04: undefined instruction */
b _svc_entry /* 0x08: supervisor call */
b _pab_entry /* 0x0c: prefetch abort */
b _dab_entry /* 0x10: data abort */
nop /* 0x14: reserved */
b _irq_entry /* 0x18: interrupt request */
_fiq_check_prior_mode /* 0x1c: fast interrupt request */
_user_to_kernel_pic 7, 4
/***********************
** Exception entries **
***********************/
/* PICs that switch from an user exception to the kernel */
_rst_entry: _user_to_kernel_pic 1, 0
_und_entry: _user_to_kernel_pic 2, 4
_svc_entry: _user_to_kernel_pic 3, 0
_pab_entry: _user_to_kernel_pic 4, 4
_dab_entry: _user_to_kernel_pic 5, 8
_irq_entry: _user_to_kernel_pic 6, 4
b _rst_entry /* 0x00: reset */
b _und_entry /* 0x04: undefined instruction */
b _svc_entry /* 0x08: supervisor call */
b _pab_entry /* 0x0c: prefetch abort */
b _dab_entry /* 0x10: data abort */
nop /* 0x14: reserved */
b _irq_entry /* 0x18: interrupt request */
/* kernel must jump to this point to switch to a user context */
.p2align 2
.global _mt_user_entry_pic
_mt_user_entry_pic:
_kernel_to_user_pic
/******************************************************
** Entry for fast interrupt requests at offset 0x1c **
******************************************************/
/* leave some space for the kernel context */
.p2align 2
.global _mt_master_context_begin
_mt_master_context_begin: .space 32*4
.global _mt_master_context_end
_mt_master_context_end:
/* load the saved PSR of the the previous mode */
mrs r8, spsr
/* pointer to the context backup space */
.p2align 2
.global _mt_client_context_ptr
_mt_client_context_ptr: .long 0
/* get the M bitfield from the read PSR value */
and r9, r8, #0b11111
/* a local word-sized buffer */
.p2align 2
.global _mt_buffer
_mt_buffer: .long 0
/* skip following instructions if previous mode was user mode */
cmp r9, #usr_mode
beq 1f
/*
* If we reach this point, the previous mode was not the user
* mode, meaning an exception entry has been preempted by this
* fast interrupt before it could disable fast interrupts.
*/
/* disable fast interrupts in PSR value of previous mode */
orr r8, #0b1000000
/* apply PSR of previous mode */
msr spsr, r8
/*
* Resume execution of previous exception entry leaving the fast
* interrupt unhandled till fast interrupts get enabled again.
*/
subs pc, lr, #4
/* switch to kernel to handle the fast interrupt */
1:
_user_to_kernel_pic fiq_type, fiq_pc_adjust
/***************************************************************
** Code that switches from a non-FIQ exception to the kernel **
***************************************************************/
_rst_entry: _user_to_kernel_pic rst_type, rst_pc_adjust
_und_entry: _user_to_kernel_pic und_type, und_pc_adjust
_svc_entry: _user_to_kernel_pic svc_type, svc_pc_adjust
_pab_entry: _user_to_kernel_pic pab_type, pab_pc_adjust
_dab_entry: _user_to_kernel_pic dab_type, dab_pc_adjust
_irq_entry: _user_to_kernel_pic irq_type, irq_pc_adjust
/**************************************************************
** Kernel-entry code that is common for all user exceptions **
**************************************************************/
_common_user_to_kernel_pic:
/* save user psr and type of exception that interrupted the user */
add sp, sp, #psr_offset
stm sp, {r0, r1}
/*********************************************************
** Kernel-entry code that is common for all exceptions **
*********************************************************/
_common_client_to_kernel_pic:
/*
* Switch to supervisor mode to circumvent incorrect behavior of
* kernel high-level code in fast interrupt mode and to ensure that
* we're in svc mode at kernel exit. The latter is because kernel
* exit stores a buffer pointer into its banked sp that is also
* needed by the subsequent kernel entry.
*/
cps #svc_mode
/* get base of the kernel-stacks area and the kernel-stack size */
adr r0, _mt_master_context_begin
add r1, r0, #r12_offset
ldm r1, {r2, r3}
/* determine top of the kernel stack of this processor and apply it as SP */
_init_kernel_sp r3, r2
/* apply kernel lr and kernel pc */
add r1, r0, #lr_offset
ldm r1, {lr, pc}
_mt_local_variables
/****************************************************************
** Code that switches from a kernel context to a user context **
****************************************************************/
.p2align 2
.global _mt_user_entry_pic
_mt_user_entry_pic:
/* get user context and globally mapped buffer of this processor */
_get_client_context_ptr lr, r0
_get_buffer_ptr sp, r0
/* buffer user pc and base of user section-table globally mapped */
ldr r0, [lr, #pc_offset]
ldr r1, [lr, #section_table_offset]
stm sp, {r0, r1}
/* buffer user psr in spsr */
ldr r0, [lr, #psr_offset]
msr spsr, r0
/* setup banked user sp and banked user lr */
add r0, lr, #sp_offset
ldm r0, {sp, lr}^
/* setup user r0 to r12 */
ldm lr, {r0-r12}^
/* load user contextidr */
ldr lr, [lr, #contextidr_offset]
/********************************************************
** From now on, until we leave kernel mode, we must **
** avoid access to memory that is not mapped globally **
********************************************************/
/* switch to user protection-domain */
_init_contextidr lr
ldr lr, [sp, #4]
_init_ttbr0 lr
/* apply user pc which implies application of spsr as user psr */
ldm sp, {pc}^
/* end of the mode transition code */
.global _mt_end
_mt_end:
@ -314,22 +468,22 @@
.p2align 4
.global _mon_kernel_entry
_mon_kernel_entry:
b _mon_rst_entry /* reset */
b _mon_und_entry /* undefined instruction */
b _mon_svc_entry /* supervisor call */
b _mon_pab_entry /* prefetch abort */
b _mon_dab_entry /* data abort */
nop /* reserved */
b _mon_irq_entry /* interrupt request */
_vm_to_kernel 7, 4 /* fast interrupt request */
b _mon_rst_entry /* reset */
b _mon_und_entry /* undefined instruction */
b _mon_svc_entry /* supervisor call */
b _mon_pab_entry /* prefetch abort */
b _mon_dab_entry /* data abort */
nop /* reserved */
b _mon_irq_entry /* interrupt request */
_vm_to_kernel fiq_type, 4 /* fast interrupt request */
/* PICs that switch from a vm exception to the kernel */
_mon_rst_entry: _vm_to_kernel 1, 0
_mon_und_entry: _vm_to_kernel 2, 4
_mon_svc_entry: _vm_to_kernel 3, 0
_mon_pab_entry: _vm_to_kernel 4, 4
_mon_dab_entry: _vm_to_kernel 5, 8
_mon_irq_entry: _vm_to_kernel 6, 4
_mon_rst_entry: _vm_to_kernel rst_type, 0
_mon_und_entry: _vm_to_kernel und_type, 4
_mon_svc_entry: _vm_to_kernel svc_type, 0
_mon_pab_entry: _vm_to_kernel pab_type, 4
_mon_dab_entry: _vm_to_kernel dab_type, 8
_mon_irq_entry: _vm_to_kernel irq_type, 4
/* kernel must jump to this point to switch to a vm */
.p2align 2

View File

@ -0,0 +1,40 @@
/*
* \brief Board driver for core
* \author Martin Stein
* \date 2012-04-23
*/
/*
* Copyright (C) 2012-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _BOARD_H_
#define _BOARD_H_
/* core includes */
#include <drivers/board_base.h>
namespace Genode
{
/**
* Board driver for core
*/
class Board : public Board_base
{
public:
/**
* Hook for board-specific preparations before kernel start
*
* This board needs no preparations, hence the empty body.
*/
static void prepare_kernel() { }
/**
* Tell secondary processors where to start execution from
*
* \param ip initial instruction pointer of secondary processors
*
* The pointer is stored through a volatile lvalue so the write
* cannot be optimized away. NOTE(review): assumes the boot code
* of the secondary processors fetches its entry point from the
* first word of the internal RAM (IRAM_BASE) - confirm against
* the SoC reference manual.
*/
static void secondary_processors_ip(void * const ip)
{
*(void * volatile *)IRAM_BASE = ip;
}
};
}
#endif /* _BOARD_H_ */

View File

@ -0,0 +1,42 @@
/*
* \brief CPU driver for core
* \author Martin Stein
* \date 2012-04-23
*/
/*
* Copyright (C) 2012 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _ARNDALE__CPU_H_
#define _ARNDALE__CPU_H_
/* core includes */
#include <cpu/cortex_a15.h>
namespace Genode
{
/**
* CPU driver for core
*/
class Cpu : public Cortex_a15::Cpu
{
public:
/**
* Return kernel name of the executing processor
*
* Reads the lowest affinity field (Aff_0) of the
* multiprocessor affinity register (MPIDR), which
* identifies the executing processor.
*/
static unsigned id() { return Mpidr::Aff_0::get(Mpidr::read()); }
/**
* Return kernel name of the primary processor
*
* The primary processor is configured statically by the
* board definition.
*/
static unsigned primary_id() { return Board::PRIMARY_MPIDR_AFF_0; }
};
}
#endif /* _ARNDALE__CPU_H_ */

View File

@ -12,6 +12,7 @@ REQUIRES += platform_arndale
INC_DIR += $(REP_DIR)/src/core/arndale
INC_DIR += $(REP_DIR)/src/core/exynos5
INC_DIR += $(REP_DIR)/src/core/arm
INC_DIR += $(REP_DIR)/src/core/arm_v7
# add C++ sources
SRC_CC += platform_services.cc \

View File

@ -0,0 +1,56 @@
/*
* \brief Timer for kernel
* \author Martin Stein
* \date 2012-04-23
*/
/*
* Copyright (C) 2012 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _ARNDALE__TIMER_H_
#define _ARNDALE__TIMER_H_
/* core includes */
#include <board.h>
#include <timer/exynos_mct.h>
namespace Kernel
{
/**
* Kernel timer
*/
class Timer : public Exynos_mct::Timer
{
public:
/**
* Return kernel name of timer interrupt of a specific processor
*
* \param processor_id kernel name of targeted processor
*
* Each processor has a dedicated timer-interrupt line
* (MCT_IRQ_L0 / MCT_IRQ_L1).
*/
static unsigned interrupt_id(unsigned const processor_id)
{
switch (processor_id) {
case 0:
return Genode::Board::MCT_IRQ_L0;
case 1:
return Genode::Board::MCT_IRQ_L1;
default:
/*
* NOTE(review): 0 is used as error value although it may
* be a valid interrupt name in general - confirm that
* callers treat it as an error indicator.
*/
PERR("unknown processor");
return 0;
}
}
/**
* Constructor
*/
Timer() : Exynos_mct::Timer(Genode::Board::MCT_MMIO_BASE,
Genode::Board::MCT_CLOCK) { }
};
}
#endif /* _ARNDALE__TIMER_H_ */

View File

@ -24,6 +24,8 @@ namespace Genode
public:
static void prepare_kernel() { }
static void secondary_processors_ip(void * const ip) { }
};
}

View File

@ -37,6 +37,24 @@ namespace Arm
DATA_ACCESS_ALIGNM = 4,
};
/**
* Multiprocessor affinity register
*/
struct Mpidr : Register<32>
{
struct Aff_0 : Bitfield<0, 8> { };
/**
* Read register value
*/
static access_t read()
{
access_t v;
asm volatile ("mrc p15, 0, %[v], c0, c0, 5" : [v] "=r" (v) ::);
return v;
}
};
/**
* Cache type register
*/
@ -574,10 +592,13 @@ namespace Arm
};
/**
* Flush all instruction caches
* Invalidate all entries of all instruction caches
*/
__attribute__((always_inline)) static void flush_instr_caches() {
asm volatile ("mcr p15, 0, %[rd], c7, c5, 0" :: [rd]"r"(0) : ); }
__attribute__((always_inline))
static void invalidate_instruction_caches()
{
asm volatile ("mcr p15, 0, %[rd], c7, c5, 0" :: [rd]"r"(0) : );
}
/**
* Flush all data caches
@ -590,7 +611,7 @@ namespace Arm
static void flush_caches()
{
flush_data_caches();
flush_instr_caches();
invalidate_instruction_caches();
}
/**

View File

@ -174,12 +174,57 @@ namespace Arm_v6
* Ensure that TLB insertions get applied
*/
static void tlb_insertions() { flush_tlb(); }
/**
* Start execution of secondary processors
*
* \param ip would-be initial instruction pointer (ignored)
*
* This ARMv6 driver supports uniprocessor configurations only,
* hence a configuration with more than one processor is reported
* as an error.
*/
static void start_secondary_processors(void * const ip)
{
if (PROCESSORS > 1) { PERR("multiprocessing not implemented"); }
}
/**
* Invalidate all predictions about the future control-flow
*/
static void invalidate_control_flow_predictions()
{
/* FIXME invalidation of branch prediction not implemented */
}
/**
* Finish all previous data transfers
*/
static void data_synchronization_barrier()
{
/* FIXME data synchronization barrier not implemented */
}
/**
* Wait for the next interrupt as cheap as possible
*/
static void wait_for_interrupt()
{
/* FIXME cheap way of waiting is not implemented */
}
/**
* Return kernel name of the primary processor
*/
static unsigned primary_id() { return 0; }
/**
* Return kernel name of the executing processor
*/
static unsigned id() { return primary_id(); }
};
}
void Arm::Cpu::flush_data_caches() {
asm volatile ("mcr p15, 0, %[rd], c7, c14, 0" :: [rd]"r"(0) : ); }
/**************
** Arm::Cpu **
**************/
void Arm::Cpu::flush_data_caches()
{
asm volatile ("mcr p15, 0, %[rd], c7, c14, 0" :: [rd]"r"(0) : );
}
#endif /* _CPU__ARM_V6_H_ */

View File

@ -222,6 +222,38 @@ namespace Arm_v7
Nsacr::Cpnsae11::bits(1);
asm volatile ("mcr p15, 0, %[rd], c1, c1, 2" : : [rd] "r" (rd));
}
/**
* Invalidate all predictions about the future control-flow
*/
static void invalidate_control_flow_predictions()
{
asm volatile ("mcr p15, 0, r0, c7, c5, 6");
}
/**
* Finish all previous data transfers
*/
static void data_synchronization_barrier() { asm volatile ("dsb"); }
/**
* Enable secondary processors that loop on wait-for-event
*
* \param ip initial instruction pointer for secondary processors
*/
static void start_secondary_processors(void * const ip)
{
if (PROCESSORS > 1) {
Genode::Board::secondary_processors_ip(ip);
data_synchronization_barrier();
asm volatile ("sev\n");
}
}
/**
* Wait for the next interrupt as cheap as possible
*/
static void wait_for_interrupt() { asm volatile ("wfi"); }
};
}

View File

@ -10,6 +10,7 @@ REQUIRES = platform_imx31
# add include paths
INC_DIR += $(REP_DIR)/src/core/imx31
INC_DIR += $(REP_DIR)/src/core/arm
INC_DIR += $(REP_DIR)/src/core/arm_v6
# add C++ sources
SRC_CC += platform_services.cc \

View File

@ -23,7 +23,13 @@ namespace Kernel
{
public:
enum { IRQ = Genode::Board::EPIT_1_IRQ };
/**
* Return kernel name of timer interrupt
*/
/* the processor argument is ignored on this uniprocessor platform */
static unsigned interrupt_id(unsigned)
{
return Genode::Board::EPIT_1_IRQ;
}
/**
* Constructor

View File

@ -96,6 +96,11 @@ namespace Imx53
aips_1()->prepare_kernel();
aips_2()->prepare_kernel();
}
/**
* Tell secondary processors where to start execution from
*/
static void secondary_processors_ip(void *) { }
};
}

View File

@ -22,7 +22,20 @@ namespace Genode
/**
* CPU driver for core
*/
class Cpu : public Cortex_a8::Cpu { };
class Cpu : public Cortex_a8::Cpu
{
public:
/**
* Return kernel name of the primary processor
*/
static unsigned primary_id() { return 0; }
/**
* Return kernel name of the executing processor
*/
static unsigned id() { return primary_id(); }
};
}
#endif /* _IMX53__CPU_H_ */

View File

@ -127,11 +127,16 @@ namespace Imx53
write<Priomask::Mask>(0x1f);
write<Intctrl>(Intctrl::Enable::bits(1) |
Intctrl::Nsen::bits(1) |
Intctrl::Nsen_mask::bits(1));
Intctrl::Nsen::bits(1) |
Intctrl::Nsen_mask::bits(1));
}
/**
* Initialize processor local interface of the controller
*/
void init_processor_local() { }
/**
* Receive a pending request number 'i'
*/
@ -176,12 +181,15 @@ namespace Imx53
}
/**
* Unmask interrupt 'i'
* Unmask interrupt
*
* \param interrupt_id kernel name of targeted interrupt
*/
void unmask(unsigned const i)
void unmask(unsigned const interrupt_id, unsigned)
{
if (i <= MAX_INTERRUPT_ID)
write<Enset::Set_enable>(1, i);
if (interrupt_id <= MAX_INTERRUPT_ID) {
write<Enset::Set_enable>(1, interrupt_id);
}
}
/**

View File

@ -11,6 +11,7 @@ REQUIRES += platform_imx53
# add include paths
INC_DIR += $(REP_DIR)/src/core/imx53
INC_DIR += $(REP_DIR)/src/core/arm
INC_DIR += $(REP_DIR)/src/core/arm_v7
# add C++ sources
SRC_CC += platform_services.cc \

View File

@ -27,7 +27,13 @@ namespace Imx53
{
public:
enum { IRQ = Board::EPIT_1_IRQ };
/**
* Return kernel name of timer interrupt
*/
/* the processor argument is ignored on this uniprocessor platform */
static unsigned interrupt_id(unsigned)
{
return Board::EPIT_1_IRQ;
}
/**
* Constructor

View File

@ -22,8 +22,13 @@
extern int _mon_kernel_entry;
void Kernel::trustzone_initialization(Pic *pic)
void Kernel::init_trustzone(Pic * pic)
{
/* check for compatibility */
if (PROCESSORS > 1) {
PERR("trustzone not supported with multiprocessing");
return;
}
/* set exception vector entry */
Genode::Cpu::mon_exception_entry_at((Genode::addr_t)&_mon_kernel_entry);

View File

@ -19,7 +19,7 @@ namespace Kernel {
class Pic;
void trustzone_initialization(Pic *pic);
void init_trustzone(Pic * pic);
}
#endif /* _CORE__INCLUDE__TRUSTZONE_H_ */

View File

@ -14,6 +14,7 @@
/* core includes */
#include <kernel/irq.h>
#include <pic.h>
#include <cpu.h>
using namespace Kernel;
@ -21,4 +22,4 @@ namespace Kernel { Pic * pic(); }
void Irq::_disable() const { pic()->mask(_id()); }
void Irq::_enable() const { pic()->unmask(_id()); }
void Irq::_enable() const { pic()->unmask(_id(), Genode::Cpu::id()); }

View File

@ -40,8 +40,8 @@
using namespace Kernel;
extern Genode::Native_thread_id _main_thread_id;
extern int _kernel_stack_high;
extern "C" void CORE_MAIN();
extern void * _start_secondary_processors;
Genode::Native_utcb * _main_thread_utcb;
@ -62,11 +62,6 @@ namespace Kernel
namespace Kernel
{
/**
* Idle thread entry
*/
static void idle_main() { while (1) ; }
Pd_ids * pd_ids() { return unmanaged_singleton<Pd_ids>(); }
Thread_ids * thread_ids() { return unmanaged_singleton<Thread_ids>(); }
Signal_context_ids * signal_context_ids() { return unmanaged_singleton<Signal_context_ids>(); }
@ -78,14 +73,21 @@ namespace Kernel
Signal_receiver_pool * signal_receiver_pool() { return unmanaged_singleton<Signal_receiver_pool>(); }
/**
* Access to static kernel timer
* Return singleton kernel-timer
*/
static Timer * timer() { static Timer _object; return &_object; }
void reset_lap_time()
Timer * timer()
{
timer()->start_one_shot(timer()->ms_to_tics(USER_LAP_TIME_MS));
static Timer _object;
return &_object;
}
/**
* Start a new scheduling lap
*/
void reset_lap_time(unsigned const processor_id)
{
unsigned const tics = timer()->ms_to_tics(USER_LAP_TIME_MS);
timer()->start_one_shot(tics, processor_id);
}
@ -124,28 +126,6 @@ namespace Kernel
namespace Kernel
{
/**
* Access to static CPU scheduler
*/
Cpu_scheduler * cpu_scheduler()
{
/* create idle thread */
static char idle_stack[DEFAULT_STACK_SIZE]
__attribute__((aligned(Cpu::DATA_ACCESS_ALIGNM)));
static Thread idle(Priority::MAX, "idle");
static bool init = 0;
if (!init) {
enum { STACK_SIZE = sizeof(idle_stack)/sizeof(idle_stack[0]) };
idle.ip = (addr_t)&idle_main;;
idle.sp = (addr_t)&idle_stack[STACK_SIZE];;
idle.init(0, core_id(), 0, 0);
init = 1;
}
/* create CPU scheduler with a permanent idle thread */
static Cpu_scheduler cpu_sched(&idle);
return &cpu_sched;
}
/**
* Get attributes of the mode transition region in every PD
*/
@ -162,64 +142,136 @@ namespace Kernel
unsigned pd_alignm_log2() { return Tlb::ALIGNM_LOG2; }
size_t vm_size() { return sizeof(Vm); }
enum { STACK_SIZE = 64 * 1024 };
/**
* Handle an interrupt request
* Return lock that guards all kernel data against concurrent access
*/
void handle_interrupt()
Lock & data_lock()
{
static Lock s;
return s;
}
addr_t core_tlb_base;
unsigned core_pd_id;
/**
* Handle interrupt request
*
* \param processor kernel object of targeted processor
* \param processor_id kernel name of targeted processor
*/
void handle_interrupt(Processor * const processor,
unsigned const processor_id)
{
/* determine handling for specific interrupt */
unsigned irq_id;
if (pic()->take_request(irq_id))
{
switch (irq_id) {
case Timer::IRQ: {
cpu_scheduler()->yield();
timer()->clear_interrupt();
reset_lap_time();
break; }
default: {
/* check wether the interrupt is a scheduling timeout */
if (timer()->interrupt_id(processor_id) == irq_id)
{
/* handle scheduling timeout */
processor->scheduler()->yield();
timer()->clear_interrupt(processor_id);
reset_lap_time(processor_id);
} else {
/* try to inform the user interrupt-handler */
Irq::occurred(irq_id);
break; }
}
}
/* disengage interrupt controller from IRQ */
/* end interrupt request at controller */
pic()->finish_request();
}
}
/**
* Prepare the first call of the kernel main-routine
* Enable kernel-entry assembly to get an exclusive stack at every processor
*/
extern "C" void setup_kernel()
char kernel_stack[PROCESSORS][Kernel::STACK_SIZE] __attribute__((aligned()));
unsigned kernel_stack_size = Kernel::STACK_SIZE;
/**
* Setup kernel environment before activating secondary processors
*/
extern "C" void init_kernel_uniprocessor()
{
/************************************************************************
** As atomic operations are broken in physical mode on some platforms **
** we must avoid the use of 'cmpxchg' by now (includes not using any **
** local static objects. **
************************************************************************/
/* calculate in advance as needed later when data writes aren't allowed */
core_tlb_base = core()->tlb()->base();
core_pd_id = core_id();
/* initialize all processor objects */
multiprocessor();
/* go multiprocessor mode */
Cpu::start_secondary_processors(&_start_secondary_processors);
}
/**
* Setup kernel environment after activating secondary processors
*/
extern "C" void init_kernel_multiprocessor()
{
/***********************************************************************
** As updates on a cached kernel lock might not be visible to **
** processors that have not enabled caches, we can't synchronize the **
** activation of MMU and caches. Hence we must avoid write access to **
** kernel data by now. **
***********************************************************************/
/* synchronize data view of all processors */
Cpu::flush_data_caches();
Cpu::invalidate_instruction_caches();
Cpu::invalidate_control_flow_predictions();
Cpu::data_synchronization_barrier();
/* initialize processor in physical mode */
Cpu::init_phys_kernel();
/* enable kernel timer */
pic()->unmask(Timer::IRQ);
/* TrustZone initialization code */
trustzone_initialization(pic());
/* enable performance counter */
perf_counter()->enable();
/* switch to core address space */
Cpu::init_virt_kernel(core()->tlb()->base(), core_id());
Cpu::init_virt_kernel(core_tlb_base, core_pd_id);
/************************************
** Now it's safe to use 'cmpxchg' **
************************************/
Lock::Guard guard(data_lock());
/*******************************************
** Now it's safe to write to kernel data **
*******************************************/
/*
* From this point on, it is safe to use 'cmpxchg', i.e., to create
* singleton objects via the static-local object pattern. See
* the comment in 'src/base/singleton.h'.
* TrustZone initialization code
*
* FIXME This is a platform-specific feature
*/
init_trustzone(pic());
/* create the core main thread */
/*
* Enable performance counter
*
* FIXME This is an optional processor specific feature
*/
perf_counter()->enable();
/* initialize interrupt controller */
pic()->init_processor_local();
unsigned const processor_id = Cpu::id();
pic()->unmask(Timer::interrupt_id(processor_id), processor_id);
/* as primary processor create the core main thread */
if (Cpu::primary_id() == processor_id)
{
/* get stack memory that fulfills the constraints for core stacks */
enum {
@ -242,11 +294,12 @@ extern "C" void setup_kernel()
_main_thread_utcb->start_info()->init(t.id(), Genode::Native_capability());
t.ip = (addr_t)CORE_MAIN;;
t.sp = (addr_t)s + STACK_SIZE;
t.init(0, core_id(), &utcb, 1);
t.init(multiprocessor()->select(processor_id), core_id(), &utcb, 1);
/* kernel initialization finished */
init_platform();
}
/* kernel initialization finished */
init_platform();
reset_lap_time();
reset_lap_time(processor_id);
}
@ -255,27 +308,32 @@ extern "C" void setup_kernel()
*/
extern "C" void kernel()
{
cpu_scheduler()->head()->handle_exception();
cpu_scheduler()->head()->proceed();
data_lock().lock();
unsigned const processor_id = Cpu::id();
Processor * const processor = multiprocessor()->select(processor_id);
Processor_scheduler * const scheduler = processor->scheduler();
scheduler->head()->handle_exception(processor_id);
scheduler->head()->proceed(processor_id);
}
Kernel::Mode_transition_control * Kernel::mtc()
{
/* compose CPU context for kernel entry */
struct Kernel_context : Cpu::Context
{
/**
* Constructor
*/
Kernel_context()
{
ip = (addr_t)kernel;
sp = (addr_t)&_kernel_stack_high;
core()->admit(this);
}
} * const k = unmanaged_singleton<Kernel_context>();
/* create singleton processor context for kernel */
Cpu_context * const cpu_context = unmanaged_singleton<Cpu_context>();
/* initialize mode transition page */
return unmanaged_singleton<Mode_transition_control>(k);
return unmanaged_singleton<Mode_transition_control>(cpu_context);
}
Kernel::Execution_context::~Execution_context() { }
Kernel::Cpu_context::Cpu_context()
{
_init(STACK_SIZE);
sp = (addr_t)kernel_stack;
ip = (addr_t)kernel;
core()->admit(this);
}

View File

@ -16,9 +16,12 @@
namespace Kernel
{
class Processor;
unsigned core_id();
void handle_interrupt();
void handle_interrupt(Processor * const processor,
unsigned const processor_id);
}
#endif /* _KERNEL__KERNEL_H_ */

View File

@ -0,0 +1,24 @@
/*
* \brief Representation of a common instruction processor
* \author Martin Stein
* \date 2014-01-14
*/
/*
* Copyright (C) 2014 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* core includes */
#include <kernel/multiprocessor.h>
using namespace Kernel;
/**
* Return singleton that holds the processor objects of all processors
*/
Multiprocessor * Kernel::multiprocessor()
{
/* constructed on first use (function-local static) */
static Multiprocessor s;
return &s;
}

View File

@ -0,0 +1,148 @@
/*
* \brief Provide a processor object for every available processor
* \author Martin Stein
* \date 2014-01-14
*/
/*
* Copyright (C) 2014 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _KERNEL__MULTIPROCESSOR_H_
#define _KERNEL__MULTIPROCESSOR_H_
/* base includes */
#include <unmanaged_singleton.h>
/* core includes */
#include <kernel/thread.h>
namespace Kernel
{
typedef Cpu_scheduler Processor_scheduler;
/**
* Thread that consumes processor time if no other thread is available
*/
class Idle_thread;
/**
* Representation of a single common instruction processor
*/
class Processor;
/**
* Provides a processor object for every provided processor
*/
class Multiprocessor;
/**
* Return multiprocessor singleton
*/
Multiprocessor * multiprocessor();
/**
* Return kernel name of the core protection-domain
*/
unsigned core_id();
}
/**
* Thread that a processor executes when no other thread is available
*/
class Kernel::Idle_thread : public Thread
{
private:
enum {
STACK_SIZE = 4 * 1024,
STACK_ALIGNM = Cpu::DATA_ACCESS_ALIGNM,
};
/* dedicated stack of this idle thread */
char _stack[STACK_SIZE] __attribute__((aligned(STACK_ALIGNM)));
/**
* Main function of all idle threads
*
* Waits for the next interrupt as cheap as possible instead of
* busy-looping.
*/
static void _main() { while (1) { Cpu::wait_for_interrupt(); } }
public:
/**
* Constructor
*
* \param processor kernel object of targeted processor
*/
Idle_thread(Processor * const processor)
:
Thread(Priority::MAX, "idle")
{
ip = (addr_t)&_main;
/* stack pointer starts at the upper end of the stack array */
sp = (addr_t)&_stack[STACK_SIZE];
/* join core PD with no UTCB and start argument 0 */
init(processor, core_id(), 0, 0);
}
};
/**
* Representation of a single common instruction processor
*/
class Kernel::Processor
{
private:
/* thread that runs when no other thread is ready on this processor */
Idle_thread _idle;
/* scheduler for the execution contexts of this processor */
Processor_scheduler _scheduler;
public:
/**
* Constructor
*
* The scheduler gets the idle thread as its permanent fallback.
*/
Processor() : _idle(this), _scheduler(&_idle) { }
/***************
** Accessors **
***************/
Processor_scheduler * scheduler() { return &_scheduler; }
};
class Kernel::Multiprocessor
{
	private:

		/*
		 * Backing store for the processor objects that get created via
		 * placement new in the constructor. The explicit alignment
		 * fixes undefined behavior: a plain char array gives no
		 * alignment guarantee for the objects constructed inside it
		 * (the idle-thread stack of a processor object demands
		 * 'Cpu::DATA_ACCESS_ALIGNM' alignment).
		 */
		char _data[PROCESSORS][sizeof(Processor)]
			__attribute__((aligned(Cpu::DATA_ACCESS_ALIGNM)));

	public:

		/**
		 * Construct a processor object for every available processor
		 */
		Multiprocessor()
		{
			for (unsigned i = 0; i < PROCESSORS; i++) {
				new (_data[i]) Processor;
			}
		}

		/**
		 * Return the object of a specific processor
		 *
		 * \param id  kernel name of the targeted processor
		 *
		 * \return  processor object, or 0 if 'id' is out of range
		 */
		Processor * select(unsigned const id) const
		{
			return id < PROCESSORS ? (Processor *)_data[id] : 0;
		}

		/**
		 * Return the object of the primary processor
		 */
		Processor * primary() const
		{
			return (Processor *)_data[Cpu::primary_id()];
		}
};
#endif /* _KERNEL__MULTIPROCESSOR_H_ */

View File

@ -14,6 +14,9 @@
#ifndef _KERNEL__PD_H_
#define _KERNEL__PD_H_
/* Genode includes */
#include <cpu/atomic.h>
/* core includes */
#include <kernel/configuration.h>
#include <kernel/object.h>
@ -30,6 +33,49 @@ extern Genode::addr_t _mt_client_context_ptr;
extern Genode::addr_t _mt_master_context_begin;
extern Genode::addr_t _mt_master_context_end;
namespace Kernel
{
/**
* Lock that enables synchronization inside the kernel
*/
class Lock;
}
/**
* Lock that enables synchronization inside the kernel
*
* Simple spinlock built on the compare-and-exchange primitive.
*/
class Kernel::Lock
{
private:
/* 0 if the lock is free, 1 if it is taken */
int volatile _locked;
/**
* Finish all previously started memory transactions
*
* NOTE(review): this is a compiler-only barrier. On a
* multiprocessor ARM system, releasing the lock may additionally
* need a hardware barrier (e.g., DMB) unless 'Genode::cmpxchg'
* already provides the required ordering - confirm.
*/
void _memory_barrier() { asm volatile ("" : : : "memory"); }
public:
Lock() : _locked(0) { }
/**
* Request the lock
*
* Spins until the lock could be taken atomically.
*/
void lock() { while (!Genode::cmpxchg(&_locked, 0, 1)); }
/**
* Free the lock
*/
void unlock()
{
/* keep protected writes from being reordered past the release */
_memory_barrier();
_locked = 0;
}
/**
* Provide guard semantic for this type of lock
*/
typedef Genode::Lock_guard<Kernel::Lock> Guard;
};
namespace Kernel
{
/**
@ -59,6 +105,8 @@ namespace Kernel
Pd_ids * pd_ids();
Pd_pool * pd_pool();
Lock & data_lock();
}
class Kernel::Mode_transition_control
@ -73,6 +121,31 @@ class Kernel::Mode_transition_control
addr_t const _virt_user_entry;
/**
* Continue execution of client context
*
* \param context targeted client processor-context
* \param processor_id kernel name of targeted processor
* \param entry_raw raw pointer to assembly entry-code
*/
void _continue_client(void * const context, unsigned const processor_id,
addr_t const entry_raw)
{
/*
* Override client-context pointer of the executing processor.
* '_mt_client_context_ptr' is the base of an array with one
* pointer-sized slot per processor ('sizeof(context)' is the
* size of one pointer and thus of one slot).
*/
addr_t const context_ptr_base = (addr_t)&_mt_client_context_ptr;
size_t const context_ptr_offset = processor_id * sizeof(context);
addr_t const context_ptr = context_ptr_base + context_ptr_offset;
*(void * *)context_ptr = context;
/* unlock kernel data as this call does not return to the kernel */
data_lock().unlock();
/* call assembly code that applies the client context */
typedef void (* Entry)();
Entry __attribute__((noreturn)) const entry = (Entry)entry_raw;
entry();
}
public:
enum {
@ -126,21 +199,27 @@ class Kernel::Mode_transition_control
}
/**
* Continue user-mode execution with CPU context 'c'
* Continue execution of userland context
*
* \param context targeted userland context
* \param processor_id kernel name of targeted processor
*/
void continue_user(Cpu::Context * const c)
void continue_user(Cpu::Context * const context,
unsigned const processor_id)
{
_mt_client_context_ptr = (addr_t)c;
((void(*)(void))_virt_user_entry)();
_continue_client(context, processor_id, _virt_user_entry);
}
/**
* Continue VM execution with CPU state 's'
* Continue execution of virtual machine
*
* \param context targeted virtual-machine context
* \param processor_id kernel name of targeted processor
*/
void continue_vm(Cpu_state_modes * s)
void continue_vm(Cpu_state_modes * const context,
unsigned const processor_id)
{
_mt_client_context_ptr = (addr_t)s;
((void(*)(void))&_mt_vm_entry_pic)();
_continue_client(context, processor_id, _mt_vm_entry_pic);
}
};

View File

@ -58,11 +58,6 @@ namespace Kernel
class Execution_context;
typedef Scheduler<Execution_context> Cpu_scheduler;
/**
* Return the systems CPU scheduler
*/
Cpu_scheduler * cpu_scheduler();
}
template <typename T>
@ -304,13 +299,17 @@ class Kernel::Execution_context : public Cpu_scheduler::Item
/**
* Handle an exception that occured during execution
*
* \param processor_id kernel name of targeted processor
*/
virtual void handle_exception() = 0;
virtual void handle_exception(unsigned const processor_id) = 0;
/**
* Continue execution
*
* \param processor_id kernel name of targeted processor
*/
virtual void proceed() = 0;
virtual void proceed(unsigned const processor_id) = 0;
/**
* Constructor
@ -322,10 +321,7 @@ class Kernel::Execution_context : public Cpu_scheduler::Item
/**
* Destructor
*/
virtual ~Execution_context()
{
if (list()) { cpu_scheduler()->remove(this); }
}
virtual ~Execution_context();
};
#endif /* _KERNEL__SCHEDULER_H_ */

View File

@ -21,7 +21,9 @@
#include <kernel/kernel.h>
#include <kernel/thread.h>
#include <kernel/vm.h>
#include <kernel/irq.h>
#include <platform_pd.h>
#include <pic.h>
using namespace Kernel;
@ -31,12 +33,12 @@ unsigned Thread::pd_id() const { return _pd ? _pd->id() : 0; }
bool Thread::_core() const { return pd_id() == core_id(); }
namespace Kernel { void reset_lap_time(unsigned const processor_id); }
void Thread::_signal_context_kill_pending()
{
assert(_state == SCHEDULED);
_state = AWAITS_SIGNAL_CONTEXT_KILL;
cpu_scheduler()->remove(this);
_unschedule(AWAITS_SIGNAL_CONTEXT_KILL);
}
@ -58,8 +60,7 @@ void Thread::_signal_context_kill_failed()
void Thread::_await_signal(Signal_receiver * const receiver)
{
cpu_scheduler()->remove(this);
_state = AWAITS_SIGNAL;
_unschedule(AWAITS_SIGNAL);
_signal_receiver = receiver;
}
@ -90,8 +91,7 @@ void Thread::_await_ipc()
{
switch (_state) {
case SCHEDULED:
cpu_scheduler()->remove(this);
_state = AWAITS_IPC;
_unschedule(AWAITS_IPC);
return;
default:
PERR("wrong thread state to await IPC");
@ -159,18 +159,25 @@ int Thread::_resume()
void Thread::_pause()
{
assert(_state == AWAITS_RESUME || _state == SCHEDULED);
cpu_scheduler()->remove(this);
_state = AWAITS_RESUME;
_unschedule(AWAITS_RESUME);
}
void Thread::_schedule()
{
cpu_scheduler()->insert(this);
if (_state == SCHEDULED) { return; }
_processor->scheduler()->insert(this);
_state = SCHEDULED;
}
void Thread::_unschedule(State const s)
{
if (_state == SCHEDULED) { _processor->scheduler()->remove(this); }
_state = s;
}
Thread::Thread(unsigned const priority, char const * const label)
:
Execution_context(priority),
@ -184,17 +191,17 @@ Thread::Thread(unsigned const priority, char const * const label)
cpu_exception = RESET;
}
Thread::~Thread() { if (Execution_context::list()) { _unschedule(STOPPED); } }
void
Thread::init(unsigned const cpu_id, unsigned const pd_id_arg,
Thread::init(Processor * const processor, unsigned const pd_id_arg,
Native_utcb * const utcb_phys, bool const start)
{
assert(_state == AWAITS_START)
/* FIXME: support SMP */
if (cpu_id) { PERR("multicore processing not supported"); }
/* store thread parameters */
_processor = processor;
_utcb_phys = utcb_phys;
/* join protection domain */
@ -213,18 +220,14 @@ Thread::init(unsigned const cpu_id, unsigned const pd_id_arg,
}
void Thread::_stop()
{
if (_state == SCHEDULED) { cpu_scheduler()->remove(this); }
_state = STOPPED;
}
void Thread::_stop() { _unschedule(STOPPED); }
void Thread::handle_exception()
void Thread::handle_exception(unsigned const processor_id)
{
switch (cpu_exception) {
case SUPERVISOR_CALL:
_call();
_call(processor_id);
return;
case PREFETCH_ABORT:
_mmu_exception();
@ -233,17 +236,17 @@ void Thread::handle_exception()
_mmu_exception();
return;
case INTERRUPT_REQUEST:
handle_interrupt();
handle_interrupt(_processor, processor_id);
return;
case FAST_INTERRUPT_REQUEST:
handle_interrupt();
handle_interrupt(_processor, processor_id);
return;
case RESET:
return;
default:
PERR("unknown exception");
_stop();
reset_lap_time();
reset_lap_time(processor_id);
}
}
@ -255,9 +258,9 @@ void Thread::_receive_yielded_cpu()
}
void Thread::proceed()
void Thread::proceed(unsigned const processor_id)
{
mtc()->continue_user(static_cast<Cpu::Context *>(this));
mtc()->continue_user(this, processor_id);
}
@ -366,8 +369,14 @@ void Thread::_call_start_thread()
user_arg_0(0);
return;
}
/* start thread */
t->init(cpu_id, pd_id, utcb, 1);
/*
* Start thread
*
* FIXME: The affinity of a thread is ignored by now.
* Instead we always assign the primary processor.
*/
if (cpu_id) { PERR("multiprocessing not supported"); }
t->init(multiprocessor()->primary(), pd_id, utcb, 1);
user_arg_0((Call_ret)t->_pd->tlb());
}
@ -437,7 +446,7 @@ void Thread::_call_yield_thread()
{
Thread * const t = Thread::pool()->object(user_arg_1());
if (t) { t->_receive_yielded_cpu(); }
cpu_scheduler()->yield();
_processor->scheduler()->yield();
}
@ -614,14 +623,8 @@ void Thread::_print_activity_table()
void Thread::_print_activity(bool const printing_thread)
{
static Thread * idle = dynamic_cast<Thread *>(cpu_scheduler()->idle());
Genode::printf("\033[33m[%u] %s", pd_id(), pd_label());
Genode::printf(" (%u) %s:\033[0m", id(), label());
if (id() == idle->id()) {
Genode::printf("\033[32m run\033[0m");
_print_common_activity();
return;
}
switch (_state) {
case AWAITS_START: {
Genode::printf("\033[32m init\033[0m");
@ -935,7 +938,7 @@ int Thread::_write_reg(addr_t const id, addr_t const value)
}
void Thread::_call()
void Thread::_call(unsigned const processor_id)
{
switch (user_arg_0()) {
case Call_id::NEW_THREAD: _call_new_thread(); return;
@ -969,6 +972,6 @@ void Thread::_call()
default:
PERR("unknown kernel call");
_stop();
reset_lap_time();
reset_lap_time(processor_id);
}
}

View File

@ -30,8 +30,6 @@ namespace Kernel
typedef Genode::Cpu Cpu;
typedef Genode::Native_utcb Native_utcb;
void reset_lap_time();
/**
* Kernel backend for userland execution-contexts
*/
@ -40,10 +38,38 @@ namespace Kernel
class Thread_ids : public Id_allocator<MAX_THREADS> { };
typedef Object_pool<Thread> Thread_pool;
class Processor;
Thread_ids * thread_ids();
Thread_pool * thread_pool();
/**
* Processor context of the kernel
*/
class Cpu_context;
}
struct Kernel::Cpu_context : Cpu::Context
{
private:
/**
* Hook for environment specific initializations
*
* \param stack_size size of kernel stack
*/
void _init(size_t const stack_size);
public:
/**
* Constructor
*/
Cpu_context();
};
class Kernel::Thread
:
public Cpu::User_context,
@ -76,6 +102,7 @@ class Kernel::Thread
Native_utcb * _utcb_phys;
Signal_receiver * _signal_receiver;
char const * const _label;
Processor * _processor;
/**
* Notice that another thread yielded the CPU to this thread
@ -114,6 +141,11 @@ class Kernel::Thread
*/
void _schedule();
/**
* Pause execution rawly
*/
void _unschedule(State const s);
/**
* Pause execution
*/
@ -139,9 +171,11 @@ class Kernel::Thread
void _mmu_exception();
/**
* Handle kernel-call request of this thread
* Handle kernel-call request of the thread
*
* \param processor_id kernel name of the trapped processor
*/
void _call();
void _call(unsigned const processor_id);
/**
* Read a thread register
@ -268,15 +302,20 @@ class Kernel::Thread
*/
Thread(unsigned const priority, char const * const label);
/**
* Destructor
*/
~Thread();
/**
* Prepare thread to get scheduled the first time
*
* \param cpu_id kernel name of targeted processor
* \param processor kernel object of targeted processor
* \param pd_id kernel name of target protection domain
* \param utcb core local pointer to userland thread-context
* \param start wether to start executing the thread
*/
void init(unsigned const cpu_id, unsigned const pd_id,
void init(Processor * const processor, unsigned const pd_id,
Native_utcb * const utcb, bool const start);
@ -284,8 +323,8 @@ class Kernel::Thread
** Execution_context **
***********************/
void handle_exception();
void proceed();
void handle_exception(unsigned const processor_id);
void proceed(unsigned const processor_id);
/***************

View File

@ -21,6 +21,7 @@
#include <kernel/scheduler.h>
#include <kernel/kernel.h>
#include <kernel/pd.h>
#include <kernel/multiprocessor.h>
#include <kernel/signal_receiver.h>
#include <cpu.h>
@ -48,6 +49,7 @@ class Kernel::Vm : public Object<Vm, MAX_VMS, Vm_ids, vm_ids, vm_pool>,
Genode::addr_t dfar;
};
Processor * const _processor;
Vm_state * const _state;
Signal_context * const _context;
@ -63,6 +65,7 @@ class Kernel::Vm : public Object<Vm, MAX_VMS, Vm_ids, vm_ids, vm_pool>,
Signal_context * const context)
:
Execution_context(Priority::MIN),
_processor(multiprocessor()->primary()),
_state((Vm_state * const)state), _context(context)
{ }
@ -71,31 +74,34 @@ class Kernel::Vm : public Object<Vm, MAX_VMS, Vm_ids, vm_ids, vm_pool>,
** Vm_session **
****************/
void run() { cpu_scheduler()->insert(this); }
void run() { _processor->scheduler()->insert(this); }
void pause() { cpu_scheduler()->remove(this); }
void pause() { _processor->scheduler()->remove(this); }
/***********************
** Execution_context **
***********************/
void handle_exception()
void handle_exception(unsigned const processor_id)
{
switch(_state->cpu_exception) {
case Genode::Cpu_state::INTERRUPT_REQUEST:
case Genode::Cpu_state::FAST_INTERRUPT_REQUEST:
handle_interrupt();
handle_interrupt(_processor, processor_id);
return;
case Genode::Cpu_state::DATA_ABORT:
_state->dfar = Genode::Cpu::Dfar::read();
default:
cpu_scheduler()->remove(this);
_processor->scheduler()->remove(this);
_context->submit(1);
}
}
void proceed() { mtc()->continue_vm(_state); }
void proceed(unsigned const processor_id)
{
mtc()->continue_vm(_state, processor_id);
}
};
#endif /* _KERNEL__VM_H_ */

View File

@ -11,8 +11,8 @@
* under the terms of the GNU General Public License version 2.
*/
#ifndef _EXYNOS5__CPU_H_
#define _EXYNOS5__CPU_H_
#ifndef _ODROID_XU__CPU_H_
#define _ODROID_XU__CPU_H_
/* core includes */
#include <cpu/cortex_a15.h>
@ -22,8 +22,21 @@ namespace Genode
/**
* CPU driver for core
*/
class Cpu : public Cortex_a15::Cpu { };
class Cpu : public Cortex_a15::Cpu
{
public:
/**
* Return kernel name of the executing processor
*/
static unsigned id() { return 0; }
/**
* Return kernel name of the primary processor
*/
static unsigned primary_id() { return primary_id(); }
};
}
#endif /* _EXYNOS5__CPU_H_ */
#endif /* _ODROID_XU__CPU_H_ */

View File

@ -11,6 +11,7 @@ REQUIRES += platform_odroid_xu
INC_DIR += $(REP_DIR)/src/core/odroid_xu
INC_DIR += $(REP_DIR)/src/core/exynos5
INC_DIR += $(REP_DIR)/src/core/arm
INC_DIR += $(REP_DIR)/src/core/arm_v7
# add C++ sources
SRC_CC += platform_services.cc \

View File

@ -11,8 +11,8 @@
* under the terms of the GNU General Public License version 2.
*/
#ifndef _EXYNOS5__TIMER_H_
#define _EXYNOS5__TIMER_H_
#ifndef _ODROID_XU__TIMER_H_
#define _ODROID_XU__TIMER_H_
/* core includes */
#include <board.h>
@ -27,7 +27,15 @@ namespace Kernel
{
public:
enum { IRQ = Genode::Board::MCT_IRQ_L0 };
/**
* Return kernel name of timer interrupt of a specific processor
*
* \param processor_id kernel name of targeted processor
*/
static unsigned interrupt_id(unsigned)
{
return Genode::Board::MCT_IRQ_L0;
}
/**
* Constructor
@ -37,4 +45,4 @@ namespace Kernel
};
}
#endif /* _EXYNOS5__TIMER_H_ */
#endif /* _ODROID_XU__TIMER_H_ */

View File

@ -22,7 +22,20 @@ namespace Genode
/**
* CPU driver for core
*/
class Cpu : public Cortex_a9::Cpu { };
class Cpu : public Cortex_a9::Cpu
{
public:
/**
* Return kernel name of the primary processor
*/
static unsigned primary_id() { return 0; }
/**
* Return kernel name of the executing processor
*/
static unsigned id() { return primary_id(); }
};
}
#endif /* _PANDA__CPU_H_ */

View File

@ -11,6 +11,7 @@ REQUIRES += platform_panda
# add include paths
INC_DIR += $(REP_DIR)/src/core/panda
INC_DIR += $(REP_DIR)/src/core/arm
INC_DIR += $(REP_DIR)/src/core/arm_v7
# add C++ sources
SRC_CC += platform_services.cc \

View File

@ -22,7 +22,20 @@ namespace Genode
/**
* CPU driver for core
*/
class Cpu : public Cortex_a9::Cpu { };
class Cpu : public Cortex_a9::Cpu
{
public:
/**
* Return kernel name of the primary processor
*/
static unsigned primary_id() { return 0; }
/**
* Return kernel name of the executing processor
*/
static unsigned id() { return primary_id(); }
};
}
#endif /* _PBXA9__CPU_H_ */

View File

@ -11,6 +11,7 @@ REQUIRES += platform_pbxa9
# add include paths
INC_DIR += $(REP_DIR)/src/core/pbxa9
INC_DIR += $(REP_DIR)/src/core/arm
INC_DIR += $(REP_DIR)/src/core/arm_v7
# add C++ sources
SRC_CC += platform_services.cc \

View File

@ -26,301 +26,318 @@ namespace Arm_gic
*
* ARM generic interrupt controller, Architecture version 2.0
*/
class Pic
{
public:
class Pic;
}
enum { MAX_INTERRUPT_ID = 1023 };
class Arm_gic::Pic
{
public:
protected:
enum { MAX_INTERRUPT_ID = 1023 };
enum {
MIN_SPI = 32,
SPURIOUS_ID = 1023,
protected:
enum {
MIN_SPI = 32,
SPURIOUS_ID = 1023,
};
/**
* Distributor interface
*/
struct Distr : public Mmio
{
/**
* Constructor
*/
Distr(addr_t const base) : Mmio(base) { }
/**
* Control register
*/
struct Ctlr : Register<0x000, 32>
{
struct Enable : Bitfield<0,1> { };
};
/**
* Distributor interface
* Controller type register
*/
struct Distr : public Mmio
struct Typer : Register<0x004, 32>
{
/**
* Constructor
*/
Distr(addr_t const base) : Mmio(base) { }
/**
* Control register
*/
struct Ctlr : Register<0x000, 32>
{
struct Enable : Bitfield<0,1> { };
};
/**
* Controller type register
*/
struct Typer : Register<0x004, 32>
{
struct It_lines_number : Bitfield<0,5> { };
struct Cpu_number : Bitfield<5,3> { };
};
/**
* Interrupt group register
*/
struct Igroupr :
Register_array<0x80, 32, MAX_INTERRUPT_ID + 1, 1>
{
struct Group_status : Bitfield<0, 1> { };
};
/**
* Interrupt set enable registers
*/
struct Isenabler :
Register_array<0x100, 32, MAX_INTERRUPT_ID + 1, 1, true>
{
struct Set_enable : Bitfield<0, 1> { };
};
/**
* Interrupt clear enable registers
*/
struct Icenabler :
Register_array<0x180, 32, MAX_INTERRUPT_ID + 1, 1, true>
{
struct Clear_enable : Bitfield<0, 1> { };
};
/**
* Interrupt priority level registers
*/
struct Ipriorityr :
Register_array<0x400, 32, MAX_INTERRUPT_ID + 1, 8>
{
enum { GET_MIN = 0xff };
struct Priority : Bitfield<0, 8> { };
};
/**
* Interrupt processor target registers
*/
struct Itargetsr :
Register_array<0x800, 32, MAX_INTERRUPT_ID + 1, 8>
{
enum { ALL = 0xff };
struct Cpu_targets : Bitfield<0, 8> { };
};
/**
* Interrupt configuration registers
*/
struct Icfgr :
Register_array<0xc00, 32, MAX_INTERRUPT_ID + 1, 2>
{
struct Edge_triggered : Bitfield<1, 1> { };
};
/**
* Minimum supported interrupt priority
*/
Ipriorityr::access_t min_priority()
{
write<Ipriorityr::Priority>(Ipriorityr::GET_MIN, 0);
return read<Ipriorityr::Priority>(0);
}
/**
* Maximum supported interrupt priority
*/
Ipriorityr::access_t max_priority() { return 0; }
/**
* ID of the maximum supported interrupt
*/
Typer::access_t max_interrupt()
{
enum { LINE_WIDTH_LOG2 = 5 };
Typer::access_t lnr = read<Typer::It_lines_number>();
return ((lnr + 1) << LINE_WIDTH_LOG2) - 1;
}
} _distr;
struct It_lines_number : Bitfield<0,5> { };
struct Cpu_number : Bitfield<5,3> { };
};
/**
* CPU interface
* Interrupt group register
*/
struct Cpu : public Mmio
struct Igroupr :
Register_array<0x80, 32, MAX_INTERRUPT_ID + 1, 1>
{
/**
* Constructor
*/
Cpu(addr_t const base) : Mmio(base) { }
/**
* Control register
*/
struct Ctlr : Register<0x00, 32>
{
/* Without security extension */
struct Enable : Bitfield<0,1> { };
/* In a secure world */
struct Enable_grp0 : Bitfield<0,1> { };
struct Enable_grp1 : Bitfield<1,1> { };
struct Fiq_en : Bitfield<3,1> { };
};
/**
* Priority mask register
*/
struct Pmr : Register<0x04, 32>
{
struct Priority : Bitfield<0,8> { };
};
/**
* Binary point register
*/
struct Bpr : Register<0x08, 32>
{
enum { NO_PREEMPTION = 7 };
struct Binary_point : Bitfield<0,3> { };
};
/**
* Interrupt acknowledge register
*/
struct Iar : Register<0x0c, 32, true>
{
struct Irq_id : Bitfield<0,10> { };
struct Cpu_id : Bitfield<10,3> { };
};
/**
* End of interrupt register
*/
struct Eoir : Register<0x10, 32, true>
{
struct Irq_id : Bitfield<0,10> { };
struct Cpu_id : Bitfield<10,3> { };
};
} _cpu;
unsigned const _max_interrupt;
unsigned _last_request;
struct Group_status : Bitfield<0, 1> { };
};
/**
* Wether the security extension is used or not
* Interrupt set enable registers
*/
inline static bool _use_security_ext();
public:
struct Isenabler :
Register_array<0x100, 32, MAX_INTERRUPT_ID + 1, 1, true>
{
struct Set_enable : Bitfield<0, 1> { };
};
/**
* Constructor, all interrupts get masked
* Interrupt clear enable registers
*/
Pic(addr_t const distr_base, addr_t const cpu_base) :
_distr(distr_base), _cpu(cpu_base),
_max_interrupt(_distr.max_interrupt()),
_last_request(SPURIOUS_ID)
struct Icenabler :
Register_array<0x180, 32, MAX_INTERRUPT_ID + 1, 1, true>
{
/* with security extension any board has its own init */
if (_use_security_ext()) return;
struct Clear_enable : Bitfield<0, 1> { };
};
/* disable device */
_distr.write<Distr::Ctlr::Enable>(0);
/**
* Interrupt priority level registers
*/
struct Ipriorityr :
Register_array<0x400, 32, MAX_INTERRUPT_ID + 1, 8>
{
enum { GET_MIN = 0xff };
/* supported priority range */
unsigned const min_prio = _distr.min_priority();
unsigned const max_prio = _distr.max_priority();
struct Priority : Bitfield<0, 8> { };
};
/* configure every shared peripheral interrupt */
for (unsigned i=MIN_SPI; i <= _max_interrupt; i++)
{
_distr.write<Distr::Icfgr::Edge_triggered>(0, i);
_distr.write<Distr::Ipriorityr::Priority>(max_prio, i);
_distr.write<Distr::Itargetsr::Cpu_targets>(
Distr::Itargetsr::ALL, i);
}
/**
* Interrupt processor target registers
*/
struct Itargetsr :
Register_array<0x800, 32, MAX_INTERRUPT_ID + 1, 8>
{
enum { ALL = 0xff };
/* disable the priority filter */
_cpu.write<Cpu::Pmr::Priority>(min_prio);
struct Cpu_targets : Bitfield<0, 8> { };
};
/* disable preemption of interrupt handling by interrupts */
_cpu.write<Cpu::Bpr::Binary_point>(Cpu::Bpr::NO_PREEMPTION);
/**
* Interrupt configuration registers
*/
struct Icfgr :
Register_array<0xc00, 32, MAX_INTERRUPT_ID + 1, 2>
{
struct Edge_triggered : Bitfield<1, 1> { };
};
/* enable device */
_distr.write<Distr::Ctlr::Enable>(1);
_cpu.write<Cpu::Ctlr::Enable>(1);
/**
* Minimum supported interrupt priority
*/
Ipriorityr::access_t min_priority()
{
write<Ipriorityr::Priority>(Ipriorityr::GET_MIN, 0);
return read<Ipriorityr::Priority>(0);
}
/**
* Get the ID of the last interrupt request
*
* \return True if the request with ID 'i' is treated as accepted
* by the CPU and awaits an subsequently 'finish_request'
* call. Otherwise this returns false and the value of 'i'
* remains useless.
* Maximum supported interrupt priority
*/
bool take_request(unsigned & i)
Ipriorityr::access_t max_priority() { return 0; }
/**
* ID of the maximum supported interrupt
*/
Typer::access_t max_interrupt()
{
_last_request = _cpu.read<Cpu::Iar::Irq_id>();
i = _last_request;
return valid(i);
enum { LINE_WIDTH_LOG2 = 5 };
Typer::access_t lnr = read<Typer::It_lines_number>();
return ((lnr + 1) << LINE_WIDTH_LOG2) - 1;
}
} _distr;
/**
* CPU interface
*/
struct Cpu : public Mmio
{
/**
* Complete the last request that was taken via 'take_request'
* Constructor
*/
void finish_request()
Cpu(addr_t const base) : Mmio(base) { }
/**
* Control register
*/
struct Ctlr : Register<0x00, 32>
{
if (!valid(_last_request)) return;
_cpu.write<Cpu::Eoir>(Cpu::Eoir::Irq_id::bits(_last_request) |
Cpu::Eoir::Cpu_id::bits(0) );
_last_request = SPURIOUS_ID;
}
/* Without security extension */
struct Enable : Bitfield<0,1> { };
/* In a secure world */
struct Enable_grp0 : Bitfield<0,1> { };
struct Enable_grp1 : Bitfield<1,1> { };
struct Fiq_en : Bitfield<3,1> { };
};
/**
* Check if 'i' is a valid interrupt request ID at the device
* Priority mask register
*/
bool valid(unsigned const i) const { return i <= _max_interrupt; }
/**
* Unmask all interrupts
*/
void unmask()
struct Pmr : Register<0x04, 32>
{
for (unsigned i=0; i <= _max_interrupt; i++)
_distr.write<Distr::Isenabler::Set_enable>(1, i);
}
struct Priority : Bitfield<0,8> { };
};
/**
* Unmask interrupt 'i'
* Binary point register
*/
void unmask(unsigned const i) {
_distr.write<Distr::Isenabler::Set_enable>(1, i); }
/**
* Mask all interrupts
*/
void mask()
struct Bpr : Register<0x08, 32>
{
for (unsigned i=0; i <= _max_interrupt; i++)
_distr.write<Distr::Icenabler::Clear_enable>(1, i);
}
enum { NO_PREEMPTION = 7 };
struct Binary_point : Bitfield<0,3> { };
};
/**
* Mask interrupt 'i'
* Interrupt acknowledge register
*/
void mask(unsigned const i) {
_distr.write<Distr::Icenabler::Clear_enable>(1, i); }
};
}
struct Iar : Register<0x0c, 32, true>
{
struct Irq_id : Bitfield<0,10> { };
struct Cpu_id : Bitfield<10,3> { };
};
/**
* End of interrupt register
*/
struct Eoir : Register<0x10, 32, true>
{
struct Irq_id : Bitfield<0,10> { };
struct Cpu_id : Bitfield<10,3> { };
};
} _cpu;
unsigned const _max_interrupt;
unsigned _last_request;
/**
* Wether the security extension is used or not
*/
inline static bool _use_security_ext();
public:
/**
* Constructor
*/
Pic(addr_t const distr_base, addr_t const cpu_base)
:
_distr(distr_base), _cpu(cpu_base),
_max_interrupt(_distr.max_interrupt()),
_last_request(SPURIOUS_ID)
{
/* with security extension any board has its own init */
if (_use_security_ext()) return;
/* disable device */
_distr.write<Distr::Ctlr::Enable>(0);
/* configure every shared peripheral interrupt */
for (unsigned i=MIN_SPI; i <= _max_interrupt; i++)
{
_distr.write<Distr::Icfgr::Edge_triggered>(0, i);
_distr.write<Distr::Ipriorityr::Priority>(_distr.max_priority(), i);
}
/* enable device */
_distr.write<Distr::Ctlr::Enable>(1);
}
/**
* Initialize processor local interface of the controller
*/
void init_processor_local()
{
/* disable the priority filter */
_cpu.write<Cpu::Pmr::Priority>(_distr.min_priority());
/* disable preemption of interrupt handling by interrupts */
_cpu.write<Cpu::Bpr::Binary_point>(Cpu::Bpr::NO_PREEMPTION);
/* enable device */
_cpu.write<Cpu::Ctlr::Enable>(1);
}
/**
* Get the ID of the last interrupt request
*
* \return True if the request with ID 'i' is treated as accepted
* by the CPU and awaits an subsequently 'finish_request'
* call. Otherwise this returns false and the value of 'i'
* remains useless.
*/
bool take_request(unsigned & i)
{
_last_request = _cpu.read<Cpu::Iar::Irq_id>();
i = _last_request;
return valid(i);
}
/**
* Complete the last request that was taken via 'take_request'
*/
void finish_request()
{
if (!valid(_last_request)) return;
_cpu.write<Cpu::Eoir>(Cpu::Eoir::Irq_id::bits(_last_request) |
Cpu::Eoir::Cpu_id::bits(0) );
_last_request = SPURIOUS_ID;
}
/**
* Check if 'i' is a valid interrupt request ID at the device
*/
bool valid(unsigned const i) const { return i <= _max_interrupt; }
/**
* Unmask all interrupts
*/
void unmask()
{
for (unsigned i=0; i <= _max_interrupt; i++) {
_distr.write<Distr::Isenabler::Set_enable>(1, i);
}
}
/**
* Unmask interrupt and assign it to a specific processor
*
* \param interrupt_id kernel name of targeted interrupt
* \param processor_id kernel name of targeted processor
*/
void unmask(unsigned const interrupt_id, unsigned const processor_id)
{
unsigned const targets = 1 << processor_id;
_distr.write<Distr::Itargetsr::Cpu_targets>(targets, interrupt_id);
_distr.write<Distr::Isenabler::Set_enable>(1, interrupt_id);
}
/**
* Mask all interrupts
*/
void mask()
{
for (unsigned i=0; i <= _max_interrupt; i++) {
_distr.write<Distr::Icenabler::Clear_enable>(1, i);
}
}
/**
* Mask specific interrupt
*
* \param interrupt_id kernel name of targeted interrupt
*/
void mask(unsigned const interrupt_id)
{
_distr.write<Distr::Icenabler::Clear_enable>(1, interrupt_id);
}
};
#endif /* _PIC__ARM_GIC_H_ */

View File

@ -142,6 +142,11 @@ namespace Imx31
write<Nipriority>(Nipriority::ALL_LOWEST, i);
}
/**
* Initialize processor local interface of the controller
*/
void init_processor_local() { }
/**
* Receive a pending request number 'i'
*/
@ -182,10 +187,16 @@ namespace Imx31
}
/**
* Unmask interrupt 'i'
* Unmask interrupt
*
* \param interrupt_id kernel name of targeted interrupt
*/
void unmask(unsigned const i) {
if (i <= MAX_INTERRUPT_ID) write<Intennum>(i); }
void unmask(unsigned const interrupt_id, unsigned)
{
if (interrupt_id <= MAX_INTERRUPT_ID) {
write<Intennum>(interrupt_id);
}
}
/**
* Mask interrupt 'i'

View File

@ -1,24 +0,0 @@
/*
* \brief Board driver for core
* \author Norman Feske
* \date 2013-04-05
*/
/*
* Copyright (C) 2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _RPI__BOARD_H_
#define _RPI__BOARD_H_
/* Genode includes */
#include <drivers/board_base.h>
namespace Genode { struct Board; }
struct Genode::Board : Genode::Board_base { static void prepare_kernel() { } };
#endif /* _RPI__BOARD_H_ */

View File

@ -58,6 +58,8 @@ namespace Kernel
Pic() : Genode::Mmio(Genode::Board::IRQ_CONTROLLER_BASE) { mask(); }
void init_processor_local() { }
bool take_request(unsigned &irq)
{
/* read basic IRQ status mask */
@ -96,7 +98,7 @@ namespace Kernel
write<Irq_disable_gpu_2>(~0);
}
void unmask(unsigned const i)
void unmask(unsigned const i, unsigned)
{
if (i < 8)
write<Irq_enable_basic>(1 << i);

View File

@ -10,6 +10,7 @@ REQUIRES = platform_rpi
# add include paths
INC_DIR += $(REP_DIR)/src/core/rpi
INC_DIR += $(REP_DIR)/src/core/arm
INC_DIR += $(REP_DIR)/src/core/arm_v6
# add C++ sources
SRC_CC += platform_services.cc \

View File

@ -47,9 +47,12 @@ class Kernel::Timer : public Genode::Mmio
Timer() : Mmio(Board_base::SYSTEM_TIMER_MMIO_BASE) { }
enum { IRQ = Board_base::SYSTEM_TIMER_IRQ };
static unsigned interrupt_id(unsigned)
{
return Board_base::SYSTEM_TIMER_IRQ;
}
inline void start_one_shot(uint32_t const tics)
inline void start_one_shot(uint32_t const tics, unsigned)
{
write<Clo>(0);
write<Cmp>(read<Clo>() + tics);
@ -61,7 +64,7 @@ class Kernel::Timer : public Genode::Mmio
return (Board_base::SYSTEM_TIMER_CLOCK / 1000) * ms;
}
void clear_interrupt()
void clear_interrupt(unsigned)
{
write<Cs::Status>(1);
read<Cs>();

View File

@ -52,6 +52,7 @@ SRC_CC += console.cc \
kernel/vm.cc \
kernel/signal_receiver.cc \
kernel/irq.cc \
kernel/multiprocessor.cc \
rm_session_support.cc \
trustzone.cc \
pager.cc \

View File

@ -51,9 +51,9 @@ namespace Cortex_a9
struct Event : Bitfield<0,1> { }; /* if counter hit zero */
};
public:
void _clear_interrupt() { write<Interrupt_status::Event>(1); }
enum { IRQ = Cortex_a9::Cpu::PRIVATE_TIMER_IRQ };
public:
/**
* Constructor, clears the interrupt output
@ -61,16 +61,26 @@ namespace Cortex_a9
Timer() : Mmio(Cortex_a9::Cpu::PRIVATE_TIMER_MMIO_BASE)
{
write<Control::Timer_enable>(0);
clear_interrupt();
_clear_interrupt();
}
/**
* Start one-shot run with an IRQ delay of 'tics'
* Return kernel name of timer interrupt
*/
inline void start_one_shot(uint32_t const tics)
static unsigned interrupt_id(unsigned)
{
return Cortex_a9::Cpu::PRIVATE_TIMER_IRQ;
}
/**
* Start single timeout run
*
* \param tics delay of timer interrupt
*/
inline void start_one_shot(unsigned const tics, unsigned)
{
/* reset timer */
clear_interrupt();
_clear_interrupt();
Control::access_t control = 0;
Control::Irq_enable::set(control, 1);
write<Control>(control);
@ -91,10 +101,7 @@ namespace Cortex_a9
/**
* Clear interrupt output line
*/
void clear_interrupt()
{
write<Interrupt_status::Event>(1);
}
void clear_interrupt(unsigned) { _clear_interrupt(); }
};
}

View File

@ -26,81 +26,149 @@ namespace Exynos_mct
*/
class Timer : public Mmio
{
enum {
PRESCALER = 1,
DIV_MUX = 0,
};
private:
/**
* MCT configuration
*/
struct Mct_cfg : Register<0x0, 32>
{
struct Prescaler : Bitfield<0, 8> { };
struct Div_mux : Bitfield<8, 3> { };
};
enum {
PRESCALER = 1,
DIV_MUX = 0,
};
/**
* Local timer 0 free running counter buffer
*/
struct L0_frcntb : Register<0x310, 32> { };
/**
* MCT configuration
*/
struct Mct_cfg : Register<0x0, 32>
{
struct Prescaler : Bitfield<0, 8> { };
struct Div_mux : Bitfield<8, 3> { };
};
/**
* Local timer 0 configuration
*/
struct L0_tcon : Register<0x320, 32>
{
struct Frc_start : Bitfield<3, 1> { };
};
/**
* Local timer 0 expired status
*/
struct L0_int_cstat : Register<0x330, 32, true>
{
struct Frcnt : Bitfield<1, 1> { };
};
/*******************
** Local timer 0 **
*******************/
/**
* Local timer 0 interrupt enable
*/
struct L0_int_enb : Register<0x334, 32>
{
struct Frceie : Bitfield<1, 1> { };
};
/**
* Free running counter buffer
*/
struct L0_frcntb : Register<0x310, 32> { };
/**
* Local timer 0 write status
*/
struct L0_wstat : Register<0x340, 32, true>
{
struct Frcntb : Bitfield<2, 1> { };
struct Tcon : Bitfield<3, 1> { };
};
/**
* Configuration
*/
struct L0_tcon : Register<0x320, 32>
{
struct Frc_start : Bitfield<3, 1> { };
};
/**
* Write to reg that replies via ack bit and clear ack bit
*/
template <typename DEST, typename ACK>
void _acked_write(typename DEST::Register_base::access_t const v)
{
typedef typename DEST::Register_base Dest;
typedef typename ACK::Bitfield_base Ack;
write<Dest>(v);
while (!read<Ack>());
write<Ack>(1);
}
/**
* Expired status
*/
struct L0_int_cstat : Register<0x330, 32, true>
{
struct Frcnt : Bitfield<1, 1> { };
};
unsigned long const _tics_per_ms;
/**
* Interrupt enable
*/
struct L0_int_enb : Register<0x334, 32>
{
struct Frceie : Bitfield<1, 1> { };
};
/**
* Start and stop counting
*/
void _run(bool const run)
{
_acked_write<L0_tcon, L0_wstat::Tcon>
(L0_tcon::Frc_start::bits(run));
}
/**
* Write status
*/
struct L0_wstat : Register<0x340, 32, true>
{
struct Frcntb : Bitfield<2, 1> { };
struct Tcon : Bitfield<3, 1> { };
};
struct L0_frcnto : Register<0x314, 32> { };
/**
* Start and stop counting
*/
void _run_0(bool const run)
{
_acked_write<L0_tcon, L0_wstat::Tcon>
(L0_tcon::Frc_start::bits(run));
}
/*******************
** Local timer 1 **
*******************/
/**
* Free running counter buffer
*/
struct L1_frcntb : Register<0x410, 32> { };
/**
* Configuration
*/
struct L1_tcon : Register<0x420, 32>
{
struct Frc_start : Bitfield<3, 1> { };
};
/**
* Expired status
*/
struct L1_int_cstat : Register<0x430, 32, true>
{
struct Frcnt : Bitfield<1, 1> { };
};
/**
* Interrupt enable
*/
struct L1_int_enb : Register<0x434, 32>
{
struct Frceie : Bitfield<1, 1> { };
};
/**
* Write status
*/
struct L1_wstat : Register<0x440, 32, true>
{
struct Frcntb : Bitfield<2, 1> { };
struct Tcon : Bitfield<3, 1> { };
};
struct L1_frcnto : Register<0x414, 32> { };
/**
* Start and stop counting
*/
void _run_1(bool const run)
{
_acked_write<L1_tcon, L1_wstat::Tcon>
(L1_tcon::Frc_start::bits(run));
}
/********************
** Helper methods **
********************/
/**
* Write to reg that replies via ack bit and clear ack bit
*/
template <typename DEST, typename ACK>
void _acked_write(typename DEST::Register_base::access_t const v)
{
typedef typename DEST::Register_base Dest;
typedef typename ACK::Bitfield_base Ack;
write<Dest>(v);
while (!read<Ack>());
write<Ack>(1);
}
unsigned long const _tics_per_ms;
public:
@ -115,16 +183,33 @@ namespace Exynos_mct
Mct_cfg::Div_mux::set(mct_cfg, DIV_MUX);
write<Mct_cfg>(mct_cfg);
write<L0_int_enb>(L0_int_enb::Frceie::bits(1));
write<L1_int_enb>(L1_int_enb::Frceie::bits(1));
}
/**
* Start one-shot run with an IRQ delay of 'tics'
* Start single timeout run
*
* \param tics delay of timer interrupt
* \param processor_id kernel name of processor of targeted timer
*/
inline void start_one_shot(unsigned const tics)
inline void start_one_shot(unsigned const tics,
unsigned const processor_id)
{
_run(0);
_acked_write<L0_frcntb, L0_wstat::Frcntb>(tics);
_run(1);
switch (processor_id) {
case 0:
_run_0(0);
_acked_write<L0_frcntb, L0_wstat::Frcntb>(tics);
_run_0(1);
return;
case 1:
_run_1(0);
_acked_write<L1_frcntb, L1_wstat::Frcntb>(tics);
_run_1(1);
return;
default:
while (1) { }
return;
}
}
/**
@ -138,7 +223,45 @@ namespace Exynos_mct
/**
* Clear interrupt output line
*/
void clear_interrupt() { write<L0_int_cstat::Frcnt>(1); }
void clear_interrupt(unsigned const processor_id)
{
switch (processor_id) {
case 0:
write<L0_int_cstat::Frcnt>(1);
return;
case 1:
write<L1_int_cstat::Frcnt>(1);
return;
default:
return;
}
}
unsigned value(unsigned const processor_id)
{
switch (processor_id) {
case 0:
return read<L0_frcnto>();
case 1:
return read<L1_frcnto>();
default:
while (1) { }
return 0;
}
}
unsigned irq_state(unsigned const processor_id)
{
switch (processor_id) {
case 0:
return read<L0_int_cstat::Frcnt>();
case 1:
return read<L1_int_cstat::Frcnt>();
default:
while (1) { }
return 0;
}
}
};
}

View File

@ -67,10 +67,6 @@ namespace Arm
/**
* Second level translation table
*
* A table is dedicated to either secure or non-secure mode. All
* translations done by this table apply to domain 0. They are not
* shareable and have zero-filled memory region attributes.
*/
class Page_table
{
@ -371,12 +367,6 @@ namespace Arm
/**
* First level translation table
*
* A table is dedicated to either secure or non-secure mode. All
* translations done by this table apply to domain 0. They are not
* shareable and have zero-filled memory region attributes. The size
* of this table is fixed to such a value that this table translates
* a space wich is addressable by 32 bit.
*/
class Section_table
{

View File

@ -13,4 +13,4 @@
#include <trustzone.h>
void Kernel::trustzone_initialization(Pic *pic) { }
void Kernel::init_trustzone(Pic * pic) { }

View File

@ -22,7 +22,20 @@ namespace Genode
/**
* CPU driver for core
*/
class Cpu : public Cortex_a9::Cpu { };
class Cpu : public Cortex_a9::Cpu
{
public:
/**
* Return kernel name of the primary processor
*/
static unsigned primary_id() { return 0; }
/**
* Return kernel name of the executing processor
*/
static unsigned id() { return primary_id(); }
};
}
#endif /* _VEA9X4__CPU_H_ */

View File

@ -8,6 +8,7 @@
# add include paths
INC_DIR += $(REP_DIR)/src/core/vea9x4
INC_DIR += $(REP_DIR)/src/core/arm
INC_DIR += $(REP_DIR)/src/core/arm_v7
# add C++ sources
SRC_CC += platform_services.cc \

View File

@ -20,8 +20,13 @@
extern int _mon_kernel_entry;
void Kernel::trustzone_initialization(Pic *pic)
void Kernel::init_trustzone(Pic * pic)
{
/* check for compatibility */
if (PROCESSORS > 1) {
PERR("trustzone not supported with multiprocessing");
return;
}
/* set exception vector entry */
Genode::Cpu::mon_exception_entry_at((Genode::addr_t)&_mon_kernel_entry);

View File

@ -114,23 +114,10 @@ namespace Genode
/* disable timer */
write<Cr::En>(0);
clear_interrupt();
clear_interrupt(0);
}
public:
/**
* Constructor
*/
Epit_base(addr_t base) : Mmio(base) { _reset(); }
/**
* Start a one-shot run
*
* \param tics native timer value used to assess the delay
* of the timer interrupt as of the call
*/
void start_one_shot(unsigned const tics)
void _start_one_shot(unsigned const tics)
{
/* stop timer */
_reset();
@ -144,6 +131,23 @@ namespace Genode
write<Cr::En>(1);
}
public:
/**
* Constructor
*/
Epit_base(addr_t base) : Mmio(base) { _reset(); }
/**
* Start single timeout run
*
* \param tics delay of timer interrupt
*/
void start_one_shot(unsigned const tics, unsigned)
{
_start_one_shot(tics);
}
/**
* Stop the timer from a one-shot run
*
@ -161,7 +165,7 @@ namespace Genode
/**
* Clear interrupt output line
*/
void clear_interrupt() { write<Sr::Ocif>(1); }
void clear_interrupt(unsigned) { write<Sr::Ocif>(1); }
/**
* Translate milliseconds to a native timer value

View File

@ -55,6 +55,7 @@ namespace Genode
MCT_MMIO_SIZE = 0x1000,
MCT_CLOCK = 24000000,
MCT_IRQ_L0 = 152,
MCT_IRQ_L1 = 153,
/* USB */
USB_HOST20_IRQ = 103,
@ -74,6 +75,12 @@ namespace Genode
/* wether board provides security extension */
SECURITY_EXTENSION = 1,
/* IRAM */
IRAM_BASE = 0x02020000,
/* hardware name of the primary processor */
PRIMARY_MPIDR_AFF_0 = 0,
};
};
}

View File

@ -53,8 +53,7 @@ namespace Genode
/**
* Count down 'value', raise IRQ output, wrap counter and continue
*/
void run_and_wrap(unsigned long value) {
start_one_shot(value); }
void run_and_wrap(unsigned long value) { _start_one_shot(value); }
/**
* Maximum timeout value