Simple drivers for the Cortex A9 components

This commit is contained in:
Martin Stein 2012-05-24 12:56:30 +02:00 committed by Christian Helmuth
parent 056f980d4e
commit dce09679bc
3 changed files with 1564 additions and 0 deletions

View File

@ -0,0 +1,607 @@
/*
* \brief Simple Driver for the ARM Cortex A9
* \author Martin Stein
* \date 2011-11-03
*/
/*
* Copyright (C) 2011-2012 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _BASE__INCLUDE__DRIVERS__CPU__CORTEX_A9__CORE_H_
#define _BASE__INCLUDE__DRIVERS__CPU__CORTEX_A9__CORE_H_
/* Genode includes */
#include <util/register.h>
#include <util/mmio.h>
#include <drivers/board.h>
#include <drivers/cpu/cortex_a9/timer.h>
namespace Genode
{
class Section_table;
/**
* Cortex A9 driver
*/
struct Cortex_a9
{
/* Platform constants: clocks, private-memory MMIO layout, IRQ numbers */
enum
{
/* Common */
DATA_ACCESS_ALIGNM = 4,
CLK = Board::CORTEX_A9_CLOCK, /* CPU interface clock */
PERIPH_CLK = CLK, /* Clock for CPU internal components */
MIN_PAGE_SIZE_LOG2 = 12,
MAX_PAGE_SIZE_LOG2 = 20,
HIGHEST_EXCEPTION_ENTRY = 0xffff0000,
/* Interrupt controller (PL390 GIC, within the private memory region) */
PL390_DISTRIBUTOR_MMIO_BASE = Board::CORTEX_A9_PRIVATE_MEM_BASE + 0x1000,
PL390_DISTRIBUTOR_MMIO_SIZE = 0x1000,
PL390_CPU_MMIO_BASE = Board::CORTEX_A9_PRIVATE_MEM_BASE + 0x100,
PL390_CPU_MMIO_SIZE = 0x100,
/* Timer (per-core private timer at offset 0x600 of the private region) */
PRIVATE_TIMER_MMIO_BASE = Board::CORTEX_A9_PRIVATE_MEM_BASE + 0x600,
PRIVATE_TIMER_MMIO_SIZE = 0x10,
PRIVATE_TIMER_IRQ = 29,
TIMER_MMIO = PRIVATE_TIMER_MMIO_BASE,
TIMER_IRQ = PRIVATE_TIMER_IRQ,
};
/* Exception type IDs as stored in Context::exception_type */
enum Exception_type
{
RESET = 1,
UNDEFINED_INSTRUCTION = 2,
SUPERVISOR_CALL = 3,
PREFETCH_ABORT = 4,
DATA_ABORT = 5,
INTERRUPT_REQUEST = 6,
FAST_INTERRUPT_REQUEST = 7,
};
typedef Cortex_a9_timer<PERIPH_CLK> Timer;
/**
* Common parts of fault status registers
*/
struct Fsr : Register<32>
{
/* Fault status encoding (values of the composed FS[4:0] field) */
enum Fault_status {
SECTION_TRANSLATION_FAULT = 5,
PAGE_TRANSLATION_FAULT = 7,
};
struct Fs_3_0 : Bitfield<0, 4> { }; /* Fault status bits [3:0] */
struct Fs_4 : Bitfield<10, 1> { }; /* Fault status bits [4] */
};
/**
* Instruction fault status register
*/
struct Ifsr : Fsr
{
/**
 * Read register value, only in privileged CPU mode
 */
static access_t read() {
access_t v;
asm volatile ("mrc p15, 0, %[v], c5, c0, 1\n" : [v]"=r"(v) :: );
return v;
}
/**
 * Read fault status of the last prefetch abort
 */
static Fault_status fault_status() {
access_t const v = read();
/* compose the 5-bit status from FS[3:0] and FS[4] */
return (Fault_status)(Fs_3_0::get(v) |
(Fs_4::get(v) << Fs_3_0::WIDTH));
}
};
/**
* Instruction fault address register
*/
struct Ifar : Register<32>
{
/**
 * Read register value (address of the last prefetch abort),
 * only in privileged CPU mode
 */
static access_t read() {
access_t v;
asm volatile ("mrc p15, 0, %[v], c6, c0, 2\n" : [v]"=r"(v) :: );
return v;
}
};
/**
* Data fault status register
*/
struct Dfsr : Fsr
{
struct Wnr : Bitfield<11, 1> { }; /* Write not read bit */
/**
 * Read register value, only in privileged CPU mode
 */
static access_t read() {
access_t v;
asm volatile ("mrc p15, 0, %[v], c5, c0, 0\n" : [v]"=r"(v) :: );
return v;
}
/**
 * Read fault status of the last data abort
 */
static Fault_status fault_status() {
access_t const v = read();
/* Compose the 5-bit status from FS[3:0] and FS[4], analogous
 * to Ifsr::fault_status. The former expression
 * '4 << Fs_4::get(v)' unconditionally ORed in bit 2 and
 * placed FS[4] at bit 3 instead of bit 4. */
return (Fault_status)(Fs_3_0::get(v) |
(Fs_4::get(v) << Fs_3_0::WIDTH));
}
};
/**
* Data fault address register
*/
struct Dfar : Register<32>
{
/**
 * Read register value (address of the last data abort),
 * only in privileged CPU mode
 */
static access_t read() {
access_t v;
asm volatile ("mrc p15, 0, %[v], c6, c0, 0\n" : [v]"=r"(v) :: );
return v;
}
};
/**
* Process identification register
*/
struct Contextidr : Register<32>
{
struct Asid : Bitfield<0,8> /* ID part used by MMU */
{
enum { MAX = MASK }; /* highest supported ASID value */
};
struct Procid : Bitfield<8,24> { }; /* ID part used by debug/trace */
/**
 * Write whole register, only in privileged CPU mode
 */
static void write(access_t const v)
{
asm volatile ("mcr p15, 0, %[v], c13, c0, 1\n" :: [v]"r"(v) : );
}
};
/**
* A system control register
*/
struct Sctlr : Register<32>
{
struct M : Bitfield<0,1> { }; /* MMU enable bit */
struct C : Bitfield<2,1> { }; /* Cache enable bit */
struct I : Bitfield<12,1> { }; /* Instruction cache enable bit */
struct V : Bitfield<13,1> { }; /* Exception vectors bit (0: 0x0, 1: 0xffff0000) */
/**
 * Read whole register, only in privileged CPU mode
 */
static access_t read()
{
access_t v;
asm volatile ("mrc p15, 0, %[v], c1, c0, 0\n" : [v]"=r"(v) :: );
return v;
};
/**
 * Write whole register, only in privileged CPU mode
 */
static void write(access_t const v)
{
asm volatile ("mcr p15, 0, %[v], c1, c0, 0\n" :: [v]"r"(v) : );
}
};
/**
* The translation table base control register
*/
struct Ttbcr : Register<32>
{
/********************
* Always available *
********************/
struct N : Bitfield<0,3> /* Base address width */
{ };
/******************************************
* Only available with security extension *
******************************************/
struct Pd0 : Bitfield<4,1> { }; /* Translation table walk disable bit for TTBR0 */
struct Pd1 : Bitfield<5,1> { }; /* Translation table walk disable bit for TTBR1 */
/**
 * Read whole register, only in privileged CPU mode
 *
 * NOTE(review): declaration only, definition resides outside this header
 */
static access_t read();
/**
 * Write whole register, only in privileged CPU mode
 */
static void write(access_t const v)
{
asm volatile ("mcr p15, 0, %[v], c2, c0, 2" :: [v]"r"(v) : );
}
};
/**
* The domain access control register
*/
struct Dacr : Register<32>
{
/* Access modes assignable per domain (see enable_mmu: only D0 is CLIENT) */
enum Dx_values { NO_ACCESS = 0, CLIENT = 1, MANAGER = 3 };
/**
 * Access values for the 16 available domains
 */
struct D0 : Bitfield<0,2> { };
struct D1 : Bitfield<2,2> { };
struct D2 : Bitfield<4,2> { };
struct D3 : Bitfield<6,2> { };
struct D4 : Bitfield<8,2> { };
struct D5 : Bitfield<10,2> { };
struct D6 : Bitfield<12,2> { };
struct D7 : Bitfield<14,2> { };
struct D8 : Bitfield<16,2> { };
struct D9 : Bitfield<18,2> { };
struct D10 : Bitfield<20,2> { };
struct D11 : Bitfield<22,2> { };
struct D12 : Bitfield<24,2> { };
struct D13 : Bitfield<26,2> { };
struct D14 : Bitfield<28,2> { };
struct D15 : Bitfield<30,2> { };
/**
 * Write whole register, only in privileged CPU mode
 */
static void write(access_t const v)
{
asm volatile ("mcr p15, 0, %[v], c3, c0, 0" :: [v]"r"(v) : );
}
};
/**
* Translation table base register 0
*
* \detail Typically for process specific spaces, references first level
* table with a size between 128B and 16KB according to TTBCR.N,
*/
struct Ttbr0 : Register<32>
{
/********************
* Always available *
********************/
struct S : Bitfield<1,1> { }; /* Shareable bit */
struct Rgn : Bitfield<3,2> /* Region bits */
{
enum { OUTER_NON_CACHEABLE = 0b00,
OUTER_WBACK_WALLOCATE_CACHEABLE = 0b01,
OUTER_WTHROUGH_CACHEABLE = 0b10,
OUTER_WBACK_NO_WALLCOATE_CACHEABLE = 0b11,
};
};
struct Nos : Bitfield<5,1> { }; /* Not outer shareable bit */
struct Base_address : Bitfield<14,18> { }; /* Translation table base address (Driver supports only 16KB alignment) */
/*********************************************
* Only available without security extension *
*********************************************/
struct C : Bitfield<0,1> { }; /* Cacheable bit */
/******************************************
* Only available with security extension *
******************************************/
struct Irgn_1 : Bitfield<0,1> /* Inner region bit 1 (IRGN[1]) */
{
enum { INNER_NON_CACHEABLE = 0b0,
INNER_WBACK_WALLOCATE_CACHEABLE = 0b0,
INNER_WTHROUGH_CACHEABLE = 0b1,
INNER_WBACK_NO_WALLCOATE_CACHEABLE = 0b1,
};
};
struct Irgn_0 : Bitfield<6,1> /* Inner region bit 0 (IRGN[0]) */
{
enum { INNER_NON_CACHEABLE = 0b0,
INNER_WBACK_WALLOCATE_CACHEABLE = 0b1,
INNER_WTHROUGH_CACHEABLE = 0b0,
INNER_WBACK_NO_WALLCOATE_CACHEABLE = 0b1,
};
};
/**
 * Read whole register, only in privileged CPU mode
 *
 * NOTE(review): declaration only, definition resides outside this header
 */
static access_t read();
/**
 * Write whole register, only in privileged CPU mode
 */
static void write(access_t const v)
{
asm volatile ("mcr p15, 0, %[v], c2, c0, 0" :: [v]"r"(v) : );
}
};
/**
* A current program status register
*/
struct Cpsr : Register<32>
{
struct M : Bitfield<0,5> /* Processor mode */
{
enum { /* <Privileged>, <Description> */
USER = 0b10000, /* 0, Application code */
FIQ = 0b10001, /* 1, Entered at fast interrupt */
IRQ = 0b10010, /* 1, Entered at normal interrupt */
SUPERVISOR = 0b10011, /* 1, Most kernel code */
MONITOR = 0b10110, /* 1, A secure mode, switch sec./non-sec. */
ABORT = 0b10111, /* 1, Entered at aborts */
UNDEFINED = 0b11011, /* 1, Entered at instruction-related error */
SYSTEM = 0b11111, /* 1, Applications that require privileged */
};
};
struct F : Bitfield<6,1> { }; /* Fast interrupt request disable */
struct I : Bitfield<7,1> { }; /* Interrupt request disable */
struct A : Bitfield<8,1> { }; /* Asynchronous abort disable */
/**
 * Read whole register
 */
static access_t read()
{
access_t v;
asm volatile ("mrs %[v], cpsr\n" : [v] "=r" (v) : : );
return v;
}
/**
 * Write whole register
 *
 * Takes the value by const copy for consistency with the other
 * register drivers in this file. The former non-const reference
 * parameter rejected rvalue arguments and wrongly suggested that
 * 'v' gets modified.
 */
static void write(access_t const v)
{
asm volatile ("msr cpsr, %[v]\n" : : [v] "r" (v) : );
}
};
/**
* Secure configuration register
*/
struct Scr : Register<32>
{
struct Ns : Bitfield<0, 1> { }; /* Non secure bit */
/**
 * Read whole register
 *
 * NOTE(review): SCR is presumably accessible from secure privileged
 * modes only — callers such as secure_mode_active must ensure the
 * CPU is in a secure state, confirm against the ARM ARM
 */
static access_t read()
{
access_t v;
asm volatile ("mrc p15, 0, %[v], c1, c1, 0" : [v]"=r"(v) ::);
return v;
}
};
/**
* An execution state
*/
struct Context
{
/* General purpose registers, offset 0*4 .. 15*4 */
uint32_t
r0, r1, r2, r3, r4, r5, r6, r7,
r8, r9, r10, r11, r12, sp, lr, pc;
/* Special registers, offset 16*4 .. 17*4 */
uint32_t psr, contextidr;
/* Additional state info, offset 18*4 .. 19*4 */
uint32_t exception_type, section_table;
/***************
** Accessors **
***************/
/* Set/get the section table that acts as software TLB of this context */
void software_tlb(Section_table * const st)
{ section_table = (addr_t)st; }
Section_table * software_tlb() { return (Section_table *)section_table; }
/* Instruction pointer is kept in 'pc' */
void instruction_ptr(addr_t const p) { pc = p; }
addr_t instruction_ptr() { return pc; }
/* Return address is kept in 'lr' */
void return_ptr(addr_t const p) { lr = p; }
void stack_ptr(addr_t const p) { sp = p; }
/* Protection-domain ID is loaded into CONTEXTIDR on context switch */
void pd_id(unsigned long const id) { contextidr = id; }
};
/**
* Enable interrupt requests
*/
static void enable_irqs()
{
/* clear the IRQ-disable bit in the current program status register */
Cpsr::access_t cpsr = Cpsr::read();
Cpsr::I::clear(cpsr);
Cpsr::write(cpsr);
}
/**
* Set CPU exception entry to a given address
*
* \return 0 Exception entry set to the given address
* <0 Otherwise
*/
static int exception_entry_at(addr_t a)
{
/* only the two architectural vector bases are supported */
if (a != 0x0 && a != HIGHEST_EXCEPTION_ENTRY) return -1;

/* SCTLR.V selects between the low and the high vector base */
Sctlr::access_t v = Sctlr::read();
if (a) Sctlr::V::set(v);
else Sctlr::V::clear(v);
Sctlr::write(v);
return 0;
}
/**
* Are we in secure mode?
*/
static bool secure_mode_active()
{
/* without the security extension there is no secure world at all */
if (!Board::CORTEX_A9_SECURITY_EXTENSION) return false;

/* monitor mode is secure by definition */
if (Cpsr::M::get(Cpsr::read()) == Cpsr::M::MONITOR) return true;

/* in any other mode the SCR NS bit denotes the current world */
return !Scr::Ns::get(Scr::read());
}
/**
* Enable the MMU
*
* \param section_table Section translation table of the initial
* address space this function switches to
* \param process_id Process ID of the initial address space
*/
static void enable_mmu (Section_table * const section_table,
unsigned long const process_id)
{
/* Initialize domains: only domain 0 is used (as CLIENT, i.e.,
 * the per-descriptor access permissions are enforced) */
Dacr::write (Dacr::D0::bits (Dacr::CLIENT)
| Dacr::D1::bits (Dacr::NO_ACCESS)
| Dacr::D2::bits (Dacr::NO_ACCESS)
| Dacr::D3::bits (Dacr::NO_ACCESS)
| Dacr::D4::bits (Dacr::NO_ACCESS)
| Dacr::D5::bits (Dacr::NO_ACCESS)
| Dacr::D6::bits (Dacr::NO_ACCESS)
| Dacr::D7::bits (Dacr::NO_ACCESS)
| Dacr::D8::bits (Dacr::NO_ACCESS)
| Dacr::D9::bits (Dacr::NO_ACCESS)
| Dacr::D10::bits (Dacr::NO_ACCESS)
| Dacr::D11::bits (Dacr::NO_ACCESS)
| Dacr::D12::bits (Dacr::NO_ACCESS)
| Dacr::D13::bits (Dacr::NO_ACCESS)
| Dacr::D14::bits (Dacr::NO_ACCESS)
| Dacr::D15::bits (Dacr::NO_ACCESS));
/* Switch process ID */
Contextidr::write(process_id);
/* Install section table; N = 0 makes TTBR0 cover the whole
 * address space, both table-walk disable bits are cleared */
Ttbr0::write (Ttbr0::Base_address::masked ((addr_t)section_table));
Ttbcr::write (Ttbcr::N::bits(0)
| Ttbcr::Pd0::bits(0)
| Ttbcr::Pd1::bits(0) );
/* Enable MMU without instruction-, data-, or unified caches */
Sctlr::access_t sctlr = Sctlr::read();
Sctlr::M::set(sctlr);
Sctlr::I::clear(sctlr);
Sctlr::C::clear(sctlr);
Sctlr::write(sctlr);
flush_branch_prediction();
}
/**
* Invalidate all entries of the branch predictor array
*
* \detail Must be inline to avoid dependence on the branch predictor
*/
__attribute__((always_inline)) inline static void flush_branch_prediction()
{
/* BPIALL (write to c7, c5, 6) followed by an instruction barrier.
 * NOTE(review): r0 is passed uninitialized — its value is ignored
 * by this operation, but an explicit dummy operand would be cleaner;
 * confirm that no clobber declaration is needed here. */
asm volatile ("mcr p15, 0, r0, c7, c5, 6\n"
"isb");
}
/**
* Invalidate at least all TLB entries regarding a specific process
*
* \param process_id ID of the targeted process
*/
static void flush_tlb_by_pid (unsigned const process_id)
{
/* invalidate unified-TLB entries by ASID (write to c8, c7, 2),
 * the ID is masked down to the 8-bit ASID field beforehand */
asm volatile ("mcr p15, 0, %[asid], c8, c7, 2 \n"
:: [asid]"r"(Contextidr::Asid::masked(process_id)) : );
flush_branch_prediction();
}
/**
* Does a pagefault exist and originate from a lack of translation?
*
* \param c CPU Context that triggered the pagefault
* \param va Holds the virtual fault-address if this
* function returns 1
* \param w Indicates wether the fault was caused by a write access
* if this function returns 1
*/
static bool translation_miss(Context * c, addr_t & va, bool & w)
{
/* only prefetch- and data-aborts can denote a translation miss */
if (c->exception_type == PREFETCH_ABORT) {
Ifsr::Fault_status const status = Ifsr::fault_status();
if (status != Ifsr::SECTION_TRANSLATION_FAULT &&
status != Ifsr::PAGE_TRANSLATION_FAULT) return 0;

/* instruction fetches never write */
w = 0;
va = Ifar::read();
return 1;
}
if (c->exception_type == DATA_ABORT) {
Dfsr::Fault_status const status = Dfsr::fault_status();
if (status != Dfsr::SECTION_TRANSLATION_FAULT &&
status != Dfsr::PAGE_TRANSLATION_FAULT) return 0;

/* fetch fault address and access direction */
Dfsr::access_t const dfsr = Dfsr::read();
w = Dfsr::Wnr::get(dfsr);
va = Dfar::read();
return 1;
}
return 0;
}
};
}
#endif /* _BASE__INCLUDE__DRIVERS__CPU__CORTEX_A9__CORE_H_ */

View File
/*
* \brief Driver for Cortex A9 section tables as software TLB
* \author Martin Stein
* \date 2012-02-22
*/
/*
* Copyright (C) 2012 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _BASE__INCLUDE__DRIVERS__CPU__CORTEX_A9__SECTION_TABLE_H_
#define _BASE__INCLUDE__DRIVERS__CPU__CORTEX_A9__SECTION_TABLE_H_
/* Genode includes */
#include <util/register.h>
#include <base/printf.h>
#include <drivers/cpu/cortex_a9/core.h>
namespace Genode
{
/**
* Check if 'p' is aligned to 1 << 'alignm_log2'
*/
bool inline aligned(addr_t const a, unsigned long const alignm_log2)
{
/* 'a' is aligned iff none of the low 'alignm_log2' bits is set */
addr_t const low_bits = ((addr_t)1 << alignm_log2) - 1;
return !(a & low_bits);
}
/**
* Common access permission [1:0] bitfield values
*/
struct Ap_1_0_bitfield
{
/* values for the AP[1:0] field shared by all descriptor types below */
enum { KERNEL_AND_USER_NO_ACCESS = 0,
KERNEL_AND_USER_SAME_ACCESS = 3 };
};
/**
 * Common access permission [2] bitfield values
 */
struct Ap_2_bitfield
{
/* AP[2] selects read-only (1) versus read/write or no access (0) */
enum { KERNEL_RW_OR_NO_ACCESS = 0,
KERNEL_RO_ACCESS = 1 };
};
/**
* Cortex A9 second level translation table
*
* \detail A table is dedicated to either secure or non-secure
* mode. All translations done by this table apply
* domain 0. They are not shareable and have zero-filled
* memory region attributes.
*/
class Page_table
{
/* size constants used below */
enum {
_1KB_LOG2 = 10,
_4KB_LOG2 = 12,
_64KB_LOG2 = 16,
_1MB_LOG2 = 20,
};
public:
enum {
SIZE_LOG2 = _1KB_LOG2, /* table is 1KB: 256 word-sized entries */
SIZE = 1 << SIZE_LOG2,
ALIGNM_LOG2 = SIZE_LOG2,
VIRT_SIZE_LOG2 = _1MB_LOG2, /* a table translates a 1MB region */
VIRT_SIZE = 1 << VIRT_SIZE_LOG2,
VIRT_BASE_MASK = ~((1 << VIRT_SIZE_LOG2) - 1),
};
protected:
/**
* Common descriptor structure
*/
struct Descriptor : Register<32>
{
/* Descriptor types */
enum Type { FAULT, SMALL_PAGE, LARGE_PAGE };
struct Type_1 : Bitfield<1, 1> { }; /* bit 1 of the type encoding */
struct Type_2 : Bitfield<0, 1> { }; /* bit 0 of the type encoding */
/**
 * Get descriptor type of 'v'
 */
static Type type(access_t const v)
{
access_t const t1 = Type_1::get(v);
if (t1 == 0) {
access_t const t2 = Type_2::get(v);
if (t2 == 0) return FAULT;
if (t2 == 1) return LARGE_PAGE;
}
if (t1 == 1) return SMALL_PAGE;
return FAULT;
}
/**
 * Set descriptor type of 'v'
 */
static void type(access_t & v, Type const t)
{
switch (t) {
case FAULT:
Type_1::set(v, 0);
Type_2::set(v, 0);
break;
case SMALL_PAGE:
/* bit 0 is the XN flag of small pages and is left untouched */
Type_1::set(v, 1);
break;
case LARGE_PAGE:
Type_1::set(v, 0);
Type_2::set(v, 1);
break;
}
}
/**
 * Invalidate descriptor 'v'
 */
static void invalidate(access_t & v) { type(v, FAULT); }
/**
 * Return if descriptor 'v' is valid
 */
static bool valid(access_t & v) { return type(v) != FAULT; }
};
/**
* Represents an untranslated virtual region
*/
struct Fault : Descriptor
{
/* a fault entry spans the smallest translation granularity (4KB) */
enum {
VIRT_SIZE_LOG2 = _4KB_LOG2,
VIRT_SIZE = 1 << VIRT_SIZE_LOG2,
VIRT_BASE_MASK = ~((1 << VIRT_SIZE_LOG2) - 1)
};
};
/**
* Large page descriptor structure
*
* \detail Must always occur as group of 16 consecutive copies, this
* groups must be aligned on a 16 word boundary (Represents
* 64KB = 16 * Small page size)
*/
struct Large_page : Descriptor
{
enum { VIRT_SIZE_LOG2 = _64KB_LOG2,
VIRT_SIZE = 1 << VIRT_SIZE_LOG2,
VIRT_BASE_MASK = ~((1 << VIRT_SIZE_LOG2) - 1) };
struct B : Bitfield<2, 1> { }; /* Part of the memory region attributes */
struct C : Bitfield<3, 1> { }; /* Part of the memory region attributes */
struct Ap_1_0 : Bitfield<4, 2>, /* Access permission bits [1:0] */
Ap_1_0_bitfield { };
struct Ap_2 : Bitfield<9, 1>, /* Access permission bits [2] */
Ap_2_bitfield { };
struct S : Bitfield<10, 1> { }; /* Shareable bit */
struct Ng : Bitfield<11, 1> { }; /* Not global bit */
struct Tex : Bitfield<12, 3> { }; /* Part of the memory region attributes */
struct Xn : Bitfield<15, 1> { }; /* Execute never bit */
struct Pa_31_16 : Bitfield<16, 16> { }; /* Physical address bits [31:16] */
};
/**
* Small page descriptor structure
*/
struct Small_page : Descriptor
{
enum {
VIRT_SIZE_LOG2 = _4KB_LOG2,
VIRT_SIZE = 1 << VIRT_SIZE_LOG2,
VIRT_BASE_MASK = ~((1 << VIRT_SIZE_LOG2) - 1)
};
struct Xn : Bitfield<0, 1> { }; /* Execute never bit */
struct B : Bitfield<2, 1> { }; /* Part of the memory region attributes */
struct C : Bitfield<3, 1> { }; /* Part of the memory region attributes */
struct Ap_1_0 : Bitfield<4, 2>, /* Access permission bits [1:0] */
Ap_1_0_bitfield { };
struct Tex : Bitfield<6, 3> { }; /* Part of the memory region attributes */
struct Ap_2 : Bitfield<9, 1>, /* Access permission bits [2] */
Ap_2_bitfield { };
struct S : Bitfield<10, 1> { }; /* Shareable bit */
struct Ng : Bitfield<11, 1> { }; /* Not global bit */
struct Pa_31_12 : Bitfield<12, 20> { }; /* Physical address bits [31:12] */
/**
 * Permission configuration according to given access rights
 *
 * \param r Readability
 * \param w Writeability
 * \param x Executability
 * \return Descriptor value configured with appropriate
 * access permissions and the rest left zero
 */
static access_t access_permission_bits(bool const r,
bool const w,
bool const x)
{
access_t v = Xn::bits(!x);
if (r) {
v |= Ap_1_0::bits(Ap_1_0::KERNEL_AND_USER_SAME_ACCESS);
if(w) v |= Ap_2::bits(Ap_2::KERNEL_RW_OR_NO_ACCESS);
else v |= Ap_2::bits(Ap_2::KERNEL_RO_ACCESS);
}
else if (w) {
PDBG("Write only translations not supported");
while (1) ;
}
else {
/* AP[2:0] = 0b000 denies any access. The former value
 * (Ap_2 = KERNEL_RO_ACCESS) yielded 0b100, a reserved
 * encoding in ARMv7. This now also matches
 * Section::access_permission_bits. */
v |= Ap_1_0::bits(Ap_1_0::KERNEL_AND_USER_NO_ACCESS)
| Ap_2::bits(Ap_2::KERNEL_RW_OR_NO_ACCESS);
}
return v;
}
};
/* Table payload
* Attention: Must be the only member of this class */
Descriptor::access_t _entries[SIZE/sizeof(Descriptor::access_t)];
enum { MAX_INDEX = sizeof(_entries) / sizeof(_entries[0]) - 1 };
/**
 * Get entry index by virtual offset
 *
 * \param i Is overridden with the resulting index
 * \param vo Virtual offset relative to the virtual table base
 * \retval <0 If virtual offset couldn't be resolved,
 * in this case 'i' remains invalid
 */
int _index_by_vo (unsigned long & i, addr_t const vo) const
{
if (vo > max_virt_offset()) return -1;
i = vo >> Small_page::VIRT_SIZE_LOG2;
return 0;
}
public:
/**
 * Placement new operator (tables are constructed in donated memory)
 */
void * operator new (size_t, void * p) { return p; }
/**
* Constructor
*/
Page_table()
{
/* Check table alignment: the table address must be size-aligned
 * and coincide with the address of the entry array */
if (!aligned((addr_t)this, ALIGNM_LOG2)
|| (addr_t)this != (addr_t)_entries)
{
PDBG("Insufficient table alignment");
while (1) ;
}
/* Start with an empty table */
for (unsigned i = 0; i <= MAX_INDEX; i++)
Descriptor::invalidate(_entries[i]);
}
/**
* Maximum virtual offset that can be translated by this table
*/
static addr_t max_virt_offset()
{
/* last byte of the last 4KB slot covered by this table */
return (MAX_INDEX << Small_page::VIRT_SIZE_LOG2)
+ (Small_page::VIRT_SIZE - 1);
}
/**
* Insert one atomic translation into this table
*
* \param vo Offset of the virtual region represented
* by the translation within the virtual
* region represented by this table
* \param pa Base of the physical backing store
* \param size_log2 Log2(Size of the translated region),
* must be supported by this table
* \param r Shall one can read trough this translation
* \param w Shall one can write trough this translation
* \param x Shall one can execute trough this
* translation
*
* \detail This method overrides an existing translation in case
* that it spans the the same virtual range and is not
* a link to another table level
*/
void insert_translation (addr_t const vo, addr_t const pa,
unsigned long const size_log2,
bool const r, bool const w, bool const x,
bool const global)
{
/* Validate virtual address */
unsigned long i;
if (_index_by_vo (i, vo)) {
PDBG("Invalid virtual offset");
while (1) ;
}
/* Select descriptor type by the translation size,
 * only 4KB small pages are supported by this driver */
if (size_log2 == Small_page::VIRT_SIZE_LOG2)
{
/* Can we write to the targeted entry? */
if (Descriptor::valid(_entries[i]) &&
Descriptor::type(_entries[i]) != Descriptor::SMALL_PAGE)
{
PDBG("Couldn't override entry");
while (1) ;
}
/* Compose descriptor */
_entries[i] = Small_page::access_permission_bits(r, w, x)
| Small_page::Ng::bits(!global)
| Small_page::Pa_31_12::masked(pa);
Descriptor::type(_entries[i], Descriptor::SMALL_PAGE);
return;
}
PDBG("Translation size not supported");
while (1) ;
}
/**
* Remove translations, which overlap with a given virtual region
*
* \param vo Offset of the virtual region within the region
* represented by this table
* \param size Region size
*/
void remove_region (addr_t const vo, size_t const size)
{
/* Traverse all possibly affected entries */
addr_t residual_vo = vo;
unsigned long i;
while (1)
{
/* Is anything left over to remove? */
if (residual_vo >= vo + size) return;
/* Does the residual region overlap with the region
* represented by this table? */
if (_index_by_vo(i, residual_vo)) return;
/* Update current entry and recalculate the residual region */
switch (Descriptor::type(_entries[i]))
{
case Descriptor::FAULT:
{
/* nothing mapped here, skip to the next 4KB boundary */
residual_vo = (residual_vo & Fault::VIRT_BASE_MASK)
+ Fault::VIRT_SIZE;
break;
}
case Descriptor::SMALL_PAGE:
{
residual_vo = (residual_vo & Small_page::VIRT_BASE_MASK)
+ Small_page::VIRT_SIZE;
Descriptor::invalidate(_entries[i]);
break;
}
case Descriptor::LARGE_PAGE:
{
PDBG("Removal of large pages not implemented");
while (1) ;
break;
}
}
}
return;
}
/**
* Does this table solely contain invalid entries
*/
bool empty()
{
/* the table is empty iff no entry is a valid descriptor */
unsigned idx = 0;
while (idx <= MAX_INDEX) {
if (Descriptor::valid(_entries[idx])) return false;
idx++;
}
return true;
}
} __attribute__((aligned(1<<Page_table::ALIGNM_LOG2)));
/**
* Cortex A9 first level translation table
*
* \detail A table is dedicated to either secure or non-secure
* mode. All translations done by this table apply
* domain 0. They are not shareable and have zero-filled
* memory region attributes. The size of this table is fixed
* to such a value that this table translates a space wich is
* addressable by 32 bit.
*/
class Section_table
{
/* size constants used below */
enum {
_16KB_LOG2 = 14,
_1MB_LOG2 = 20,
_16MB_LOG2 = 24,
};
public:
enum {
SIZE_LOG2 = _16KB_LOG2, /* table is 16KB: 4096 word-sized entries */
SIZE = 1 << SIZE_LOG2,
ALIGNM_LOG2 = SIZE_LOG2,
VIRT_SIZE_LOG2 = _1MB_LOG2, /* one entry covers 1MB */
VIRT_SIZE = 1 << VIRT_SIZE_LOG2,
VIRT_BASE_MASK = ~((1 << VIRT_SIZE_LOG2) - 1),
/* worst-case extra memory a single translation may request */
MAX_COSTS_PER_TRANSLATION = sizeof(Page_table),
MAX_TRANSL_SIZE_LOG2 = 20,
MIN_TRANSL_SIZE_LOG2 = 12,
};
protected:
/**
* A first level translation descriptor
*/
struct Descriptor : Register<32>
{
/* Descriptor types */
enum Type { FAULT, PAGE_TABLE, SECTION, SUPERSECTION };
struct Type_1 : Bitfield<0, 2> { }; /* Entry type encoding 1 */
struct Type_2 : Bitfield<18, 1> { }; /* Entry type encoding 2 */
/**
 * Get descriptor type of 'v'
 */
static Type type(access_t const v)
{
access_t const t1 = Type_1::get(v);
if (t1 == 0) return FAULT;
if (t1 == 1) return PAGE_TABLE;
if (t1 == 2) {
access_t const t2 = Type_2::get(v);
if (t2 == 0) return SECTION;
if (t2 == 1) return SUPERSECTION;
}
/* type encoding 3 is not used and treated as fault */
return FAULT;
}
/**
 * Set descriptor type of 'v'
 */
static void type(access_t & v, Type const t)
{
switch (t) {
case FAULT: Type_1::set(v, 0); break;
case PAGE_TABLE: Type_1::set(v, 1); break;
case SECTION:
Type_1::set(v, 2);
Type_2::set(v, 0); break;
case SUPERSECTION:
Type_1::set(v, 2);
Type_2::set(v, 1); break;
}
}
/**
 * Invalidate descriptor 'v'
 */
static void invalidate(access_t & v) { type(v, FAULT); }
/**
 * Return if descriptor 'v' is valid
 */
static bool valid(access_t & v) { return type(v) != FAULT; }
};
/**
* Represents an untranslated virtual region
*/
struct Fault : Descriptor
{
/* a first-level fault entry spans 1MB */
enum {
VIRT_SIZE_LOG2 = _1MB_LOG2,
VIRT_SIZE = 1 << VIRT_SIZE_LOG2,
VIRT_BASE_MASK = ~((1 << VIRT_SIZE_LOG2) - 1)
};
};
/**
* References a second level translation table for the virtual
* region it represents
*/
struct Page_table_descriptor : Descriptor
{
struct Ns : Bitfield<3, 1> { }; /* Non-secure bit */
struct Domain : Bitfield<5, 4> { }; /* Domain field */
struct Pa_31_10 : Bitfield<10, 22> { }; /* Physical address bits [31:10] */
};
/**
* Supersection-descriptor structure
*
* \detail Must always occur as group of 16 consecutive copies, this
* groups must be aligned on a 16 word boundary.
*/
struct Supersection : Descriptor
{
enum {
VIRT_SIZE_LOG2 = _16MB_LOG2,
VIRT_SIZE = 1 << VIRT_SIZE_LOG2,
VIRT_BASE_MASK = ~((1 << VIRT_SIZE_LOG2) - 1)
};
struct B : Bitfield<2, 1> { }; /* Part of the memory region attributes */
struct C : Bitfield<3, 1> { }; /* Part of the memory region attributes */
struct Xn : Bitfield<4, 1> { }; /* Execute never bit */
struct Pa_39_36 : Bitfield<5, 4> { }; /* Extended physical address bits [39:36] */
struct Ap_1_0 : Bitfield<10, 2>, /* Access permission bits [1:0] */
Ap_1_0_bitfield { };
struct Tex : Bitfield<12, 3> { }; /* Part of the memory region attributes */
struct Ap_2 : Bitfield<15, 1>, /* Access permission bits [2] */
Ap_2_bitfield { };
struct S : Bitfield<16, 1> { }; /* Shareable bit */
struct Ng : Bitfield<17, 1> { }; /* Not global bit */
struct Ns : Bitfield<19, 1> { }; /* Non-secure bit */
struct Pa_35_32 : Bitfield<20, 4> { }; /* Extended physical address bits [35:32] */
struct Pa_31_24 : Bitfield<24, 8> { }; /* Physical address bits [31:24] */
};
/**
* Section-descriptor structure
*/
struct Section : Descriptor
{
enum {
VIRT_SIZE_LOG2 = _1MB_LOG2,
VIRT_SIZE = 1 << VIRT_SIZE_LOG2,
VIRT_BASE_MASK = ~((1 << VIRT_SIZE_LOG2) - 1)
};
struct B : Bitfield<2, 1> { }; /* Part of the memory region attributes */
struct C : Bitfield<3, 1> { }; /* Part of the memory region attributes */
struct Xn : Bitfield<4, 1> { }; /* Execute never bit */
struct Domain : Bitfield<5, 4> { }; /* Domain field */
struct Ap_1_0 : Bitfield<10, 2>, /* Access permission bits [1:0] */
Ap_1_0_bitfield { };
struct Tex : Bitfield<12, 3> { }; /* Part of the memory region attributes */
struct Ap_2 : Bitfield<15, 1>, /* Access permission bits [2] */
Ap_2_bitfield { };
struct S : Bitfield<16, 1> { }; /* Shareable bit */
struct Ng : Bitfield<17, 1> { }; /* Not global bit */
struct Ns : Bitfield<19, 1> { }; /* Non-secure bit */
struct Pa_31_20 : Bitfield<20, 12> { }; /* Physical address bits [31:20] */
/**
 * Permission configuration according to given access rights
 *
 * \param r Readability
 * \param w Writeability
 * \param x Executability
 * \return Descriptor value configured with appropriate
 * access permissions and the rest left zero
 */
static access_t access_permission_bits(bool const r,
bool const w,
bool const x)
{
access_t v = Xn::bits(!x);
if (r) {
v |= Ap_1_0::bits(Ap_1_0::KERNEL_AND_USER_SAME_ACCESS);
if(w) v |= Ap_2::bits(Ap_2::KERNEL_RW_OR_NO_ACCESS);
else v |= Ap_2::bits(Ap_2::KERNEL_RO_ACCESS);
}
else if (w) {
PDBG("Write only sections not supported");
while (1) ;
}
else {
/* AP[2:0] = 0b000: no access for kernel and user */
v |= Ap_1_0::bits(Ap_1_0::KERNEL_AND_USER_NO_ACCESS)
| Ap_2::bits(Ap_2::KERNEL_RW_OR_NO_ACCESS);
}
return v;
}
};
/* Table payload
* Attention: Must be the first member of this class */
Descriptor::access_t _entries[SIZE/sizeof(Descriptor::access_t)];
enum { MAX_INDEX = sizeof(_entries) / sizeof(_entries[0]) - 1 };
/* Is this table dedicated to secure mode or to non-secure mode */
bool _secure;
/**
 * Get entry index by virtual offset
 *
 * \param i Is overridden with the resulting index
 * \param vo Offset within the virtual region represented
 * by this table
 * \retval <0 If virtual offset couldn't be resolved,
 * in this case 'i' remains invalid
 */
int _index_by_vo(unsigned long & i, addr_t const vo) const
{
if (vo > max_virt_offset()) return -1;
i = vo >> Section::VIRT_SIZE_LOG2;
return 0;
}
public:
/**
* Constructor for a table that adopts current secure mode status
*/
Section_table() : _secure(Cortex_a9::secure_mode_active())
{
/* Check table alignment: the table address must be size-aligned
 * and coincide with the address of the entry array */
if (!aligned((addr_t)this, ALIGNM_LOG2)
|| (addr_t)this != (addr_t)_entries)
{
PDBG("Insufficient table alignment");
while (1) ;
}
/* Start with an empty table */
for (unsigned i = 0; i <= MAX_INDEX; i++)
Descriptor::invalidate(_entries[i]);
}
/**
* Maximum virtual offset that can be translated by this table
*/
static addr_t max_virt_offset()
{
/* last byte of the last 1MB slot covered by this table */
return (MAX_INDEX << Section::VIRT_SIZE_LOG2)
+ (Section::VIRT_SIZE - 1);
}
/**
* Insert one atomic translation into this table
*
* \param vo Offset of the virtual region represented
* by the translation within the virtual
* region represented by this table
* \param pa Base of the physical backing store
* \param size_log2 Size log2 of the translated region
* \param r Shall one can read trough this translation
* \param w Shall one can write trough this translation
* \param x Shall one can execute trough this translation
* \param global Shall the translation apply to all
* address spaces
* \param extra_space If > 0 it must point to a portion of
* size-aligned memory space which may be used
* furthermore by the table for the incurring
* administrative costs of the translation.
* To determine the amount of additionally
* needed memory one can instrument this
* method with 'extra_space' set to 0.
* The so donated memory may be regained by
* using the method 'regain_memory'
* \retval 0 Translation successfully inserted
* \retval >0 Translation not inserted, the return value
* is the size log2 of additional size-aligned
* space that is needed to do the translation.
* This occurs solely when 'extra_space' is 0.
*
* \detail This method overrides an existing translation in case
* that it spans the the same virtual range and is not
* a link to another table level
*/
unsigned long insert_translation (addr_t const vo, addr_t const pa,
unsigned long const size_log2,
bool const r, bool const w,
bool const x, bool const global,
void * const extra_space = 0)
{
/* Validate virtual address */
unsigned long i;
if (_index_by_vo (i, vo)) {
PDBG("Invalid virtual offset");
while (1) ;
}
/* Select descriptor type by translation size: anything smaller
 * than a 1MB section goes through a second-level page table */
if (size_log2 < Section::VIRT_SIZE_LOG2)
{
Page_table * pt;
/* Does an appropriate page table already exist? */
if (Descriptor::type(_entries[i]) == Descriptor::PAGE_TABLE)
{
pt = (Page_table *)(addr_t)
Page_table_descriptor::Pa_31_10::masked(_entries[i]);
}
/* Is there some extra space to create a page table? */
else if (extra_space)
{
/* Can we write to the targeted entry? */
if (Descriptor::valid(_entries[i])) {
PDBG ("Couldn't override entry");
while (1) ;
}
/* Create and link page table,
* the page table checks alignment by itself */
pt = new (extra_space) Page_table();
_entries[i] = Page_table_descriptor::Ns::bits(!_secure)
| Page_table_descriptor::Pa_31_10::masked((addr_t)pt);
Descriptor::type(_entries[i], Descriptor::PAGE_TABLE);
}
/* Request additional memory to create a page table */
else return Page_table::SIZE_LOG2;
/* Insert translation, the offset is made relative to the 1MB
 * region of the page table (sizes other than 4KB are rejected
 * within Page_table::insert_translation) */
pt->insert_translation(vo - Section::Pa_31_20::masked(vo),
pa, size_log2, r, w, x, global);
return 0;
}
if (size_log2 == Section::VIRT_SIZE_LOG2)
{
/* Can we write to the targeted entry? */
if (Descriptor::valid(_entries[i]) &&
Descriptor::type(_entries[i]) != Descriptor::SECTION)
{
PDBG("Couldn't override entry");
while (1) ;
}
/* Compose section descriptor */
_entries[i] = Section::access_permission_bits(r, w, x)
| Section::Ns::bits(!_secure)
| Section::Ng::bits(!global)
| Section::Pa_31_20::masked(pa);
Descriptor::type(_entries[i], Descriptor::SECTION);
return 0;
}
PDBG("Translation size not supported");
while (1) ;
}
/**
* Remove translations, which overlap with a given virtual region
*
* \param vo Offset of the virtual region within the region
* represented by this table
* \param size Region size
*/
void remove_region (addr_t const vo, size_t const size)
{
/* Traverse all possibly affected entries */
addr_t residual_vo = vo;
unsigned long i;
while (1)
{
/* Is anything left over to remove? */
if (residual_vo >= vo + size) return;
/* Does the residual region overlap with the region
* represented by this table? */
if (_index_by_vo(i, residual_vo)) return;
/* Update current entry and recalculate the residual region */
switch (Descriptor::type(_entries[i]))
{
case Descriptor::FAULT:
{
residual_vo = (residual_vo & Fault::VIRT_BASE_MASK)
+ Fault::VIRT_SIZE;
break;
}
case Descriptor::PAGE_TABLE:
{
/* Instruct page table to remove residual region */
Page_table * const pt = (Page_table *)
(addr_t)Page_table_descriptor::Pa_31_10::masked(_entries[i]);
size_t const residual_size = vo + size - residual_vo;
addr_t const pt_vo = residual_vo
- Section::Pa_31_20::masked(residual_vo);
pt->remove_region(pt_vo, residual_size);
/* Recalculate residual region */
residual_vo = (residual_vo & Page_table::VIRT_BASE_MASK)
+ Page_table::VIRT_SIZE;
break;
}
case Descriptor::SECTION:
{
Descriptor::invalidate(_entries[i]);
residual_vo = (residual_vo & Section::VIRT_BASE_MASK)
+ Section::VIRT_SIZE;
break;
}
case Descriptor::SUPERSECTION:
{
PDBG("Removal of supersections not implemented");
while (1);
break;
}
}
}
}
/**
* Get a portion of memory that is no longer used by this table
*
* \param base Base of regained memory portion if method returns 1
* \param s Size of regained memory portion if method returns 1
*/
bool regain_memory (void * & base, size_t & s)
{
/* Walk through all entries */
for (unsigned i = 0; i <= MAX_INDEX; i++)
{
if (Descriptor::type(_entries[i]) == Descriptor::PAGE_TABLE)
{
Page_table * const pt = (Page_table *)
(addr_t)Page_table_descriptor::Pa_31_10::masked(_entries[i]);
if (pt->empty())
{
/* We've found an useless page table */
Descriptor::invalidate(_entries[i]);
base = (void *)pt;
s = sizeof(Page_table);
return true;
}
}
}
return false;
}
} __attribute__((aligned(1<<Section_table::ALIGNM_LOG2)));
}
#endif /* _BASE__INCLUDE__DRIVERS__CPU__CORTEX_A9__SECTION_TABLE_H_ */

View File

@ -0,0 +1,112 @@
/*
* \brief Driver base for the private timer of the ARM Cortex-A9
 * \author Martin Stein
* \date 2011-12-13
*/
/*
* Copyright (C) 2011-2012 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _BASE__INCLUDE__DRIVERS__CPU__CORTEX_A9__TIMER_H_
#define _BASE__INCLUDE__DRIVERS__CPU__CORTEX_A9__TIMER_H_
/* Genode includes */
#include <util/mmio.h>
namespace Genode
{
/**
 * Driver base for the private timer of the ARM Cortex-A9
 *
 * CLK is the clock that feeds the private timer, in Hz
 * (the CPU-internal peripheral clock).
 */
template <unsigned long CLK>
struct Cortex_a9_timer : public Mmio
{
	/* Native timer tics that elapse per millisecond at clock CLK */
	enum { TICS_PER_MS = CLK / 1000, };
	/**
	 * Load value register
	 */
	struct Load : Register<0x0, 32> { };
	/**
	 * Timer counter value register
	 */
	struct Counter : Register<0x4, 32> { };
	/**
	 * Timer control register
	 */
	struct Control : Register<0x8, 32>
	{
		struct Timer_enable : Bitfield<0,1> { }; /* 1: 'Counter' decrements, 0: 'Counter' stays 0 */
		struct Auto_reload  : Bitfield<1,1> { }; /* 1: Auto reload mode, 0: One shot mode */
		struct Irq_enable   : Bitfield<2,1> { }; /* 1: IRQ = 'Interrupt_status::Event' 0: IRQ = 0 */
		struct Prescaler    : Bitfield<8,8> { }; /* modifies the clock period for the decrementing */
	};
	/**
	 * Timer interrupt status register
	 */
	struct Interrupt_status : Register<0xc, 32>
	{
		struct Event : Bitfield<0,1> { }; /* 'Event' = !'Counter' */
	};
	/**
	 * Constructor, clears the interrupt output
	 *
	 * \param mmio_base  Local base address of the timer MMIO region
	 */
	Cortex_a9_timer(addr_t const mmio_base) : Mmio(mmio_base) {
		clear_interrupt(); }
	/**
	 * Start a one-shot run
	 * \param tics native timer value used to assess the delay
	 *             of the timer interrupt as of the call
	 */
	inline void start_one_shot(uint32_t const tics);
	/**
	 * Translate milliseconds to a native timer value
	 */
	static uint32_t ms_to_tics(unsigned long const ms) {
		return ms * TICS_PER_MS; }
	/**
	 * Stop the timer and return last timer value
	 */
	unsigned long stop()
	{
		/* Read the counter before disabling so the remaining
		 * count is not lost */
		unsigned long const v = read<Counter>();
		write<typename Control::Timer_enable>(0);
		return v;
	}
	/**
	 * Clear interrupt output line
	 */
	/* the event flag is cleared by writing 1 to it */
	void clear_interrupt() { write<typename Interrupt_status::Event>(1); }
};
}
template <unsigned long CLOCK>
void Genode::Cortex_a9_timer<CLOCK>::start_one_shot(uint32_t const tics)
{
	/* Reset timer: drop a possibly pending event and configure a
	 * disabled, one-shot run with the IRQ line driven by the event
	 * flag and no prescaling */
	clear_interrupt();
	write<Control>(Control::Timer_enable::bits(0) |
	               Control::Auto_reload::bits(0) |
	               Control::Irq_enable::bits(1) |
	               Control::Prescaler::bits(0));
	/* Load timer and start decrementing, the load value is written
	 * while the timer is still disabled */
	write<Load>(tics);
	write<typename Control::Timer_enable>(1);
}
#endif /* _BASE__INCLUDE__DRIVERS__CPU__CORTEX_A9__TIMER_H_ */