hw: map a dataspace in core according to its flags

Thereby removing the obsolete quirk for the uncached CPU-state dataspace
in the VM session.

Fixes #1198
This commit is contained in:
Stefan Kalkowski 2014-07-08 15:46:53 +02:00 committed by Norman Feske
parent c5380674df
commit 609f7abb0a
14 changed files with 55 additions and 82 deletions

View File

@ -28,8 +28,6 @@ namespace Trustzone
*/
SECURE_RAM_BASE = Genode::Board_base::RAM0_BASE,
SECURE_RAM_SIZE = 256 * 1024 * 1024,
VM_STATE_BASE = SECURE_RAM_BASE + SECURE_RAM_SIZE,
VM_STATE_SIZE = 1 << 20,
NONSECURE_RAM_BASE = Genode::Board_base::RAM1_BASE,
NONSECURE_RAM_SIZE = Genode::Board_base::RAM1_SIZE,
};

View File

@ -20,10 +20,8 @@
namespace Trustzone
{
enum {
VM_STATE_SIZE = 1 << 20,
SECURE_RAM_BASE = Genode::Board_base::RAM_3_BASE,
SECURE_RAM_SIZE = Genode::Board_base::RAM_3_SIZE - VM_STATE_SIZE,
VM_STATE_BASE = SECURE_RAM_BASE + SECURE_RAM_SIZE,
SECURE_RAM_SIZE = Genode::Board_base::RAM_3_SIZE,
NONSECURE_RAM_BASE = 0x80000000,
NONSECURE_RAM_SIZE = 0x20000000,
};

View File

@ -50,8 +50,8 @@ Core_rm_session::attach(Dataspace_capability ds_cap, size_t size,
/* allocate range in core's virtual address space */
void *virt_addr;
if (!platform()->region_alloc()->alloc_aligned(page_rounded_size,
&virt_addr,
get_page_size_log2()).is_ok()) {
&virt_addr,
get_page_size_log2()).is_ok()) {
PERR("Could not allocate virtual address range in core of size %zd\n",
page_rounded_size);
return false;
@ -59,7 +59,10 @@ Core_rm_session::attach(Dataspace_capability ds_cap, size_t size,
/* map the dataspace's physical pages to corresponding virtual addresses */
unsigned num_pages = page_rounded_size >> get_page_size_log2();
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages))
Page_flags const flags = Page_flags::apply_mapping(ds.object()->writable(),
ds.object()->cacheability(),
ds.object()->is_io_mem());
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages, flags))
return 0;
return virt_addr;

View File

@ -30,15 +30,7 @@ void Genode::platform_add_local_services(Genode::Rpc_entrypoint *ep,
{
using namespace Genode;
/*
* We use an extra portion of RAM for the VM state,
* so we can map it non-cached to core instead of normal, cached RAM.
* In the future, when core only maps memory on demand, this extra
* allocator can be eliminated.
*/
static Synchronized_range_allocator<Allocator_avl> vm_alloc(0);
vm_alloc.add_range(Trustzone::VM_STATE_BASE, Trustzone::VM_STATE_SIZE);
static Vm_root vm_root(ep, sh, &vm_alloc);
static Vm_root vm_root(ep, sh);
static Local_service vm_ls(Vm_session::service_name(), &vm_root);
ls->insert(&vm_ls);
}

View File

@ -57,9 +57,6 @@ Native_region * Platform::_core_only_mmio_regions(unsigned const i)
/* interrupt controller */
{ Board::TZIC_MMIO_BASE, Board::TZIC_MMIO_SIZE },
/* vm state memory */
{ Trustzone::VM_STATE_BASE, Trustzone::VM_STATE_SIZE },
/* central security unit */
{ Board::CSU_BASE, Board::CSU_SIZE },
};

View File

@ -17,6 +17,9 @@
/* Genode includes */
#include <base/printf.h>
/* core includes */
#include <page_flags.h>
namespace Genode {
/**
@ -30,7 +33,8 @@ namespace Genode {
* \return true on success
*/
bool map_local(addr_t from_phys, addr_t to_virt, size_t num_pages,
bool io_mem = false);
Page_flags flags = { true, true, false, false,
false, CACHED });
/**
* Unmap pages from core's address space

View File

@ -40,13 +40,6 @@ namespace Genode
return Page_flags { writeable, true, false, false,
io_mem, cacheable }; }
/**
* Create flag POD for kernel when it creates the core space
*/
static const Page_flags map_core_area(bool const io_mem) {
return Page_flags { true, true, false, false, io_mem,
io_mem ? UNCACHED : CACHED}; }
/**
* Create flag POD for the mode transition region
*/

View File

@ -24,17 +24,12 @@ namespace Genode {
class Vm_root : public Root_component<Vm_session_component>
{
private:
Range_allocator *_ram_alloc;
protected:
Vm_session_component *_create_session(const char *args)
{
size_t ram_quota = Arg_string::find_arg(args, "ram_quota").long_value(0);
return new (md_alloc())
Vm_session_component(ep(), _ram_alloc, ram_quota);
return new (md_alloc()) Vm_session_component(ep(), ram_quota);
}
public:
@ -46,10 +41,8 @@ namespace Genode {
* \param md_alloc meta-data allocator to be used by root component
*/
Vm_root(Rpc_entrypoint *session_ep,
Allocator *md_alloc,
Range_allocator *ram_alloc)
: Root_component<Vm_session_component>(session_ep, md_alloc),
_ram_alloc(ram_alloc){ }
Allocator *md_alloc)
: Root_component<Vm_session_component>(session_ep, md_alloc) { }
};
}

View File

@ -29,34 +29,24 @@ namespace Genode {
{
private:
Rpc_entrypoint *_ds_ep;
Range_allocator *_ram_alloc;
unsigned _vm_id;
void *_vm;
addr_t _ds_addr;
Dataspace_component _ds;
Dataspace_capability _ds_cap;
Rpc_entrypoint *_ds_ep;
Range_allocator *_ram_alloc;
unsigned _vm_id;
void *_vm;
Dataspace_component _ds;
Dataspace_capability _ds_cap;
addr_t _ds_addr;
static size_t _ds_size() {
return align_addr(sizeof(Cpu_state_modes),
get_page_size_log2()); }
addr_t _alloc_ds(size_t *ram_quota)
{
addr_t addr;
if (_ds_size() > *ram_quota ||
_ram_alloc->alloc_aligned(_ds_size(), (void**)&addr,
get_page_size_log2()).is_error())
throw Root::Quota_exceeded();
*ram_quota -= _ds_size();
return addr;
}
addr_t _alloc_ds(size_t &ram_quota);
public:
Vm_session_component(Rpc_entrypoint *ds_ep,
Range_allocator *ram_alloc,
size_t ram_quota);
Vm_session_component(Rpc_entrypoint *ds_ep,
size_t ram_quota);
~Vm_session_component();

View File

@ -125,7 +125,9 @@ namespace Kernel
using namespace Genode;
Translation_table *tt = Platform_pd::translation_table();
const Page_flags flags = Page_flags::map_core_area(io_mem);
const Page_flags flags =
Page_flags::apply_mapping(true, io_mem ? UNCACHED : CACHED,
io_mem);
start = trunc_page(start);
size_t size = round_page(end) - start;

View File

@ -192,10 +192,10 @@ void Core_parent::exit(int exit_value)
** Support for core memory management **
****************************************/
bool Genode::map_local(addr_t from_phys, addr_t to_virt, size_t num_pages, bool io_mem)
bool Genode::map_local(addr_t from_phys, addr_t to_virt, size_t num_pages,
Page_flags flags)
{
Translation_table *tt = Kernel::core_pd()->translation_table();
const Page_flags flags = Page_flags::map_core_area(io_mem);
try {
for (unsigned i = 0; i < 2; i++) {
@ -245,7 +245,7 @@ bool Core_mem_allocator::Mapped_mem_allocator::_map_local(addr_t virt_addr,
{
Genode::Page_slab * slab = Kernel::core_pd()->platform_pd()->page_slab();
slab->backing_store(_core_mem_allocator->raw());
bool ret = ::map_local(phys_addr, virt_addr, size / get_page_size(), false);
bool ret = ::map_local(phys_addr, virt_addr, size / get_page_size());
slab->backing_store(_core_mem_allocator);
return ret;
}

View File

@ -30,15 +30,7 @@ void Genode::platform_add_local_services(Genode::Rpc_entrypoint *ep,
{
using namespace Genode;
/*
* We use an extra portion of RAM for the VM state,
* so we can map it non-cached to core instead of normal, cached RAM.
* In the future, when core only maps memory on demand, this extra
* allocator can be eliminated.
*/
static Synchronized_range_allocator<Allocator_avl> vm_alloc(0);
vm_alloc.add_range(Trustzone::VM_STATE_BASE, Trustzone::VM_STATE_SIZE);
static Vm_root vm_root(ep, sh, &vm_alloc);
static Vm_root vm_root(ep, sh);
static Local_service vm_ls(Vm_session::service_name(), &vm_root);
ls->insert(&vm_ls);
}

View File

@ -53,9 +53,6 @@ Native_region * Platform::_core_only_mmio_regions(unsigned const i)
/* Core UART */
{ Board::PL011_0_MMIO_BASE, Board::PL011_0_MMIO_SIZE },
/* vm state memory */
{ Trustzone::VM_STATE_BASE, Trustzone::VM_STATE_SIZE },
};
return i < sizeof(_regions)/sizeof(_regions[0]) ? &_regions[i] : 0;
}

View File

@ -20,17 +20,30 @@
/* core includes */
#include <kernel/core_interface.h>
#include <vm_session_component.h>
#include <platform.h>
#include <core_env.h>
using namespace Genode;
addr_t Vm_session_component::_alloc_ds(size_t &ram_quota)
{
addr_t addr;
if (_ds_size() > ram_quota ||
platform()->ram_alloc()->alloc_aligned(_ds_size(), (void**)&addr,
get_page_size_log2()).is_error())
throw Root::Quota_exceeded();
ram_quota -= _ds_size();
return addr;
}
void Vm_session_component::exception_handler(Signal_context_capability handler)
{
if (_vm_id) {
PWRN("Cannot register exception_handler repeatedly");
return;
}
_vm_id = Kernel::new_vm(_vm, (void*)_ds.core_local_addr(), handler.dst());
}
@ -56,16 +69,16 @@ void Vm_session_component::pause(void)
Vm_session_component::Vm_session_component(Rpc_entrypoint *ds_ep,
Range_allocator *ram_alloc,
size_t ram_quota)
: _ds_ep(ds_ep), _ram_alloc(ram_alloc), _vm_id(0),
_ds_addr(_alloc_ds(&ram_quota)),
_ds(_ds_size(), _ds_addr, _ds_addr, UNCACHED, true, 0),
: _ds_ep(ds_ep), _vm_id(0),
_ds(_ds_size(), _alloc_ds(ram_quota), UNCACHED, true, 0),
_ds_cap(static_cap_cast<Dataspace>(_ds_ep->manage(&_ds)))
{
_ds.assign_core_local_addr(core_env()->rm_session()->attach(_ds_cap));
/* alloc needed memory */
if (Kernel::vm_size() > ram_quota ||
!_ram_alloc->alloc(Kernel::vm_size(), &_vm))
!platform()->core_mem_alloc()->alloc(Kernel::vm_size(), &_vm))
throw Root::Quota_exceeded();
}
@ -76,6 +89,7 @@ Vm_session_component::~Vm_session_component()
_ds_ep->dissolve(&_ds);
/* free region in allocator */
_ram_alloc->free((void*)_ds.core_local_addr());
_ram_alloc->free(_vm);
core_env()->rm_session()->detach(_ds.core_local_addr());
platform()->ram_alloc()->free((void*)_ds.phys_addr());
platform()->core_mem_alloc()->free(_vm);
}