hw: move MMU-related functions into Address_space

* Introduce a hw-specific Address_space interface for protection
  domains, which combines all memory-virtualization-related functionality
* Introduce a core-specific Platform_pd object that solves the
  chicken-and-egg initialization problems formerly distributed across
  kernel and core-platform code

Ref #595
Ref #1443
This commit is contained in:
Stefan Kalkowski 2015-04-28 14:07:51 +02:00 committed by Christian Helmuth
parent d4c55bec2a
commit b8f178e647
6 changed files with 362 additions and 310 deletions

View File

@ -26,14 +26,6 @@ namespace Kernel
class Vm;
class User_irq;
addr_t mode_transition_base();
size_t mode_transition_size();
size_t thread_size();
size_t pd_size();
unsigned pd_alignment_log2();
size_t signal_context_size();
size_t signal_receiver_size();
/**
* Kernel names of the kernel calls
*/

View File

@ -28,139 +28,181 @@
#include <page_slab.h>
#include <kernel/kernel.h>
namespace Hw
{
/**
* Memory virtualization interface of a protection domain
*/
class Address_space;
}
namespace Genode
{
class Platform_thread;
class Platform_thread; /* forward declaration */
/**
* Platform specific part of a Genode protection domain
*/
class Platform_pd : public Address_space
{
protected:
class Platform_pd;
Lock _lock; /* safeguard translation table and slab */
Native_capability _parent;
Native_thread_id _main_thread;
char const * const _label;
Translation_table * _tt; /* translation table virtual addr. */
Translation_table * _tt_phys; /* translation table physical addr. */
uint8_t _kernel_pd_data[sizeof(Kernel::Pd)];
Kernel::Pd * _kernel_pd;
Page_slab * _pslab; /* page table allocator */
public:
/**
* Constructor for core pd
*
* \param tt translation table address
* \param slab page table allocator
*/
Platform_pd(Translation_table * tt, Page_slab * slab)
: _main_thread(0), _label("core"), _tt(tt),
_tt_phys(tt), _pslab(slab) { }
/**
* Constructor for non-core pd
*
* \param label name of protection domain
*/
Platform_pd(Allocator * md_alloc, char const *label)
: _main_thread(0), _label(label),
_kernel_pd(reinterpret_cast<Kernel::Pd*>(_kernel_pd_data))
{
Lock::Guard guard(_lock);
Core_mem_allocator * cma =
static_cast<Core_mem_allocator*>(platform()->core_mem_alloc());
void *tt;
/* get some aligned space for the translation table */
if (!cma->alloc_aligned(sizeof(Translation_table), (void**)&tt,
Translation_table::ALIGNM_LOG2).is_ok()) {
PERR("failed to allocate kernel object");
throw Root::Quota_exceeded();
}
_tt = construct_at<Translation_table>(tt);
_tt_phys = (Translation_table*) cma->phys_addr(_tt);
_pslab = new (cma) Page_slab(cma);
Kernel::mtc()->map(_tt, _pslab);
/* create kernel object */
if (Kernel::new_pd(_kernel_pd, this)) {
PERR("failed to create kernel object");
throw Root::Unavailable();
}
}
/**
* Destructor
*/
~Platform_pd();
/**
* Bind thread 't' to protection domain
*
* \return 0 on success or
* -1 if failed
*/
int bind_thread(Platform_thread * t)
{
/* is this the first and therefore main thread in this PD? */
if (!_main_thread)
{
/* annotate that we've got a main thread from now on */
_main_thread = t->id();
return t->join_pd(this, 1, Address_space::weak_ptr());
}
return t->join_pd(this, 0, Address_space::weak_ptr());
}
/**
* Unbind thread 't' from protection domain
*/
void unbind_thread(Platform_thread *t) {
t->join_pd(nullptr, false, Address_space::weak_ptr()); }
/**
* Assign parent interface to protection domain
*/
int assign_parent(Native_capability parent)
{
if (!parent.valid()) {
PERR("parent invalid");
return -1;
}
_parent = parent;
return 0;
}
/***************
** Accessors **
***************/
Lock * lock() { return &_lock; }
char const * const label() { return _label; }
Page_slab * page_slab() { return _pslab; }
Translation_table * translation_table() { return _tt; }
Translation_table * translation_table_phys() { return _tt_phys; }
void page_slab(Page_slab *pslab) { _pslab = pslab; }
Kernel::Pd * kernel_pd() { return _kernel_pd; }
/*****************************
** Address-space interface **
*****************************/
void flush(addr_t, size_t);
};
/**
* Platform specific part of Core's protection domain
*/
class Core_platform_pd;
}
/**
 * Memory-virtualization interface of a protection domain
 *
 * Bundles a protection domain's translation table (virtual and physical
 * address), the page-table allocator feeding it, and the kernel's PD
 * object, guarded by a single lock.
 */
class Hw::Address_space : public Genode::Address_space
{
	private:

		/* Platform and Mapped_mem_allocator access private members directly */
		friend class Genode::Platform;
		friend class Genode::Core_mem_allocator::Mapped_mem_allocator;

		Genode::Lock                _lock;    /* table lock           */
		Genode::Translation_table * _tt;      /* table virtual addr.  */
		Genode::Translation_table * _tt_phys; /* table physical addr. */
		Genode::Page_slab         * _pslab;   /* page table allocator */
		Kernel::Pd                * _kernel_pd;

		/* return core's memory allocator */
		static inline Genode::Core_mem_allocator * _cma();

		/* allocate aligned backing store for a translation table */
		static inline void * _tt_alloc();

	protected:

		/**
		 * Core-specific constructor
		 *
		 * \param pd    pointer to kernel's pd object
		 * \param tt    pointer to translation table
		 * \param slab  pointer to page slab allocator
		 */
		Address_space(Kernel::Pd * pd,
		              Genode::Translation_table * tt,
		              Genode::Page_slab * slab);

	public:

		/**
		 * Constructor
		 *
		 * \param pd  pointer to kernel's pd object
		 */
		Address_space(Kernel::Pd* pd);

		/**
		 * Insert memory mapping into translation table of the address space
		 *
		 * \param virt   virtual start address
		 * \param phys   physical start address
		 * \param size   size of memory region
		 * \param flags  translation table flags (e.g. caching attributes)
		 *
		 * \return true on success, false otherwise
		 */
		bool insert_translation(Genode::addr_t virt, Genode::addr_t phys,
		                        Genode::size_t size, Genode::Page_flags flags);


		/*****************************
		 ** Address-space interface **
		 *****************************/

		void flush(Genode::addr_t, Genode::size_t);


		/***************
		 ** Accessors **
		 ***************/

		Kernel::Pd * kernel_pd() { return _kernel_pd; }

		Genode::Translation_table * translation_table() { return _tt; }

		Genode::Translation_table * translation_table_phys() {
			return _tt_phys; }
};
/**
 * Platform-specific part of a Genode protection domain
 */
class Genode::Platform_pd : public Hw::Address_space
{
	private:

		Native_capability  _parent;                    /* set via assign_parent()      */
		bool               _thread_associated = false; /* true once a thread joined    */
		char const * const _label;                     /* name used for diagnostics    */

		/* in-place storage for the kernel's PD object */
		uint8_t _kernel_object[sizeof(Kernel::Pd)];

	protected:

		/**
		 * Constructor for core pd
		 *
		 * \param tt    translation table address
		 * \param slab  page table allocator
		 */
		Platform_pd(Translation_table * tt, Page_slab * slab);

	public:

		/**
		 * Constructor for non-core pd
		 *
		 * \param label  name of protection domain
		 */
		Platform_pd(Allocator * md_alloc, char const *label);

		~Platform_pd();

		/**
		 * Bind thread 't' to protection domain
		 *
		 * The first thread bound to the PD becomes its main thread.
		 *
		 * \return  0 on success or
		 *         -1 if failed
		 */
		int bind_thread(Platform_thread * t);

		/**
		 * Unbind thread 't' from protection domain
		 */
		void unbind_thread(Platform_thread *t);

		/**
		 * Assign parent interface to protection domain
		 *
		 * \return  0 on success, -1 if the capability is invalid
		 */
		int assign_parent(Native_capability parent);


		/***************
		 ** Accessors **
		 ***************/

		char const * const label() { return _label; }
};
/**
 * Platform-specific part of core's protection domain
 *
 * Core's translation table and page-slab allocator are provided by
 * statically allocated singletons (see _table()/_slab()), and its
 * initial one-to-one mappings are inserted lock-free via _map().
 */
class Genode::Core_platform_pd : public Genode::Platform_pd
{
	private:

		/* statically allocated translation table of core */
		static inline Translation_table * const _table();

		/* statically allocated page-table allocator of core */
		static inline Page_slab * const _slab();

		/**
		 * Establish initial one-to-one mappings for core/kernel.
		 * This function avoids to take the core-pd's translation table
		 * lock in contrast to normal translation insertions to
		 * circumvent strex/ldrex problems in early bootstrap code
		 * on some ARM SoCs.
		 *
		 * \param start    physical/virtual start address of area
		 * \param end      physical/virtual end address of area
		 * \param io_mem   true if it should be marked as device memory
		 */
		void _map(addr_t start, addr_t end, bool io_mem);

	public:

		Core_platform_pd();
};
#endif /* _CORE__INCLUDE__PLATFORM_PD_H_ */

View File

@ -39,13 +39,11 @@
/* base-hw includes */
#include <kernel/irq.h>
#include <kernel/perf_counter.h>
using namespace Kernel;
extern "C" void _core_start(void);
extern Genode::Native_thread_id _main_thread_id;
extern void * _start_secondary_cpus;
extern int _prog_img_beg;
extern int _prog_img_end;
extern void * _start_secondary_cpus;
static_assert(sizeof(Genode::sizet_arithm_t) >= 2 * sizeof(size_t),
"Bad result type for size_t arithmetics.");
@ -65,100 +63,6 @@ namespace Kernel
*/
void test();
/**
 * Static kernel PD that describes core
 *
 * Built from unmanaged singletons so no dynamic memory management is
 * required during early bootstrap. Core_pd inherits both the
 * core-platform PD and the kernel PD object.
 */
Pd * core_pd()
{
	typedef Early_translations_slab      Slab;
	typedef Early_translations_allocator Allocator;
	typedef Genode::Translation_table    Table;

	constexpr addr_t table_align = 1 << Table::ALIGNM_LOG2;

	struct Core_pd : Platform_pd, Pd
	{
		/**
		 * Establish initial one-to-one mappings for core/kernel.
		 * This function avoids to take the core-pd's translation table
		 * lock in contrast to normal translation insertions to
		 * circumvent strex/ldrex problems in early bootstrap code
		 * on some ARM SoCs.
		 *
		 * \param start   physical/virtual start address of area
		 * \param end     physical/virtual end address of area
		 * \param io_mem  true if it should be marked as device memory
		 */
		void map(addr_t start, addr_t end, bool io_mem)
		{
			using namespace Genode;

			Translation_table *tt = Platform_pd::translation_table();
			const Page_flags flags =
				Page_flags::apply_mapping(true, io_mem ? UNCACHED : CACHED,
				                          io_mem);

			/* page-align the area before inserting it 1:1 */
			start = trunc_page(start);
			size_t size = round_page(end) - start;

			try {
				tt->insert_translation(start, start, size, flags, page_slab());
			} catch(Page_slab::Out_of_slabs) {
				PERR("Not enough page slabs");
			} catch(Allocator::Out_of_memory) {
				PERR("Translation table needs to much RAM");
			} catch(...) {
				PERR("Invalid mapping %p -> %p (%zx)", (void*)start,
				     (void*)start, size);
			}
		}

		/**
		 * Constructor
		 */
		Core_pd(Table * const table, Slab * const slab)
		: Platform_pd(table, slab), Pd(table, this)
		{
			using namespace Genode;

			Platform_pd::_kernel_pd = this;

			/* map exception vector for core */
			Kernel::mtc()->map(table, slab);

			/* map core's program image */
			map((addr_t)&_prog_img_beg, (addr_t)&_prog_img_end, false);

			/* map core's mmio regions */
			Native_region * r = Platform::_core_only_mmio_regions(0);
			for (unsigned i = 0; r;
			     r = Platform::_core_only_mmio_regions(++i))
				map(r->base, r->base + r->size, true);
		}
	};

	Allocator * const alloc = unmanaged_singleton<Allocator>();
	Table     * const table = unmanaged_singleton<Table, table_align>();
	Slab      * const slab  = unmanaged_singleton<Slab, Slab::ALIGN>(alloc);
	return unmanaged_singleton<Core_pd>(table, slab);
}
/**
 * Get attributes of the mode transition region in every PD
 */
addr_t mode_transition_base() { return mtc()->VIRT_BASE; }
size_t mode_transition_size() { return mtc()->SIZE; }

/**
 * Get attributes of the kernel objects
 */
size_t thread_size()          { return sizeof(Thread); }
size_t signal_context_size()  { return sizeof(Signal_context); }
size_t signal_receiver_size() { return sizeof(Signal_receiver); }

/* NOTE(review): spelled 'pd_alignm_log2' here but declared as
 * 'pd_alignment_log2' in the header hunk above - confirm which
 * spelling callers use */
unsigned pd_alignm_log2() { return Genode::Translation_table::ALIGNM_LOG2; }

/* a PD consists of a translation table plus the kernel's PD object */
size_t pd_size() { return sizeof(Genode::Translation_table) + sizeof(Pd); }

enum { STACK_SIZE = 64 * 1024 };
/**
@ -169,15 +73,17 @@ namespace Kernel
static Lock s;
return s;
}
addr_t core_tt_base;
unsigned core_pd_id;
}
/* singleton accessor for the kernel-object ID allocator */
Kernel::Id_allocator & Kernel::id_alloc() {
	return *unmanaged_singleton<Id_allocator>(); }


/* core's kernel PD is owned by the singleton Core_platform_pd */
Pd * Kernel::core_pd() {
	return unmanaged_singleton<Genode::Core_platform_pd>()->kernel_pd(); }


/* singleton accessor for the interrupt controller */
Pic * Kernel::pic() { return unmanaged_singleton<Pic>(); }

View File

@ -116,7 +116,7 @@ Platform::Platform()
_vm_start(VIRT_ADDR_SPACE_START), _vm_size(VIRT_ADDR_SPACE_SIZE)
{
static Page_slab pslab(&_core_mem_alloc);
Kernel::core_pd()->platform_pd()->page_slab(&pslab);
Kernel::core_pd()->platform_pd()->_pslab = &pslab;
_core_mem_allocator = &_core_mem_alloc;
/*
@ -197,48 +197,17 @@ void Core_parent::exit(int exit_value)
/**
 * Map physical pages into core's own address space
 *
 * NOTE(review): this span interleaves the removed and the added body of
 * the same diff hunk - the statements after the first 'return false' are
 * unreachable here. Confirm the current body against the repository.
 */
bool Genode::map_local(addr_t from_phys, addr_t to_virt, size_t num_pages,
                       Page_flags flags)
{
	Translation_table *tt = Kernel::core_pd()->translation_table();
	try {
		for (;;) {
			try {
				Lock::Guard guard(*Kernel::core_pd()->platform_pd()->lock());
				tt->insert_translation(to_virt, from_phys,
				                       num_pages * get_page_size(), flags,
				                       Kernel::core_pd()->platform_pd()->page_slab());
				return true;
			} catch(Page_slab::Out_of_slabs) {
				PDBG("Page_slab::Out_of_slabs");
				/* grow the slab allocator and retry the insertion */
				Kernel::core_pd()->platform_pd()->page_slab()->alloc_slab_block();
			}
		}
	} catch(Allocator::Out_of_memory) {
		PERR("Translation table needs to much RAM");
	} catch(...) {
		PERR("Invalid mapping %p -> %p (%zx)", (void*)from_phys, (void*)to_virt,
		     get_page_size() * num_pages);
	}
	return false;

	/* new, delegating body introduced by this commit */
	Platform_pd * pd = Kernel::core_pd()->platform_pd();
	return pd->insert_translation(to_virt, from_phys,
	                              num_pages * get_page_size(), flags);
}
/**
 * Remove mappings from core's own address space
 *
 * NOTE(review): removed and added diff bodies are interleaved here - the
 * code after the first 'return false' is unreachable. Confirm the current
 * body against the repository.
 */
bool Genode::unmap_local(addr_t virt_addr, size_t num_pages)
{
	try {
		Lock::Guard guard(*Kernel::core_pd()->platform_pd()->lock());
		Translation_table *tt = Kernel::core_pd()->translation_table();
		tt->remove_translation(virt_addr, num_pages * get_page_size(),
		                       Kernel::core_pd()->platform_pd()->page_slab());

		/* update translation caches of all CPUs */
		Kernel::update_pd(Kernel::core_pd());
		return true;
	} catch(...) {
		PERR("tried to remove invalid region!");
	}
	return false;

	/* new, delegating body introduced by this commit */
	Platform_pd * pd = Kernel::core_pd()->platform_pd();
	pd->flush(virt_addr, num_pages * get_page_size());
	return true;
}
@ -246,7 +215,7 @@ bool Core_mem_allocator::Mapped_mem_allocator::_map_local(addr_t virt_addr,
addr_t phys_addr,
unsigned size)
{
Genode::Page_slab * slab = Kernel::core_pd()->platform_pd()->page_slab();
Genode::Page_slab * slab = Kernel::core_pd()->platform_pd()->_pslab;
slab->backing_store(_core_mem_allocator->raw());
bool ret = ::map_local(phys_addr, virt_addr, size / get_page_size());
slab->backing_store(_core_mem_allocator);
@ -257,7 +226,7 @@ bool Core_mem_allocator::Mapped_mem_allocator::_map_local(addr_t virt_addr,
bool Core_mem_allocator::Mapped_mem_allocator::_unmap_local(addr_t virt_addr,
unsigned size)
{
Genode::Page_slab * slab = Kernel::core_pd()->platform_pd()->page_slab();
Genode::Page_slab * slab = Kernel::core_pd()->platform_pd()->_pslab;
slab->backing_store(_core_mem_allocator->raw());
bool ret = ::unmap_local(virt_addr, size / get_page_size());
slab->backing_store(_core_mem_allocator);

View File

@ -15,26 +15,191 @@
/* core includes */
#include <platform_pd.h>
extern int _prog_img_beg;
extern int _prog_img_end;
using namespace Genode;
void Platform_pd::flush(addr_t virt_base, size_t size)
/**************************************
** Hw::Address_space implementation **
**************************************/
/**
 * Return core's memory allocator, downcast to its concrete type
 */
Core_mem_allocator * Hw::Address_space::_cma()
{
	auto * const mem_alloc = platform()->core_mem_alloc();
	return static_cast<Core_mem_allocator*>(mem_alloc);
}
/**
 * Allocate naturally aligned backing store for a translation table
 *
 * \throw Root::Quota_exceeded  if core's memory allocator runs dry
 */
void * Hw::Address_space::_tt_alloc()
{
	/* NOTE(review): 'lock()' is no member of Hw::Address_space - this line
	 * looks like residue of the removed Platform_pd::flush interleaved by
	 * the diff - confirm against the repository */
	Lock::Guard guard(*lock());

	void * ret;
	if (!_cma()->alloc_aligned(sizeof(Translation_table), (void**)&ret,
	                           Translation_table::ALIGNM_LOG2).is_ok())
		throw Root::Quota_exceeded();
	return ret;
}
if (_tt) _tt->remove_translation(virt_base, size, page_slab());
/* update translation caches */
Kernel::update_pd(kernel_pd());
/**
 * Insert a memory mapping into the translation table of the address space
 *
 * \param virt   virtual start address
 * \param phys   physical start address
 * \param size   size of the memory region
 * \param flags  translation-table flags (e.g., caching attributes)
 *
 * \return true on success, false if the mapping could not be established
 */
bool Hw::Address_space::insert_translation(addr_t virt, addr_t phys,
                                           size_t size, Page_flags flags)
{
	try {
		for (;;) {
			try {
				Lock::Guard guard(_lock);
				_tt->insert_translation(virt, phys, size, flags, _pslab);
				return true;
			} catch(Page_slab::Out_of_slabs) {
				/* grow the slab allocator and retry the insertion; the
				 * loop is bounded by the outer exception handlers */
				_pslab->alloc_slab_block();
			}
		}
	} catch(Allocator::Out_of_memory) {
		PWRN("Translation table needs too much RAM");
	} catch(...) {
		PERR("Invalid mapping %p -> %p (%zx)", (void*)phys, (void*)virt, size);
	}
	return false;
}
/**
 * Remove translations of the given virtual region
 *
 * \param virt  virtual start address of the region
 * \param size  size of the region in bytes
 */
void Hw::Address_space::flush(addr_t virt, size_t size)
{
	Lock::Guard guard(_lock);

	try {
		/* a PD without a table has nothing to remove */
		if (_tt)
			_tt->remove_translation(virt, size, _pslab);

		/* update translation caches */
		Kernel::update_pd(_kernel_pd);
	} catch(...) {
		PERR("tried to remove invalid region!");
	}
}
/**
 * Core-specific constructor, table and slab are provided by the caller
 */
Hw::Address_space::Address_space(Kernel::Pd* pd, Translation_table * tt,
                                 Page_slab * slab)
:
	_tt(tt),
	_tt_phys(tt), /* same pointer - presumably identity-mapped, confirm */
	_pslab(slab),
	_kernel_pd(pd)
{ }
/**
 * Constructor used for non-core protection domains
 *
 * \param pd  pointer to kernel's pd object
 *
 * Note the member-initializer order: _tt must be initialized before
 * _tt_phys, which queries the physical address of _tt. This matches the
 * member declaration order in the class and must not change.
 */
Hw::Address_space::Address_space(Kernel::Pd * pd)
: _tt(construct_at<Translation_table>(_tt_alloc())),
  _tt_phys(reinterpret_cast<Translation_table*>(_cma()->phys_addr(_tt))),
  _pslab(new (_cma()) Page_slab(_cma())),
  _kernel_pd(pd)
{
	Lock::Guard guard(_lock);

	/* make the mode-transition pages available in the new table */
	Kernel::mtc()->map(_tt, _pslab);
}
/********************************
** Platform_pd implementation **
********************************/
/**
 * Bind thread 't' to this protection domain
 *
 * \return result of Platform_thread::join_pd (0 on success, -1 on failure)
 */
int Platform_pd::bind_thread(Platform_thread * t)
{
	/* only the first thread that joins the PD becomes its main thread */
	bool const first_thread = !_thread_associated;
	_thread_associated      = true;
	return t->join_pd(this, first_thread, Address_space::weak_ptr());
}
/**
 * Unbind thread 't' from this protection domain
 */
void Platform_pd::unbind_thread(Platform_thread *t)
{
	/* dissolve the membership by joining the null PD */
	t->join_pd(nullptr, false, Address_space::weak_ptr());
}
/**
 * Assign parent interface to this protection domain
 *
 * \return 0 on success, -1 if the capability is invalid
 */
int Platform_pd::assign_parent(Native_capability parent)
{
	/* keep the previous parent untouched on invalid input */
	if (parent.valid()) {
		_parent = parent;
		return 0;
	}
	PERR("parent invalid");
	return -1;
}
/**
 * Constructor for core's PD
 *
 * \param tt    translation table address
 * \param slab  page table allocator
 *
 * Core's kernel PD object is placement-constructed into the
 * preallocated '_kernel_object' buffer. Uses array-decay
 * '_kernel_object' (not '&_kernel_object') for consistency with the
 * non-core constructor; both denote the same address.
 */
Platform_pd::Platform_pd(Translation_table * tt, Page_slab * slab)
:
	Hw::Address_space(reinterpret_cast<Kernel::Pd*>(_kernel_object), tt, slab),
	_label("core")
{
	new (_kernel_object) Kernel::Pd(tt, this);
}
/**
 * Constructor for a non-core PD
 *
 * \param label  name of the protection domain
 *
 * \throw Root::Unavailable  if the kernel refuses to create the PD
 */
Platform_pd::Platform_pd(Allocator * md_alloc, char const *label)
:
	Hw::Address_space(reinterpret_cast<Kernel::Pd*>(_kernel_object)),
	_label(label)
{
	Kernel::Pd * const pd = reinterpret_cast<Kernel::Pd*>(_kernel_object);

	/* create kernel object, a non-zero result signals failure */
	if (Kernel::new_pd(pd, this) == 0)
		return;

	PERR("failed to create kernel object");
	throw Root::Unavailable();
}
/**
 * Destructor
 *
 * NOTE(review): this span appears to contain both the removed and the
 * newly added destructor body of the same diff - Kernel::delete_pd() is
 * invoked twice and the same virtual region is torn down twice. Confirm
 * the current body against the repository.
 */
Platform_pd::~Platform_pd()
{
	Lock::Guard guard(_lock);

	Kernel::delete_pd(_kernel_pd);
	_tt->remove_translation(platform()->vm_start(), platform()->vm_size(),
	                        _pslab);
	Kernel::delete_pd(kernel_pd());
	flush(platform()->vm_start(), platform()->vm_size());

	/* TODO: destroy page slab and translation table!!! */
}
/*************************************
** Core_platform_pd implementation **
*************************************/
/**
 * Return core's statically allocated translation table
 */
Translation_table * const Core_platform_pd::_table()
{
	/* the singleton storage must be naturally aligned for the table */
	enum { TABLE_ALIGN = 1 << Translation_table::ALIGNM_LOG2 };
	return unmanaged_singleton<Translation_table, TABLE_ALIGN>();
}
/**
 * Return core's statically allocated page-table allocator
 */
Page_slab * const Core_platform_pd::_slab()
{
	typedef Kernel::Early_translations_slab      Slab;
	typedef Kernel::Early_translations_allocator Allocator;

	/* the slab's backing allocator is itself a static singleton */
	Allocator * const backing = unmanaged_singleton<Allocator>();
	return unmanaged_singleton<Slab, Slab::ALIGN>(backing);
}
/**
 * Establish an initial one-to-one mapping for core/kernel
 *
 * Inserts translations without taking the table lock to circumvent
 * strex/ldrex problems in early bootstrap code on some ARM SoCs
 * (see the declaration in platform_pd.h).
 *
 * \param start   physical/virtual start address of area
 * \param end     physical/virtual end address of area
 * \param io_mem  true if it should be marked as device memory
 */
void Core_platform_pd::_map(addr_t start, addr_t end, bool io_mem)
{
	const Page_flags flags =
		Page_flags::apply_mapping(true, io_mem ? UNCACHED : CACHED, io_mem);

	/* page-align the area before inserting it 1:1 */
	start       = trunc_page(start);
	size_t size = round_page(end) - start;

	try {
		_table()->insert_translation(start, start, size, flags, _slab());
	} catch(Page_slab::Out_of_slabs) {
		PERR("Not enough page slabs");
	} catch(Allocator::Out_of_memory) {
		PERR("Translation table needs too much RAM");
	} catch(...) {
		PERR("Invalid mapping %p -> %p (%zx)", (void*)start,
		     (void*)start, size);
	}
}
/**
 * Constructor: establish core's initial address-space layout
 */
Core_platform_pd::Core_platform_pd()
: Platform_pd(_table(), _slab())
{
	/* map exception vector for core */
	Kernel::mtc()->map(_table(), _slab());

	/* map core's program image */
	_map((addr_t)&_prog_img_beg, (addr_t)&_prog_img_end, false);

	/* map core's mmio regions, iterating until the region list ends */
	unsigned idx = 0;
	for (Native_region * r = Platform::_core_only_mmio_regions(idx); r;
	     r = Platform::_core_only_mmio_regions(++idx))
		_map(r->base, r->base + r->size, true);
}

View File

@ -44,37 +44,15 @@ void Rm_client::unmap(addr_t, addr_t virt_base, size_t size)
/**
 * Insert the prepared mapping into the faulter's translation table
 *
 * NOTE(review): this span interleaves the removed and the added body of
 * the same diff hunk - 'pd' is declared twice and the trailing return
 * cannot coexist with the code above in one valid function. Confirm the
 * current body against the repository.
 */
int Pager_activation_base::apply_mapping()
{
	/* prepare mapping */
	Platform_pd * const pd = (Platform_pd*)_fault.pd;
	Lock::Guard guard(*pd->lock());
	Translation_table * const tt = pd->translation_table();
	Page_slab * page_slab = pd->page_slab();

	Page_flags const flags =
		Page_flags::apply_mapping(_mapping.writable,
		                          _mapping.cacheable,
		                          _mapping.io_mem);

	Platform_pd * const pd = (Platform_pd*)_fault.pd;

	/* insert mapping into translation table */
	try {
		for (unsigned retry = 0; retry < 2; retry++) {
			try {
				tt->insert_translation(_mapping.virt_address, _mapping.phys_address,
				                       1 << _mapping.size_log2, flags, page_slab);
				return 0;
			} catch(Page_slab::Out_of_slabs) {
				page_slab->alloc_slab_block();
			}
		}
	} catch(Allocator::Out_of_memory) {
		PERR("Translation table needs to much RAM");
	} catch(...) {
		PERR("Invalid mapping %p -> %p (%lx)", (void*)_mapping.phys_address,
		     (void*)_mapping.virt_address, 1UL << _mapping.size_log2);
	}
	return -1;

	/* NOTE(review): the failure value '1' deviates from the '-1'
	 * convention used above - confirm the intended error code */
	return (pd->insert_translation(_mapping.virt_address,
	                               _mapping.phys_address,
	                               1 << _mapping.size_log2, flags)) ? 0 : 1;
}