sel4: management of core's virtual memory

This commit is contained in:
Norman Feske 2015-05-05 00:15:50 +02:00 committed by Christian Helmuth
parent 1f5cfef64e
commit e6ad346e24
13 changed files with 943 additions and 149 deletions

View File

@ -0,0 +1,149 @@
/*
* \brief Support code for the thread API
* \author Norman Feske
* \author Stefan Kalkowski
* \date 2010-01-13
*/
/*
* Copyright (C) 2010-2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* Genode includes */
#include <rm_session/rm_session.h>
#include <ram_session/ram_session.h>
#include <base/printf.h>
#include <base/thread.h>
/* local includes */
#include <platform.h>
#include <map_local.h>
#include <dataspace_component.h>
using namespace Genode;
/**
* Region-manager session for allocating thread contexts
*
* This class corresponds to the managed dataspace that is normally
* used for organizing thread contexts within the thread-context area.
* In contrast to the ordinary implementation, core's version does
* not split between allocation of memory and virtual memory management.
* Due to the missing availability of "real" dataspaces and capabilities
* referring to them without having an entrypoint in place, the allocation
* of a dataspace has no effect, but the attachment of the thereby "empty"
* dataspace is doing both: allocation and attachment.
*/
class Context_area_rm_session : public Rm_session
{
private:
/* slab for dataspace meta data, backed by core's memory allocator */
using Ds_slab = Synchronized_allocator<Tslab<Dataspace_component,
get_page_size()> >;
Ds_slab _ds_slab { platform()->core_mem_alloc() };
/* set to true to log each attach operation */
enum { verbose = false };
public:
/**
* Allocate and attach on-the-fly backing store to thread-context area
*
* \param ds_cap ignored - the backing store is allocated right here
* instead of being taken from an existing dataspace
* \param size size of backing store, rounded up to page granularity
* \param offset unused by this implementation
* \param use_local_addr unused - 'local_addr' is always interpreted as
* an offset within the thread-context area
* \param local_addr offset relative to the context-area base
* \param executable unused by this implementation
*
* \return 'local_addr' on success, 0 on allocation or map failure
*/
Local_addr attach(Dataspace_capability ds_cap, /* ignored capability */
size_t size, off_t offset,
bool use_local_addr, Local_addr local_addr,
bool executable)
{
size = round_page(size);
/* allocate physical memory */
Untyped_address const untyped_addr =
Untyped_memory::alloc(*platform_specific()->ram_alloc(), size);
/* turn the raw untyped memory into kernel page-frame objects */
Untyped_memory::convert_to_page_frames(untyped_addr.phys(),
size >> get_page_size_log2());
Dataspace_component *ds = new (&_ds_slab)
Dataspace_component(size, 0, untyped_addr.phys(), CACHED, true, 0);
if (!ds) {
PERR("dataspace for core context does not exist");
return (addr_t)0;
}
/* 'local_addr' is an offset within the thread-context area */
addr_t const core_local_addr =
Native_config::context_area_virtual_base() + (addr_t)local_addr;
if (verbose)
PDBG("core_local_addr = %lx, phys_addr = %lx, size = 0x%zx",
core_local_addr, ds->phys_addr(), ds->size());
/* map the new page frames into core's virtual address space */
if (!map_local(ds->phys_addr(), core_local_addr,
ds->size() >> get_page_size_log2())) {
PERR("could not map phys %lx at local %lx",
ds->phys_addr(), core_local_addr);
return (addr_t)0;
}
ds->assign_core_local_addr((void*)core_local_addr);
return local_addr;
}
/* freeing of context-area backing store is not supported */
void detach(Local_addr local_addr) { PWRN("Not implemented!"); }
/* stub implementations - unused for core's context area */
Pager_capability add_client(Thread_capability) {
return Pager_capability(); }
void remove_client(Pager_capability) { }
void fault_handler(Signal_context_capability) { }
State state() { return State(); }
Dataspace_capability dataspace() { return Dataspace_capability(); }
};
/**
 * Dummy RAM session for the thread-context area
 *
 * Allocation requests return an invalid capability because the actual
 * backing store for thread contexts is provided on the fly at attach time.
 */
class Context_area_ram_session : public Ram_session
{
public:

Ram_dataspace_capability alloc(size_t size, Cache_attribute cached)
{
/* hand out an invalid dataspace capability */
return reinterpret_cap_cast<Ram_dataspace>(Native_capability());
}

void free(Ram_dataspace_capability ds)
{
PWRN("Not implemented!");
}

int ref_account(Ram_session_capability ram_session)
{
return 0;
}

int transfer_quota(Ram_session_capability ram_session, size_t amount)
{
return 0;
}

/* quota accounting is not performed for this session */
size_t quota() { return 0; }

size_t used() { return 0; }
};
/**
* Return single instance of the context-area RM and RAM session
*/
namespace Genode {

/* lazily-constructed singleton serving core's context-area RM session */
Rm_session *env_context_area_rm_session()
{
static Context_area_rm_session singleton;
return &singleton;
}

/* lazily-constructed singleton serving core's context-area RAM session */
Ram_session *env_context_area_ram_session()
{
static Context_area_ram_session singleton;
return &singleton;
}
}

View File

@ -15,11 +15,12 @@
#define _CORE__INCLUDE__CNODE_H_
/* Genode includes */
#include <util/noncopyable.h>
#include <base/exception.h>
#include <base/allocator.h>
/* core includes */
#include <untyped_address.h>
#include <untyped_memory.h>
namespace Genode {
@ -100,17 +101,17 @@ class Genode::Cnode_base
};
class Genode::Cnode : public Cnode_base
class Genode::Cnode : public Cnode_base, Noncopyable
{
public:
class Phys_alloc_failed : Exception { };
class Untyped_lookup_failed : Exception { };
class Retype_untyped_failed : Exception { };
/**
* Constructor
*
* \param parent selector of CNode where to place 'dst_sel'
* \param dst_sel designated selector referring to the created
* CNode
* \param size_log2 number of entries in CNode
@ -120,34 +121,19 @@ class Genode::Cnode : public Cnode_base
* \throw Phys_alloc_failed
* \throw Untyped_address::Lookup_failed
*/
Cnode(unsigned dst_sel, size_t size_log2, Range_allocator &phys_alloc)
Cnode(unsigned parent_sel, unsigned dst_sel, size_t size_log2,
Range_allocator &phys_alloc)
:
Cnode_base(dst_sel, size_log2)
{
/*
* Allocate naturally-aligned physical memory for cnode
*
* The natural alignment is needed to ensure that the backing store is
* contained in a single untyped memory region.
*/
void *out_ptr = nullptr;
size_t const mem_size = 1UL << mem_size_log2();
Range_allocator::Alloc_return alloc_ret =
phys_alloc.alloc_aligned(mem_size, &out_ptr, mem_size_log2());
addr_t const phys_addr = (addr_t)out_ptr;
if (alloc_ret.is_error()) {
PERR("%s: allocation of backing store for cnode failed", __FUNCTION__);
throw Phys_alloc_failed();
}
Untyped_address const untyped_addr(phys_addr, mem_size);
Untyped_address const untyped_addr =
Untyped_memory::alloc_log2(phys_alloc, mem_size_log2());
seL4_Untyped const service = untyped_addr.sel();
int const type = seL4_CapTableObject;
int const offset = untyped_addr.offset();
int const size_bits = size_log2;
seL4_CNode const root = seL4_CapInitThreadCNode;
seL4_CNode const root = parent_sel;
int const node_index = 0;
int const node_depth = 0;
int const node_offset = dst_sel;

View File

@ -0,0 +1,53 @@
/*
* \brief Core's CSpace layout definition
* \author Norman Feske
* \date 2015-05-06
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _CORE__INCLUDE__CORE_CSPACE_H_
#define _CORE__INCLUDE__CORE_CSPACE_H_
namespace Genode { class Core_cspace; }
class Genode::Core_cspace
{
public:
/* CNode dimensions (log2 of the number of entries per CNode) */
enum {
NUM_TOP_SEL_LOG2 = 12UL,
NUM_CORE_SEL_LOG2 = 14UL,
/* one selector per physical page frame */
NUM_PHYS_SEL_LOG2 = 20UL,
/* pad level fills the remaining bits of a 32-bit CSpace address */
NUM_CORE_PAD_SEL_LOG2 = 32UL - NUM_TOP_SEL_LOG2 - NUM_CORE_SEL_LOG2,
};
/* selectors for statically created CNodes (within the initial CSpace) */
enum Static_cnode_sel {
TOP_CNODE_SEL = 0x200,
CORE_PAD_CNODE_SEL = 0x201,
CORE_CNODE_SEL = 0x202,
PHYS_CNODE_SEL = 0x203,
CORE_VM_PAD_CNODE_SEL = 0x204,
CORE_VM_CNODE_SEL = 0x205,
};
/* indices within top-level CNode */
enum Top_cnode_idx {
TOP_CNODE_CORE_IDX = 0,
/* last index of the 2^NUM_TOP_SEL_LOG2 top-level entries */
TOP_CNODE_PHYS_IDX = 0xfff
};
/* ID (top-level CNode index) used for core's own VM space */
enum { CORE_VM_ID = 1 };
};
#endif /* _CORE__INCLUDE__CORE_CSPACE_H_ */

View File

@ -19,6 +19,7 @@
/* core includes */
#include <util.h>
#include <platform_generic.h>
namespace Genode {
@ -34,9 +35,12 @@ namespace Genode {
*/
inline bool map_local(addr_t from_phys, addr_t to_virt, size_t num_pages)
{
PDBG("not implemented");
PDBG("map_local from_phys=0x%lx, to_virt=0x%lx, num_pages=%zd",
from_phys, to_virt, num_pages);
return false;
platform_specific()->core_vm_space().map(from_phys, to_virt, num_pages);
return true;
}

View File

@ -0,0 +1,177 @@
/*
* \brief Associate page-table and frame selectors with virtual addresses
* \author Norman Feske
* \date 2015-05-04
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _CORE__INCLUDE__PAGE_TABLE_REGISTRY_H_
#define _CORE__INCLUDE__PAGE_TABLE_REGISTRY_H_
/* Genode includes */
#include <util/list.h>
#include <base/exception.h>
#include <base/tslab.h>
/* core includes */
#include <util.h>
namespace Genode { class Page_table_registry; }
class Genode::Page_table_registry
{
public:
class Lookup_failed : Exception { };
private:
/*
* XXX use AVL tree (with virtual address as key) instead of list
*/
class Page_table : public List<Page_table>::Element
{
public:
/* association of one page-frame selector with its virtual address */
struct Entry : List<Entry>::Element
{
addr_t const addr;
unsigned const sel;
Entry(addr_t addr, unsigned sel) : addr(addr), sel(sel) { }
};
/* virtual address covered by this page table */
addr_t const addr;
private:
List<Entry> _entries;
static addr_t _page_frame_base(addr_t addr)
{
return addr & get_page_mask();
}
bool _entry_exists(addr_t addr) const
{
for (Entry const *e = _entries.first(); e; e = e->next()) {
if (_page_frame_base(e->addr) == _page_frame_base(addr))
return true;
}
return false;
}
public:
Page_table(addr_t addr) : addr(addr) { }
/**
* Register page-frame selector for the page at 'addr'
*
* \param entry_slab backing store for the new 'Entry' object
*
* An attempt to insert the same page frame twice is reported as a
* warning and otherwise ignored.
*/
void insert_entry(Allocator &entry_slab, addr_t addr, unsigned sel)
{
if (_entry_exists(addr)) {
PWRN("trying to insert page frame for 0x%lx twice", addr);
return;
}
_entries.insert(new (entry_slab) Entry(addr, sel));
}
};
/* slab-block sized to hold many metadata objects per allocation */
class Slab_block : public Genode::Slab_block { long _data[4*1024]; };
template <typename T>
struct Slab : Genode::Tslab<T, sizeof(Slab_block)>
{
/* initial block enables allocations before 'md_alloc' is consulted */
Slab_block _initial_block;
Slab(Allocator &md_alloc)
:
Tslab<T, sizeof(Slab_block)>(&md_alloc, &_initial_block)
{ }
};
Slab<Page_table> _page_table_slab;
Slab<Page_table::Entry> _page_table_entry_slab;
List<Page_table> _page_tables;
/*
* Base of the virtual address range covered by one page table.
* NOTE(review): hard-codes 4 MiB - presumably the coverage of an x86
* 32-bit page table; confirm before reusing on other architectures.
*/
static addr_t _page_table_base(addr_t addr)
{
return addr & ~(4*1024*1024 - 1);
}
bool _page_table_exists(addr_t addr) const
{
for (Page_table const *pt = _page_tables.first(); pt; pt = pt->next()) {
if (_page_table_base(pt->addr) == _page_table_base(addr))
return true;
}
return false;
}
/**
* Return page table covering virtual address 'addr'
*
* \throw Lookup_failed
*/
Page_table &_lookup(addr_t addr)
{
for (Page_table *pt = _page_tables.first(); pt; pt = pt->next()) {
if (_page_table_base(pt->addr) == _page_table_base(addr))
return *pt;
}
PDBG("page-table lookup failed");
throw Lookup_failed();
}
public:
/**
* Constructor
*
* \param md_alloc backing store allocator for metadata
*/
Page_table_registry(Allocator &md_alloc)
:
_page_table_slab(md_alloc),
_page_table_entry_slab(md_alloc)
{ }
/**
* Register page table
*
* \param addr virtual address
* \param sel page-table selector
*
* NOTE(review): 'sel' is currently not stored - 'Page_table' records
* only the address. Confirm whether the selector is needed later.
*
* An attempt to insert a page table for the same address range twice
* is reported as a warning and otherwise ignored.
*/
void insert_page_table(addr_t addr, unsigned sel)
{
if (_page_table_exists(addr)) {
PWRN("trying to insert page table for 0x%lx twice", addr);
return;
}
_page_tables.insert(new (_page_table_slab) Page_table(addr));
}
bool has_page_table_at(addr_t addr) const
{
return _page_table_exists(addr);
}
/**
* Register page table entry
*
* \param addr virtual address
* \param sel page frame selector
*
* \throw Lookup_failed no page table for given address
*/
void insert_page_table_entry(addr_t addr, unsigned sel)
{
_lookup(addr).insert_entry(_page_table_entry_slab, addr, sel);
}
};
#endif /* _CORE__INCLUDE__PAGE_TABLE_REGISTRY_H_ */

View File

@ -20,6 +20,8 @@
/* local includes */
#include <platform_generic.h>
#include <core_mem_alloc.h>
#include <vm_space.h>
#include <core_cspace.h>
namespace Genode { class Platform; }
@ -36,12 +38,67 @@ class Genode::Platform : public Platform_generic
Phys_allocator _irq_alloc; /* IRQ allocator */
Rom_fs _rom_fs; /* ROM file system */
/**
* Shortcut for physical memory allocator
*/
Range_allocator &_phys_alloc = *_core_mem_alloc.phys_alloc();
/**
* Virtual address range usable by non-core processes
*/
addr_t _vm_base;
size_t _vm_size;
/**
* Initialize core allocators
*/
void _init_allocators();
bool _init_allocators_done;
/*
* Until this point, no interaction with the seL4 kernel was needed.
* However, the next steps involve the invocation of system calls and
* the use of kernel services. To use the kernel bindings, we first
* need to initialize the TLS mechanism that is used to find the IPC
* buffer for the calling thread.
*/
bool _init_sel4_ipc_buffer_done;
/* allocate 1st-level CNode */
Cnode _top_cnode { seL4_CapInitThreadCNode, Core_cspace::TOP_CNODE_SEL,
Core_cspace::NUM_TOP_SEL_LOG2, _phys_alloc };
/* allocate 2nd-level CNode to align core's CNode with the LSB of the CSpace*/
Cnode _core_pad_cnode { seL4_CapInitThreadCNode, Core_cspace::CORE_PAD_CNODE_SEL,
Core_cspace::NUM_CORE_PAD_SEL_LOG2,
_phys_alloc };
/* allocate 3rd-level CNode for core's objects */
Cnode _core_cnode { seL4_CapInitThreadCNode, Core_cspace::CORE_CNODE_SEL,
Core_cspace::NUM_CORE_SEL_LOG2, _phys_alloc };
/* allocate 2nd-level CNode for storing page-frame cap selectors */
Cnode _phys_cnode { seL4_CapInitThreadCNode, Core_cspace::PHYS_CNODE_SEL,
Core_cspace::NUM_PHYS_SEL_LOG2, _phys_alloc };
/**
* Replace initial CSpace with custom CSpace layout
*/
void _switch_to_core_cspace();
bool _switch_to_core_cspace_done;
Page_table_registry _core_page_table_registry;
/**
* Pre-populate core's '_page_table_registry' with the information
* about the initial page tables and page frames as set up by the
* kernel
*/
void _init_core_page_table_registry();
bool _init_core_page_table_registry_done;
Vm_space _core_vm_space;
int _init_rom_fs();
public:
@ -66,6 +123,11 @@ class Genode::Platform : public Platform_generic
size_t vm_size() const { return _vm_size; }
Rom_fs *rom_fs() { return &_rom_fs; }
Cnode &phys_cnode() { return _phys_cnode; }
Cnode &top_cnode() { return _top_cnode; }
Vm_space &core_vm_space() { return _core_vm_space; }
void wait_for_exit();
};

View File

@ -15,7 +15,6 @@
#define _CORE__INCLUDE__UNTYPED_ADDRESS_H_
/* seL4 includes */
#include <sel4/interfaces/sel4_client.h>
#include <sel4/bootinfo.h>
namespace Genode { struct Untyped_address; }
@ -44,11 +43,11 @@ class Genode::Untyped_address
seL4_Untyped _sel = 0;
addr_t _offset = 0;
addr_t _phys = 0;
void _init(seL4_BootInfo const &bi, addr_t phys_addr, size_t size,
unsigned const start_idx, unsigned const num_idx)
{
for (unsigned i = start_idx; i < start_idx + num_idx; i++) {
/* index into 'untypedPaddrList' and 'untypedSizeBitsList' */
@ -78,6 +77,8 @@ class Genode::Untyped_address
*/
Untyped_address(addr_t phys_addr, size_t size)
{
_phys = phys_addr;
seL4_BootInfo const &bi = sel4_boot_info();
_init(bi, phys_addr, size, bi.untyped.start,
bi.untyped.end - bi.untyped.start);
@ -92,6 +93,7 @@ class Genode::Untyped_address
unsigned sel() const { return _sel; }
addr_t offset() const { return _offset; }
addr_t phys() const { return _phys; }
};

View File

@ -0,0 +1,127 @@
/*
* \brief Utilities for dealing with untyped memory
* \author Norman Feske
* \date 2015-05-06
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _CORE__INCLUDE__UNTYPED_MEMORY_H_
#define _CORE__INCLUDE__UNTYPED_MEMORY_H_
/* Genode includes */
#include <base/allocator.h>
/* core includes */
#include <core_cspace.h>
#include <util.h>
#include <untyped_address.h>
/* seL4 includes */
#include <sel4/interfaces/sel4_client.h>
namespace Genode { struct Untyped_memory; }
struct Genode::Untyped_memory
{
class Phys_alloc_failed : Exception { };
/**
* Allocate naturally-aligned physical memory for seL4 kernel object
*
* \param phys_alloc allocator for physical memory
* \param size_log2 log2 of the allocation size in bytes
*
* \throw Phys_alloc_failed
*/
static inline Untyped_address alloc_log2(Range_allocator &phys_alloc,
size_t const size_log2)
{
/*
* The natural alignment is needed to ensure that the backing store is
* contained in a single untyped memory region.
*/
void *out_ptr = nullptr;
size_t const size = 1UL << size_log2;
Range_allocator::Alloc_return alloc_ret =
phys_alloc.alloc_aligned(size, &out_ptr, size_log2);
addr_t const phys_addr = (addr_t)out_ptr;
if (alloc_ret.is_error()) {
PERR("%s: allocation of untyped memory failed", __FUNCTION__);
throw Phys_alloc_failed();
}
return Untyped_address(phys_addr, size);
}
/**
* Allocate naturally aligned physical memory
*
* \param size size in bytes
*
* \throw Phys_alloc_failed if 'size' is 0 or the allocation fails
*/
static inline Untyped_address alloc(Range_allocator &phys_alloc,
size_t const size)
{
if (size == 0) {
PERR("%s: invalid size of 0x%zd", __FUNCTION__, size);
throw Phys_alloc_failed();
}
/* calculate next power-of-two size that fits the allocation size */
size_t const size_log2 = log2(size - 1) + 1;
return alloc_log2(phys_alloc, size_log2);
}
/**
* Create page frames from untyped memory
*
* Retypes 'num_pages' 4K page frames out of the untyped memory at
* 'phys_addr' and places their selectors into the phys CNode, indexed
* by physical frame number.
*/
static inline void convert_to_page_frames(addr_t phys_addr,
size_t num_pages)
{
size_t const size = num_pages << get_page_size_log2();
/*
* Align allocation offset to page boundary.
* NOTE(review): 'align_addr' rounds up - 'phys_addr' is presumably
* expected to be page-aligned already; confirm for unaligned callers.
*/
Untyped_address const untyped_address(align_addr(phys_addr, 12), size);
seL4_Untyped const service = untyped_address.sel();
int const type = seL4_IA32_4K;
int const offset = untyped_address.offset();
int const size_bits = 0;
seL4_CNode const root = Core_cspace::TOP_CNODE_SEL;
int const node_index = Core_cspace::TOP_CNODE_PHYS_IDX;
int const node_depth = Core_cspace::NUM_TOP_SEL_LOG2;
/* destination slot within the phys CNode is the physical frame number */
int const node_offset = phys_addr >> get_page_size_log2();
int const num_objects = num_pages;
PDBG("create frame idx %x", node_offset);
int const ret = seL4_Untyped_RetypeAtOffset(service,
type,
offset,
size_bits,
root,
node_index,
node_depth,
node_offset,
num_objects);
if (ret != 0) {
PERR("%s: seL4_Untyped_RetypeAtOffset (IA32_4K) returned %d",
__FUNCTION__, ret);
}
}
/**
* Return selector of the page frame at 'phys_addr'
*
* The returned value addresses the frame via the phys CNode within the
* top-level CNode.
*/
static inline unsigned frame_sel(addr_t phys_addr)
{
return (Core_cspace::TOP_CNODE_PHYS_IDX << Core_cspace::NUM_PHYS_SEL_LOG2)
| (phys_addr >> get_page_size_log2());
}
};
#endif /* _CORE__INCLUDE__UNTYPED_MEMORY_H_ */

View File

@ -18,6 +18,10 @@
#include <rm_session/rm_session.h>
#include <base/printf.h>
/* core includes */
#include <core_cspace.h>
namespace Genode {
constexpr size_t get_page_size_log2() { return 12; }
@ -29,6 +33,7 @@ namespace Genode {
inline addr_t map_src_addr(addr_t core_local, addr_t phys) { return phys; }
inline size_t constrain_map_size_log2(size_t size_log2) { return get_page_size_log2(); }
inline void print_page_fault(const char *msg, addr_t pf_addr, addr_t pf_ip,
Rm_session::Fault_type pf_type,
unsigned long faulter_badge)

View File

@ -0,0 +1,215 @@
/*
* \brief Virtual-memory space
* \author Norman Feske
* \date 2015-05-04
*/
/*
* Copyright (C) 2015 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _CORE__INCLUDE__VM_SPACE_H_
#define _CORE__INCLUDE__VM_SPACE_H_
/* Genode includes */
#include <util/bit_allocator.h>
/* core includes */
#include <page_table_registry.h>
#include <cnode.h>
namespace Genode { class Vm_space; }
class Genode::Vm_space
{
private:
Page_table_registry &_page_table_registry;
/* ID used as index within the top-level CNode */
unsigned const _id;
Range_allocator &_phys_alloc;
/**
* Maximum number of page tables and page frames for the VM space
*/
enum { NUM_VM_SEL_LOG2 = 13 };
Cnode &_top_level_cnode;
Cnode &_phys_cnode;
/**
* 2nd-level CNode for aligning '_vm_cnode' with the LSB of the CSpace
*/
Cnode _vm_pad_cnode;
/**
* 3rd-level CNode for storing page-table and page-frame capabilities
*/
Cnode _vm_cnode;
/**
* Allocator for the selectors within '_vm_cnode'
*/
Bit_allocator<1UL << NUM_VM_SEL_LOG2> _sel_alloc;
/**
* Return selector for a capability slot within '_vm_cnode'
*
* NOTE(review): the shift of 20 presumably reflects the CSpace layout
* ('_id' indexes the top-level CNode) - confirm against 'Core_cspace'
*/
unsigned _idx_to_sel(unsigned idx) const { return (_id << 20) | idx; }
/**
* Map one page frame at 'from_phys' to virtual address 'to_virt'
*/
void _map_page(addr_t from_phys, addr_t to_virt)
{
/* allocate page-table entry selector */
unsigned pte_idx = _sel_alloc.alloc();
/*
* Copy page-frame selector to pte_sel
*
* This is needed because each page-frame selector can be
* inserted into only a single page table.
*/
_vm_cnode.copy(_phys_cnode, from_phys >> get_page_size_log2(), pte_idx);
/* remember relationship between pte_sel and the virtual address */
_page_table_registry.insert_page_table_entry(to_virt, pte_idx);
/*
* Insert copy of page-frame selector into page table
*/
{
seL4_IA32_Page const service = _idx_to_sel(pte_idx);
/* NOTE(review): maps into core's own page directory - presumably
needs parametrization once non-core VM spaces are built */
seL4_IA32_PageDirectory const pd = seL4_CapInitThreadPD;
seL4_Word const vaddr = to_virt;
seL4_CapRights const rights = seL4_AllRights;
seL4_IA32_VMAttributes const attr = seL4_IA32_Default_VMAttributes;
int const ret = seL4_IA32_Page_Map(service, pd, vaddr, rights, attr);
if (ret != 0)
PERR("seL4_IA32_Page_Map to 0x%lx returned %d",
(unsigned long)vaddr, ret);
}
}
/**
* Install page table 'pt_sel' at virtual address 'to_virt'
*/
void _map_page_table(unsigned pt_sel, addr_t to_virt)
{
seL4_IA32_PageTable const service = pt_sel;
seL4_IA32_PageDirectory const pd = seL4_CapInitThreadPD;
seL4_Word const vaddr = to_virt;
seL4_IA32_VMAttributes const attr = seL4_IA32_Default_VMAttributes;
int const ret = seL4_IA32_PageTable_Map(service, pd, vaddr, attr);
if (ret != 0)
PDBG("seL4_IA32_PageTable_Map returned %d", ret);
}
class Alloc_page_table_failed : Exception { };
/**
* Allocate and install page table at given virtual address
*
* \throw Alloc_page_table_failed
*/
void _alloc_and_map_page_table(addr_t to_virt)
{
/* allocate page-table selector */
unsigned const pt_idx = _sel_alloc.alloc();
/* XXX account the consumed backing store */
/* allocate backing store for page table */
size_t const pt_mem_size_log2 = 12;
Untyped_address const untyped_addr =
Untyped_memory::alloc_log2(_phys_alloc, pt_mem_size_log2);
/* retype the untyped memory into a page-table object within '_vm_cnode' */
seL4_Untyped const service = untyped_addr.sel();
int const type = seL4_IA32_PageTableObject;
int const offset = untyped_addr.offset();
int const size_bits = pt_mem_size_log2;
seL4_CNode const root = _vm_cnode.sel();
int const node_index = 0;
int const node_depth = 0;
int const node_offset = pt_idx;
int const num_objects = 1;
int const ret = seL4_Untyped_RetypeAtOffset(service,
type,
offset,
size_bits,
root,
node_index,
node_depth,
node_offset,
num_objects);
if (ret != 0) {
PDBG("seL4_Untyped_RetypeAtOffset (page table) returned %d", ret);
throw Alloc_page_table_failed();
}
unsigned const pt_sel = _idx_to_sel(pt_idx);
_page_table_registry.insert_page_table(to_virt, pt_sel);
/* install the new page table into the page directory */
_map_page_table(pt_sel, to_virt);
}
public:
/**
* Constructor
*
* \param vm_pad_cnode_sel selector for the (2nd-level) VM pad CNode
* \param vm_cnode_sel selector for the (3rd-level) VM CNode
* \param phys_alloc backing store for the CNodes
* \param top_level_cnode top-level CNode to insert 'vm_pad_cnode_sel'
* \param core_cnode CNode that hosts the newly created CNodes
* \param phys_cnode CNode containing the page-frame selectors
* \param id ID used as index in 'top_level_cnode'
* \param page_table_registry association of VM CNode selectors
* with virtual addresses
*/
Vm_space(unsigned vm_pad_cnode_sel,
unsigned vm_cnode_sel,
Range_allocator &phys_alloc,
Cnode &top_level_cnode,
Cnode &core_cnode,
Cnode &phys_cnode,
unsigned id,
Page_table_registry &page_table_registry)
:
_page_table_registry(page_table_registry), _id(id),
_phys_alloc(phys_alloc),
_top_level_cnode(top_level_cnode),
_phys_cnode(phys_cnode),
_vm_pad_cnode(core_cnode.sel(), vm_pad_cnode_sel,
32 - 12 - NUM_VM_SEL_LOG2, phys_alloc),
_vm_cnode(core_cnode.sel(), vm_cnode_sel, NUM_VM_SEL_LOG2, phys_alloc)
{
Cnode_base const cspace(seL4_CapInitThreadCNode, 32);
/* insert 3rd-level VM CNode into 2nd-level VM-pad CNode */
_vm_pad_cnode.copy(cspace, vm_cnode_sel, 0);
/* insert 2nd-level VM-pad CNode into 1st-level CNode */
_top_level_cnode.copy(cspace, vm_pad_cnode_sel, id);
}
/**
* Map 'num_pages' page frames starting at 'from_phys' to 'to_virt'
*
* Allocates and installs a page table on demand before mapping the
* individual page frames.
*/
void map(addr_t from_phys, addr_t to_virt, size_t num_pages)
{
/* check if we need to add a page table to core's VM space */
if (!_page_table_registry.has_page_table_at(to_virt))
_alloc_and_map_page_table(to_virt);
for (size_t i = 0; i < num_pages; i++) {
off_t const offset = i << get_page_size_log2();
_map_page(from_phys + offset, to_virt + offset);
}
}
};
#endif /* _CORE__INCLUDE__VM_SPACE_H_ */

View File

@ -43,7 +43,11 @@ bool Core_mem_allocator::Mapped_mem_allocator::_map_local(addr_t virt_addr,
addr_t phys_addr,
unsigned size)
{
return map_local(phys_addr, virt_addr, size / get_page_size());
size_t const num_pages = size / get_page_size();
Untyped_memory::convert_to_page_frames(phys_addr, num_pages);
return map_local(phys_addr, virt_addr, num_pages);
}
@ -83,116 +87,8 @@ static inline void init_sel4_ipc_buffer()
}
/* CNode dimensions */
enum {
NUM_TOP_SEL_LOG2 = 12UL,
NUM_CORE_SEL_LOG2 = 14UL,
NUM_PHYS_SEL_LOG2 = 20UL,
};
/* selectors for statically created CNodes */
enum Static_cnode_sel {
TOP_CNODE_SEL = 0x200,
CORE_PAD_CNODE_SEL = 0x201,
CORE_CNODE_SEL = 0x202,
PHYS_CNODE_SEL = 0x203
};
/* indices within top-level CNode */
enum Top_cnode_idx {
TOP_CNODE_CORE_IDX = 0,
TOP_CNODE_PHYS_IDX = 0xfff
};
/**
* Replace initial CSpace with custom CSpace layout
*/
static void switch_to_core_cspace(Range_allocator &phys_alloc)
void Platform::_init_allocators()
{
Cnode_base const initial_cspace(seL4_CapInitThreadCNode, 32);
/* allocate 1st-level CNode */
static Cnode top_cnode(TOP_CNODE_SEL, NUM_TOP_SEL_LOG2, phys_alloc);
/* allocate 2nd-level CNode to align core's CNode with the LSB of the CSpace*/
static Cnode core_pad_cnode(CORE_PAD_CNODE_SEL,
32UL - NUM_TOP_SEL_LOG2 - NUM_CORE_SEL_LOG2,
phys_alloc);
/* allocate 3rd-level CNode for core's objects */
static Cnode core_cnode(CORE_CNODE_SEL, NUM_CORE_SEL_LOG2, phys_alloc);
/* copy initial selectors to core's CNode */
core_cnode.copy(initial_cspace, seL4_CapInitThreadTCB);
core_cnode.copy(initial_cspace, seL4_CapInitThreadCNode);
core_cnode.copy(initial_cspace, seL4_CapInitThreadPD);
core_cnode.move(initial_cspace, seL4_CapIRQControl); /* cannot be copied */
core_cnode.copy(initial_cspace, seL4_CapIOPort);
core_cnode.copy(initial_cspace, seL4_CapBootInfoFrame);
core_cnode.copy(initial_cspace, seL4_CapArchBootInfoFrame);
core_cnode.copy(initial_cspace, seL4_CapInitThreadIPCBuffer);
core_cnode.copy(initial_cspace, seL4_CapIPI);
core_cnode.copy(initial_cspace, seL4_CapDomain);
/* copy untyped memory selectors to core's CNode */
seL4_BootInfo const &bi = sel4_boot_info();
for (unsigned sel = bi.untyped.start; sel < bi.untyped.end; sel++)
core_cnode.copy(initial_cspace, sel);
for (unsigned sel = bi.deviceUntyped.start; sel < bi.deviceUntyped.end; sel++)
core_cnode.copy(initial_cspace, sel);
/* copy statically created CNode selectors to core's CNode */
core_cnode.copy(initial_cspace, TOP_CNODE_SEL);
core_cnode.copy(initial_cspace, CORE_PAD_CNODE_SEL);
core_cnode.copy(initial_cspace, CORE_CNODE_SEL);
/*
* Construct CNode hierarchy of core's CSpace
*/
/* insert 3rd-level core CNode into 2nd-level core-pad CNode */
core_pad_cnode.copy(initial_cspace, CORE_CNODE_SEL, 0);
/* insert 2nd-level core-pad CNode into 1st-level CNode */
top_cnode.copy(initial_cspace, CORE_PAD_CNODE_SEL, TOP_CNODE_CORE_IDX);
/* allocate 2nd-level CNode for storing page-frame cap selectors */
static Cnode phys_cnode(PHYS_CNODE_SEL, NUM_PHYS_SEL_LOG2, phys_alloc);
/* insert 2nd-level phys-mem CNode into 1st-level CNode */
top_cnode.copy(initial_cspace, PHYS_CNODE_SEL, TOP_CNODE_PHYS_IDX);
/* activate core's CSpace */
{
seL4_CapData_t null_data = { { 0 } };
int const ret = seL4_TCB_SetSpace(seL4_CapInitThreadTCB,
seL4_CapNull, /* fault_ep */
TOP_CNODE_SEL, null_data,
seL4_CapInitThreadPD, null_data);
if (ret != 0) {
PERR("%s: seL4_TCB_SetSpace returned %d", __FUNCTION__, ret);
}
}
}
Platform::Platform()
:
_io_mem_alloc(core_mem_alloc()), _io_port_alloc(core_mem_alloc()),
_irq_alloc(core_mem_alloc()),
_vm_base(0x1000),
_vm_size(2*1024*1024*1024UL - _vm_base) /* use the lower 2GiB */
{
/*
* Initialize core allocators
*/
seL4_BootInfo const &bi = sel4_boot_info();
/* interrupt allocator */
@ -210,8 +106,16 @@ Platform::Platform()
_core_mem_alloc.virt_alloc()->add_range(_vm_base, _vm_size);
/* remove core image from core's virtual address allocator */
/*
* XXX Why do we need to skip a few KiB after the end of core?
* When allocating a PTE immediately after _prog_img_end, the
* kernel would complain "Mapping already present" on the
* attempt to map a page frame.
*/
addr_t const core_virt_beg = trunc_page((addr_t)&_prog_img_beg),
core_virt_end = round_page((addr_t)&_prog_img_end);
core_virt_end = round_page((addr_t)&_prog_img_end)
+ 64*1024;
size_t const core_size = core_virt_end - core_virt_beg;
_core_mem_alloc.virt_alloc()->remove_range(core_virt_beg, core_size);
@ -225,18 +129,121 @@ Platform::Platform()
/* preserve context area in core's virtual address space */
_core_mem_alloc.virt_alloc()->remove_range(Native_config::context_area_virtual_base(),
Native_config::context_area_virtual_size());
}
void Platform::_switch_to_core_cspace()
{
Cnode_base const initial_cspace(seL4_CapInitThreadCNode, 32);
/* copy initial selectors to core's CNode */
_core_cnode.copy(initial_cspace, seL4_CapInitThreadTCB);
_core_cnode.copy(initial_cspace, seL4_CapInitThreadPD);
_core_cnode.move(initial_cspace, seL4_CapIRQControl); /* cannot be copied */
_core_cnode.copy(initial_cspace, seL4_CapIOPort);
_core_cnode.copy(initial_cspace, seL4_CapBootInfoFrame);
_core_cnode.copy(initial_cspace, seL4_CapArchBootInfoFrame);
_core_cnode.copy(initial_cspace, seL4_CapInitThreadIPCBuffer);
_core_cnode.copy(initial_cspace, seL4_CapIPI);
_core_cnode.copy(initial_cspace, seL4_CapDomain);
/* replace seL4_CapInitThreadCNode with new top-level CNode */
_core_cnode.copy(initial_cspace, Core_cspace::TOP_CNODE_SEL, seL4_CapInitThreadCNode);
/* copy untyped memory selectors to core's CNode */
seL4_BootInfo const &bi = sel4_boot_info();
for (unsigned sel = bi.untyped.start; sel < bi.untyped.end; sel++)
_core_cnode.copy(initial_cspace, sel);
for (unsigned sel = bi.deviceUntyped.start; sel < bi.deviceUntyped.end; sel++)
_core_cnode.copy(initial_cspace, sel);
/* copy statically created CNode selectors to core's CNode */
_core_cnode.copy(initial_cspace, Core_cspace::TOP_CNODE_SEL);
_core_cnode.copy(initial_cspace, Core_cspace::CORE_PAD_CNODE_SEL);
_core_cnode.copy(initial_cspace, Core_cspace::CORE_CNODE_SEL);
_core_cnode.copy(initial_cspace, Core_cspace::PHYS_CNODE_SEL);
/*
* Until this point, no interaction with the seL4 kernel was needed.
* However, the next steps involve the invocation of system calls and
* the use of kernel services. To use the kernel bindings, we first
* need to initialize the TLS mechanism that is used to find the IPC
* buffer for the calling thread.
* Construct CNode hierarchy of core's CSpace
*/
init_sel4_ipc_buffer();
/* initialize core's capability space */
switch_to_core_cspace(*_core_mem_alloc.phys_alloc());
/* insert 3rd-level core CNode into 2nd-level core-pad CNode */
_core_pad_cnode.copy(initial_cspace, Core_cspace::CORE_CNODE_SEL, 0);
/* insert 2nd-level core-pad CNode into 1st-level CNode */
_top_cnode.copy(initial_cspace, Core_cspace::CORE_PAD_CNODE_SEL,
Core_cspace::TOP_CNODE_CORE_IDX);
/* insert 2nd-level phys-mem CNode into 1st-level CNode */
_top_cnode.copy(initial_cspace, Core_cspace::PHYS_CNODE_SEL,
Core_cspace::TOP_CNODE_PHYS_IDX);
/* activate core's CSpace */
{
seL4_CapData_t null_data = { { 0 } };
int const ret = seL4_TCB_SetSpace(seL4_CapInitThreadTCB,
seL4_CapNull, /* fault_ep */
Core_cspace::TOP_CNODE_SEL, null_data,
seL4_CapInitThreadPD, null_data);
if (ret != 0) {
PERR("%s: seL4_TCB_SetSpace returned %d", __FUNCTION__, ret);
}
}
}
void Platform::_init_core_page_table_registry()
{
seL4_BootInfo const &bi = sel4_boot_info();
/*
* Register initial page tables
*/
addr_t virt_addr = (addr_t)(&_prog_img_beg);
for (unsigned sel = bi.userImagePTs.start; sel < bi.userImagePTs.end; sel++) {
_core_page_table_registry.insert_page_table(virt_addr, sel);
/* one page table has 1024 entries */
virt_addr += 1024*get_page_size();
}
/*
* Register initial page frames
*/
virt_addr = (addr_t)(&_prog_img_beg);
for (unsigned sel = bi.userImageFrames.start; sel < bi.userImageFrames.end; sel++) {
_core_page_table_registry.insert_page_table_entry(virt_addr, sel);
virt_addr += get_page_size();
}
}
Platform::Platform()
:
_io_mem_alloc(core_mem_alloc()), _io_port_alloc(core_mem_alloc()),
_irq_alloc(core_mem_alloc()),
_vm_base(0x1000),
_vm_size(2*1024*1024*1024UL - _vm_base), /* use the lower 2GiB */
_init_allocators_done((_init_allocators(), true)),
_init_sel4_ipc_buffer_done((init_sel4_ipc_buffer(), true)),
_switch_to_core_cspace_done((_switch_to_core_cspace(), true)),
_core_page_table_registry(*core_mem_alloc()),
_init_core_page_table_registry_done((_init_core_page_table_registry(), true)),
_core_vm_space(Core_cspace::CORE_VM_PAD_CNODE_SEL, Core_cspace::CORE_VM_CNODE_SEL,
_phys_alloc,
_top_cnode,
_core_cnode,
_phys_cnode,
Core_cspace::CORE_VM_ID,
_core_page_table_registry)
{
/* add boot modules to ROM fs */
@ -250,6 +257,7 @@ Platform::Platform()
printf(":virt_alloc: "); _core_mem_alloc.virt_alloc()->raw()->dump_addr_tree();
printf(":io_mem_alloc: "); _io_mem_alloc.raw()->dump_addr_tree();
}
}

View File

@ -22,8 +22,15 @@
using namespace Genode;
void Ram_session_component::_export_ram_ds(Dataspace_component *ds) { }
void Ram_session_component::_revoke_ram_ds(Dataspace_component *ds) { }
void Ram_session_component::_export_ram_ds(Dataspace_component *ds)
{
PDBG("not implemented");
}
void Ram_session_component::_revoke_ram_ds(Dataspace_component *ds)
{
PDBG("not implemented");
}
void Ram_session_component::_clear_ds (Dataspace_component *ds)

View File

@ -54,5 +54,4 @@ vpath trace_session_component.cc $(GEN_CORE_DIR)
vpath dataspace_component.cc $(GEN_CORE_DIR)
vpath core_mem_alloc.cc $(GEN_CORE_DIR)
vpath dump_alloc.cc $(GEN_CORE_DIR)
vpath context_area.cc $(GEN_CORE_DIR)
vpath %.cc $(REP_DIR)/src/core