core: re-design memory allocator (fix #1091)

* Core_mem_allocator: implement Range_allocator interface
* Core_mem_allocator: allocate with page-granularity only
* Use slab allocators in core where meaningful (e.g. dataspace objects)
This commit is contained in:
Stefan Kalkowski 2014-03-10 15:24:53 +01:00 committed by Norman Feske
parent 36bfb56e49
commit 0dece91973
27 changed files with 242 additions and 247 deletions

View File

@ -60,7 +60,7 @@ namespace Genode {
Range_allocator *io_port_alloc() { return &_io_port_alloc; }
Range_allocator *irq_alloc() { return &_irq_alloc; }
Range_allocator *region_alloc() { return _core_mem_alloc.virt_alloc(); }
Allocator *core_mem_alloc() { return &_core_mem_alloc; }
Range_allocator *core_mem_alloc() { return &_core_mem_alloc; }
addr_t vm_start() const { return _vm_base; }
size_t vm_size() const { return _vm_size; }
Rom_fs *rom_fs() { return &_rom_fs; }

View File

@ -23,9 +23,9 @@
namespace Genode {
inline size_t get_page_size_log2() { return 12; }
inline size_t get_page_size() { return 1 << get_page_size_log2(); }
inline addr_t get_page_mask() { return ~(get_page_size() - 1); }
constexpr size_t get_page_size_log2() { return 12; }
constexpr size_t get_page_size() { return 1 << get_page_size_log2(); }
constexpr addr_t get_page_mask() { return ~(get_page_size() - 1); }
inline addr_t trunc_page(addr_t addr) { return addr & get_page_mask(); }
inline addr_t round_page(addr_t addr) { return trunc_page(addr + get_page_size() - 1); }

View File

@ -126,9 +126,15 @@ int Platform::_init_rom_fs()
** Support for core memory management **
****************************************/
bool Core_mem_allocator::Mapped_mem_allocator::_map_local(addr_t virt_addr, addr_t phys_addr, unsigned size_log2)
bool Core_mem_allocator::_map_local(addr_t virt_addr, addr_t phys_addr, unsigned size)
{
return map_local(phys_addr, virt_addr, 1 << (size_log2 - get_page_size_log2()));
return map_local(phys_addr, virt_addr, size / get_page_size());
}
bool Core_mem_allocator::_unmap_local(addr_t virt_addr, unsigned size)
{
return unmap_local(virt_addr, size / get_page_size());
}

View File

@ -141,7 +141,7 @@ namespace Genode {
** Generic platform interface **
********************************/
Allocator *core_mem_alloc() { return &_ram_alloc; }
Range_allocator *core_mem_alloc() { return &_ram_alloc; }
Range_allocator *ram_alloc() { return &_ram_alloc; }
Range_allocator *io_mem_alloc() { return &_io_mem_alloc; }
Range_allocator *io_port_alloc() { return &_io_port_alloc; }

View File

@ -92,13 +92,10 @@ namespace Genode {
return l4_round_superpage(addr);
}
inline size_t get_page_size() { return L4_PAGESIZE; }
inline size_t get_page_size_log2() { return L4_LOG2_PAGESIZE; }
inline size_t get_super_page_size() { return L4_SUPERPAGESIZE; }
inline size_t get_super_page_size_log2() { return L4_LOG2_SUPERPAGESIZE; }
constexpr size_t get_page_size() { return L4_PAGESIZE; }
constexpr size_t get_page_size_log2() { return L4_LOG2_PAGESIZE; }
constexpr size_t get_super_page_size() { return L4_SUPERPAGESIZE; }
constexpr size_t get_super_page_size_log2() { return L4_LOG2_SUPERPAGESIZE; }
inline void print_page_fault(const char *msg, addr_t pf_addr, addr_t pf_ip,
Rm_session::Fault_type pf_type,

View File

@ -149,7 +149,7 @@ namespace Genode {
** Generic platform interface **
********************************/
Allocator *core_mem_alloc() { return &_ram_alloc; }
Range_allocator *core_mem_alloc() { return &_ram_alloc; }
Range_allocator *ram_alloc() { return &_ram_alloc; }
Range_allocator *io_mem_alloc() { return &_io_mem_alloc; }
Range_allocator *io_port_alloc() { return &_io_port_alloc; }

View File

@ -91,13 +91,10 @@ namespace Genode {
return (addr + L4_SUPERPAGESIZE-1) & L4_SUPERPAGEMASK;
}
inline size_t get_page_size() { return L4_PAGESIZE; }
inline size_t get_page_size_log2() { return L4_LOG2_PAGESIZE; }
inline size_t get_super_page_size() { return L4_SUPERPAGESIZE; }
inline size_t get_super_page_size_log2() { return L4_LOG2_SUPERPAGESIZE; }
constexpr size_t get_page_size() { return L4_PAGESIZE; }
constexpr size_t get_page_size_log2() { return L4_LOG2_PAGESIZE; }
constexpr size_t get_super_page_size() { return L4_SUPERPAGESIZE; }
constexpr size_t get_super_page_size_log2() { return L4_LOG2_SUPERPAGESIZE; }
inline void print_page_fault(const char *msg, addr_t pf_addr, addr_t pf_ip,
Rm_session::Fault_type pf_type,

View File

@ -38,7 +38,7 @@ namespace Genode {
Range_allocator *io_port_alloc() { return 0; }
Range_allocator *irq_alloc() { return 0; }
Range_allocator *region_alloc() { return 0; }
Allocator *core_mem_alloc() { return 0; }
Range_allocator *core_mem_alloc() { return 0; }
addr_t vm_start() const { return 0; }
size_t vm_size() const { return 0; }
Rom_fs *rom_fs() { return 0; }

View File

@ -20,9 +20,9 @@
namespace Genode {
inline size_t get_page_size_log2() { return 12; }
inline size_t get_page_size() { return 1 << get_page_size_log2(); }
inline addr_t get_page_mask() { return ~(get_page_size() - 1); }
constexpr size_t get_page_size_log2() { return 12; }
constexpr size_t get_page_size() { return 1 << get_page_size_log2(); }
constexpr addr_t get_page_mask() { return ~(get_page_size() - 1); }
inline addr_t trunc_page(addr_t addr) { return addr & get_page_mask(); }
inline addr_t round_page(addr_t addr) { return trunc_page(addr + get_page_size() - 1); }

View File

@ -38,19 +38,19 @@ namespace Genode
/**
* Get the minimal supported page-size log 2
*/
inline size_t get_page_size_log2() { return MIN_PAGE_SIZE_LOG2; }
constexpr size_t get_page_size_log2() { return MIN_PAGE_SIZE_LOG2; }
/**
* Get the minimal supported page-size
*/
inline size_t get_page_size() { return 1 << get_page_size_log2(); }
constexpr size_t get_page_size() { return 1 << get_page_size_log2(); }
/**
* Get the base mask for the minimal supported page-size
*/
inline addr_t get_page_mask() { return ~(get_page_size() - 1); }
constexpr addr_t get_page_mask() { return ~(get_page_size() - 1); }
/**

View File

@ -0,0 +1,22 @@
/*
* \brief Core-internal utilities
* \author Stefan Kalkowski
* \date 2014-03-10
*/
/*
* Copyright (C) 2014 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _CORE__INCLUDE__UTIL_H_
#define _CORE__INCLUDE__UTIL_H_

namespace Genode {

	/* core manages memory with 4 KiB (2^12 byte) page granularity */
	constexpr size_t get_page_size_log2() { return 12; }

	/* page size in bytes, derived from the log2 value above */
	constexpr size_t get_page_size() { return size_t(1) << get_page_size_log2(); }
}

#endif /* _CORE__INCLUDE__UTIL_H_ */

View File

@ -67,7 +67,7 @@ namespace Genode {
Range_allocator *io_port_alloc() { return &_io_port_alloc; }
Range_allocator *irq_alloc() { return &_irq_alloc; }
Range_allocator *region_alloc() { return _core_mem_alloc.virt_alloc(); }
Allocator *core_mem_alloc() { return &_core_mem_alloc; }
Range_allocator *core_mem_alloc() { return &_core_mem_alloc; }
addr_t vm_start() const { return _vm_base; }
size_t vm_size() const { return _vm_size; }
Rom_fs *rom_fs() { return &_rom_fs; }

View File

@ -20,11 +20,11 @@
namespace Genode {
inline size_t get_page_size_log2() { return 12; }
inline size_t get_page_size() { return 1 << get_page_size_log2(); }
inline addr_t get_page_mask() { return ~(get_page_size() - 1); }
inline size_t get_super_page_size_log2() { return 22; }
inline size_t get_super_page_size() { return 1 << get_super_page_size_log2(); }
constexpr size_t get_page_size_log2() { return 12; }
constexpr size_t get_page_size() { return 1 << get_page_size_log2(); }
constexpr addr_t get_page_mask() { return ~(get_page_size() - 1); }
constexpr size_t get_super_page_size_log2() { return 22; }
constexpr size_t get_super_page_size() { return 1 << get_super_page_size_log2(); }
inline addr_t trunc_page(addr_t addr) { return addr & get_page_mask(); }
inline addr_t round_page(addr_t addr) { return trunc_page(addr + get_page_size() - 1); }

View File

@ -321,12 +321,23 @@ Platform::Platform() :
map_local_phys_to_virt(__main_thread_utcb,
Mem_crd(BDA_PHY, 0, Rights(true, false, false)),
Mem_crd(BDA_VIRT, 0, Rights(true, false, false)));
/*
* Now that we can access the I/O ports for comport 0, printf works...
*/
/*
* remap main utcb to default utcb address
* we do this so early because Core_mem_allocator uses
* the main_thread_utcb very early to establish mappings
*/
if (map_local(__main_thread_utcb, (addr_t)__main_thread_utcb,
(addr_t)main_thread_utcb(), 1, Rights(true, true, false))) {
PERR("could not remap utcb of main thread");
nova_die();
}
/* sanity checks */
if (hip->sel_exc + 3 > NUM_INITIAL_PT_RESERVED) {
printf("configuration error\n");
@ -616,13 +627,6 @@ Platform::Platform() :
_irq_alloc.add_range(0, hip->sel_gsi - 1);
_gsi_base_sel = (hip->mem_desc_offset - hip->cpu_desc_offset) / hip->cpu_desc_size;
/* remap main utcb to default utcb address */
if (map_local(__main_thread_utcb, (addr_t)__main_thread_utcb,
(addr_t)main_thread_utcb(), 1, Rights(true, true, false))) {
PERR("could not remap utcb of main thread");
nova_die();
}
if (verbose_boot_info) {
printf(":virt_alloc: "); _core_mem_alloc.virt_alloc()->raw()->dump_addr_tree();
printf(":phys_alloc: "); _core_mem_alloc.phys_alloc()->raw()->dump_addr_tree();
@ -661,17 +665,24 @@ Platform::Platform() :
** Support for core memory management **
****************************************/
bool Core_mem_allocator::Mapped_mem_allocator::_map_local(addr_t virt_addr,
addr_t phys_addr,
unsigned size_log2)
bool Core_mem_allocator::_map_local(addr_t virt_addr, addr_t phys_addr,
unsigned size)
{
map_local((Utcb *)Thread_base::myself()->utcb(), phys_addr,
virt_addr, 1 << (size_log2 - get_page_size_log2()),
virt_addr, size / get_page_size(),
Rights(true, true, true), true);
return true;
}
bool Core_mem_allocator::_unmap_local(addr_t virt_addr, unsigned size)
{
unmap_local((Utcb *)Thread_base::myself()->utcb(),
virt_addr, size / get_page_size());
return true;
}
/********************************
** Generic platform interface **
********************************/

View File

@ -35,15 +35,19 @@ namespace Genode {
{
private:
typedef Core_mem_allocator::Phys_allocator Phys_allocator;
using Phys_allocator = Core_mem_allocator::Phys_allocator;
using Rom_slab = Tslab<Rom_module, get_page_size()>;
using Thread_slab = Tslab<Platform_thread, get_page_size()>;
Platform_pd *_core_pd; /* core protection domain */
Platform_thread *_core_pager; /* pager for core threads */
Core_mem_allocator _core_mem_alloc; /* core-accessible memory */
Phys_allocator _io_mem_alloc; /* MMIO allocator */
Phys_allocator _io_port_alloc; /* I/O port allocator */
Phys_allocator _irq_alloc; /* IRQ allocator */
Rom_fs _rom_fs; /* ROM file system */
Platform_pd *_core_pd; /* core protection domain */
Platform_thread *_core_pager; /* pager for core threads */
Core_mem_allocator _core_mem_alloc; /* core-accessible memory */
Phys_allocator _io_mem_alloc; /* MMIO allocator */
Phys_allocator _io_port_alloc; /* I/O port allocator */
Phys_allocator _irq_alloc; /* IRQ allocator */
Rom_slab _rom_slab; /* Slab for rom modules */
Rom_fs _rom_fs; /* ROM file system */
Thread_slab _thread_slab; /* Slab for platform threads */
/*
* Virtual-memory range for non-core address spaces.
@ -75,6 +79,10 @@ namespace Genode {
*/
Platform_thread *core_pager() { return _core_pager; }
/**
* Accessor for platform thread object slab allocator
*/
Thread_slab *thread_slab() { return &_thread_slab; }
/**********************************************
** Callbacks used for parsing the boot info **
@ -113,7 +121,7 @@ namespace Genode {
Range_allocator *io_port_alloc() { return &_io_port_alloc; }
Range_allocator *irq_alloc() { return &_irq_alloc; }
Range_allocator *region_alloc() { return _core_mem_alloc.virt_alloc(); }
Allocator *core_mem_alloc() { return &_core_mem_alloc; }
Range_allocator *core_mem_alloc() { return &_core_mem_alloc; }
addr_t vm_start() const { return _vm_start; }
size_t vm_size() const { return _vm_size; }
Rom_fs *rom_fs() { return &_rom_fs; }

View File

@ -61,9 +61,9 @@ namespace Genode {
}
}
inline size_t get_page_size_log2() { return 12; }
inline size_t get_page_size() { return 1 << get_page_size_log2(); }
inline addr_t get_page_mask() { return ~(get_page_size() - 1); }
constexpr size_t get_page_size_log2() { return 12; }
constexpr size_t get_page_size() { return 1 << get_page_size_log2(); }
constexpr addr_t get_page_mask() { return ~(get_page_size() - 1); }
inline size_t get_super_page_size_log2()
{

View File

@ -53,12 +53,13 @@ static int num_boot_module_objects;
** Support for core memory management **
****************************************/
bool Core_mem_allocator::Mapped_mem_allocator::_map_local(addr_t virt_addr,
addr_t phys_addr,
unsigned size_log2)
{
return map_local(phys_addr, virt_addr, 1 << (size_log2 - get_page_size_log2()));
}
bool Core_mem_allocator::_map_local(addr_t virt_addr, addr_t phys_addr,
unsigned size) {
return map_local(phys_addr, virt_addr, size / get_page_size()); }
bool Core_mem_allocator::_unmap_local(addr_t virt_addr, unsigned size) {
return unmap_local(virt_addr, size / get_page_size()); }
/**********************
@ -185,9 +186,14 @@ Okl4::bi_name_t Platform::bi_new_ms(Okl4::bi_name_t owner,
}
static char init_slab_block_rom[get_page_size()];
static char init_slab_block_thread[get_page_size()];
Platform::Platform() :
_io_mem_alloc(core_mem_alloc()), _io_port_alloc(core_mem_alloc()),
_irq_alloc(core_mem_alloc())
_irq_alloc(core_mem_alloc()),
_rom_slab(core_mem_alloc(), (Slab_block *)&init_slab_block_rom),
_thread_slab(core_mem_alloc(), (Slab_block *)&init_slab_block_thread)
{
/*
* We must be single-threaded at this stage and so this is safe.
@ -247,7 +253,7 @@ Platform::Platform() :
/* make gathered boot-module info known to '_rom_fs' */
int num_boot_modules = min(num_boot_module_objects, num_boot_module_memsects);
for (int i = 0; i < num_boot_modules; i++) {
Rom_module *r = new (core_mem_alloc())
Rom_module *r = new (&_rom_slab)
Rom_module(boot_modules[i].base,
boot_modules[i].size,
boot_modules[i].name);
@ -294,7 +300,7 @@ Platform::Platform() :
* not destroy this task, it should be no problem.
*/
Platform_thread *core_thread =
new(core_mem_alloc()) Platform_thread("core.main");
new(&_thread_slab) Platform_thread("core.main");
core_thread->set_l4_thread_id(Okl4::L4_rootserver);

View File

@ -34,7 +34,8 @@ void Thread_base::_thread_start()
void Thread_base::start()
{
/* create and start platform thread */
_tid.pt = new(platform()->core_mem_alloc()) Platform_thread(_context->name);
_tid.pt = new(platform_specific()->thread_slab())
Platform_thread(_context->name);
platform_specific()->core_pd()->bind_thread(_tid.pt);
@ -53,5 +54,5 @@ void Thread_base::cancel_blocking()
void Thread_base::_deinit_platform_thread()
{
/* destruct platform thread */
destroy(platform()->core_mem_alloc(), _tid.pt);
destroy(platform_specific()->thread_slab(), _tid.pt);
}

View File

@ -139,7 +139,7 @@ namespace Genode {
** Generic platform interface **
********************************/
Allocator *core_mem_alloc() { return &_ram_alloc; }
Range_allocator *core_mem_alloc() { return &_ram_alloc; }
Range_allocator *ram_alloc() { return &_ram_alloc; }
Range_allocator *io_mem_alloc() { return &_io_mem_alloc; }
Range_allocator *io_port_alloc() { return &_io_port_alloc; }

View File

@ -78,9 +78,9 @@ namespace Genode {
touch_read_write(bptr);
}
inline size_t get_page_size() { return Pistachio::get_page_size(); }
inline size_t get_page_size_log2() { return Pistachio::get_page_size_log2(); }
inline addr_t get_page_mask() { return Pistachio::get_page_mask(); }
constexpr size_t get_page_size_log2() { return 12; }
constexpr size_t get_page_size() { return 1 << get_page_size_log2(); }
constexpr addr_t get_page_mask() { return ~(get_page_size() - 1); }
inline size_t get_super_page_size_log2()
{

View File

@ -257,13 +257,11 @@ namespace Genode {
*
* \param BMDT block meta-data type
*/
template <typename BMDT>
template <typename BMDT, unsigned SLAB_BLOCK_SIZE = 256 * sizeof(addr_t)>
class Allocator_avl_tpl : public Allocator_avl_base
{
private:
enum { SLAB_BLOCK_SIZE = 256 * sizeof(addr_t) };
/*
* Pump up the Block class with custom meta-data type
*/

View File

@ -117,7 +117,7 @@ struct Genode::Expanding_ram_session_client : Upgradeable_client<Genode::Ram_ses
* Because the worst case almost never happens, we request
* a bit too much quota for the most time.
*/
enum { ALLOC_OVERHEAD = 1024U };
enum { ALLOC_OVERHEAD = 4096U };
Genode::snprintf(buf, sizeof(buf), "ram_quota=%zu",
size + ALLOC_OVERHEAD);
env()->parent()->resource_request(buf);

View File

@ -124,7 +124,14 @@ class Context_area_rm_session : public Rm_session
class Context_area_ram_session : public Ram_session
{
enum { verbose = false };
private:
enum { verbose = false };
using Ds_slab = Synchronized_allocator<Tslab<Dataspace_component,
get_page_size()> >;
Ds_slab _ds_slab { platform()->core_mem_alloc() };
public:
@ -153,7 +160,7 @@ class Context_area_ram_session : public Ram_session
if (verbose)
PDBG("phys_base = %p, size = 0x%zx", phys_base, size);
context_ds[i] = new (platform()->core_mem_alloc())
context_ds[i] = new (&_ds_slab)
Dataspace_component(size, 0, (addr_t)phys_base, false, true, 0);
Dataspace_capability cap = Dataspace_capability::local_cap(context_ds[i]);
@ -180,7 +187,7 @@ class Context_area_ram_session : public Ram_session
if (verbose)
PDBG("phys_addr = %p, size = 0x%zx", phys_addr, size);
destroy(platform()->core_mem_alloc(), dataspace_component);
destroy(&_ds_slab, dataspace_component);
platform_specific()->ram_alloc()->free(phys_addr, size);
}

View File

@ -1,6 +1,7 @@
/*
* \brief Allocator for core-local memory
* \author Norman Feske
* \author Stefan Kalkowski
* \date 2009-10-12
*/
@ -16,48 +17,46 @@
/* local includes */
#include <core_mem_alloc.h>
#include <util.h>
using namespace Genode;
static const bool verbose_core_mem_alloc = false;
bool Core_mem_allocator::Mapped_mem_allocator::alloc(size_t size, void **out_addr)
Range_allocator::Alloc_return
Core_mem_allocator::alloc_aligned(size_t size, void **out_addr, int align)
{
/* try to allocate block in cores already mapped virtual address ranges */
if (_alloc.alloc(size, out_addr))
return true;
/* there is no sufficient space in core's mapped virtual memory, expansion needed */
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
void *phys_addr = 0, *virt_addr = 0;
void *phys_addr = 0;
align = max((size_t)align, get_page_size_log2());
/* allocate physical pages */
if (!_phys_alloc->alloc(page_rounded_size, &phys_addr)) {
PERR("Could not allocate physical memory region of size %zu\n", page_rounded_size);
return false;
Alloc_return ret1 = _phys_alloc.raw()->alloc_aligned(page_rounded_size,
&phys_addr, align);
if (!ret1.is_ok()) {
PERR("Could not allocate physical memory region of size %zu\n",
page_rounded_size);
return ret1;
}
/* allocate range in core's virtual address space */
if (!_virt_alloc->alloc(page_rounded_size, &virt_addr)) {
PERR("Could not allocate virtual address range in core of size %zu\n", page_rounded_size);
Alloc_return ret2 = _virt_alloc.raw()->alloc_aligned(page_rounded_size,
out_addr, align);
if (!ret2.is_ok()) {
PERR("Could not allocate virtual address range in core of size %zu\n",
page_rounded_size);
/* revert physical allocation */
_phys_alloc->free(phys_addr);
return false;
_phys_alloc.raw()->free(phys_addr);
return ret2;
}
if (verbose_core_mem_alloc)
printf("added core memory block of %zu bytes at virt=%p phys=%p\n",
page_rounded_size, virt_addr, phys_addr);
page_rounded_size, *out_addr, phys_addr);
/* make physical page accessible at the designated virtual address */
_map_local((addr_t)virt_addr, (addr_t)phys_addr, get_page_size_log2());
_map_local((addr_t)*out_addr, (addr_t)phys_addr, page_rounded_size);
/* add new range to core's allocator for mapped virtual memory */
_alloc.add_range((addr_t)virt_addr, page_rounded_size);
/* now that we have added enough memory, try again... */
return _alloc.alloc(size, out_addr);
return Alloc_return::OK;
}

View File

@ -1,6 +1,7 @@
/*
* \brief Allocator infrastructure for core
* \author Norman Feske
* \author Stefan Kalkowski
* \date 2009-10-12
*/
@ -17,162 +18,102 @@
#include <base/lock.h>
#include <base/sync_allocator.h>
#include <base/allocator_avl.h>
#include <util.h>
namespace Genode {
/**
* Allocators for physical memory, core's virtual address space,
* and core-local memory. The interface of this class is thread safe.
*/
class Core_mem_allocator : public Allocator
{
public:
typedef Synchronized_range_allocator<Allocator_avl> Phys_allocator;
private:
/**
* Unsynchronized allocator for core-mapped memory
*
* This is an allocator of core-mapped memory. It is meant to be used as
* meta-data allocator for the other allocators and as back end for core's
* synchronized memory allocator.
*/
class Mapped_mem_allocator : public Allocator
{
private:
Allocator_avl _alloc;
Range_allocator *_phys_alloc;
Range_allocator *_virt_alloc;
/**
* Initial chunk to populate the core mem allocator
*
* This chunk is used at platform initialization time.
*/
char _initial_chunk[16*1024];
/**
* Map physical page locally to specified virtual address
*
* \param virt_addr core-local address
* \param phys_addr physical memory address
* \param size_log2 size of memory block to map
* \return true on success
*/
bool _map_local(addr_t virt_addr, addr_t phys_addr, unsigned size_log2);
public:
/**
* Constructor
*
* \param phys_alloc allocator of physical memory
* \param virt_alloc allocator of core-local virtual memory ranges
*/
Mapped_mem_allocator(Range_allocator *phys_alloc,
Range_allocator *virt_alloc)
: _alloc(0), _phys_alloc(phys_alloc), _virt_alloc(virt_alloc)
{
_alloc.add_range((addr_t)_initial_chunk, sizeof(_initial_chunk));
}
class Core_mem_allocator;
};
/*************************
** Allocator interface **
*************************/
/**
* Allocators for physical memory, core's virtual address space,
* and core-local memory. The interface of this class is thread safe.
* The class itself implements a ready-to-use memory allocator for
* core that allows memory to be allocated at page granularity only.
*/
class Genode::Core_mem_allocator : public Genode::Range_allocator
{
public:
bool alloc(size_t size, void **out_addr);
void free(void *addr, size_t size) { _alloc.free(addr, size); }
size_t consumed() { return _phys_alloc->consumed(); }
size_t overhead(size_t size) { return _phys_alloc->overhead(size); }
using Page_allocator = Allocator_avl_tpl<Empty, get_page_size()>;
using Phys_allocator = Synchronized_range_allocator<Page_allocator>;
bool need_size_for_free() const override {
return _phys_alloc->need_size_for_free(); }
};
protected:
/**
* Lock used for synchronization of all operations on the
* embedded allocators.
*/
Lock _lock;
/**
* Synchronized allocator of physical memory ranges
*
* This allocator must only be used to allocate memory
* ranges at page granularity.
*/
Phys_allocator _phys_alloc;
/**
* Synchronized allocator of core's virtual memory ranges
*
* This allocator must only be used to allocate memory
* ranges at page granularity.
*/
Phys_allocator _virt_alloc;
bool _map_local(addr_t virt_addr, addr_t phys_addr, unsigned size);
bool _unmap_local(addr_t virt_addr, unsigned size);
public:
/**
* Constructor
*/
Core_mem_allocator()
: _phys_alloc(&_lock, this),
_virt_alloc(&_lock, this) { }
/**
* Access physical-memory allocator
*/
Phys_allocator *phys_alloc() { return &_phys_alloc; }
/**
* Access core's virtual-memory allocator
*/
Phys_allocator *virt_alloc() { return &_virt_alloc; }
/**
* Lock used for synchronization of all operations on the
* embedded allocators.
*/
Lock _lock;
/*******************************
** Range allocator interface **
*******************************/
/**
* Synchronized allocator of physical memory ranges
*
* This allocator must only be used to allocate memory
* ranges at page granularity.
*/
Phys_allocator _phys_alloc;
int add_range(addr_t base, size_t size) { return -1; }
int remove_range(addr_t base, size_t size) { return -1; }
Alloc_return alloc_aligned(size_t size, void **out_addr, int align = 0);
Alloc_return alloc_addr(size_t size, addr_t addr) {
return Alloc_return::RANGE_CONFLICT; }
void free(void *addr) {}
size_t avail() { return _phys_alloc.avail(); }
/**
* Synchronized allocator of core's virtual memory ranges
*
* This allocator must only be used to allocate memory
* ranges at page granularity.
*/
Phys_allocator _virt_alloc;
/**
* Unsynchronized core-mapped memory allocator
*
* This allocator is internally used within this class for
* allocating meta data for the other allocators. It is not
* synchronized to avoid nested locking. The lock-guarded
* access to this allocator from the outer world is
* provided via the 'Allocator' interface implemented by
* 'Core_mem_allocator'. The allocator works at byte
* granularity.
*/
Mapped_mem_allocator _mem_alloc;
public:
/**
* Constructor
*/
Core_mem_allocator() :
_phys_alloc(&_lock, &_mem_alloc),
_virt_alloc(&_lock, &_mem_alloc),
_mem_alloc(_phys_alloc.raw(), _virt_alloc.raw())
{ }
/**
* Access physical-memory allocator
*/
Phys_allocator *phys_alloc() { return &_phys_alloc; }
/**
* Access core's virtual-memory allocator
*/
Phys_allocator *virt_alloc() { return &_virt_alloc; }
bool valid_addr(addr_t addr) { return _virt_alloc.valid_addr(addr); }
/*************************
** Allocator interface **
*************************/
/*************************
** Allocator interface **
*************************/
bool alloc(size_t size, void **out_addr)
{
Lock::Guard lock_guard(_lock);
return _mem_alloc.alloc(size, out_addr);
}
bool alloc(size_t size, void **out_addr) {
return alloc_aligned(size, out_addr).is_ok(); }
void free(void *addr, size_t size)
{
Lock::Guard lock_guard(_lock);
_mem_alloc.free(addr, size);
}
void free(void *addr, size_t) { free(addr); }
size_t consumed() { return _phys_alloc.consumed(); }
size_t overhead(size_t size) { return _phys_alloc.overhead(size); }
size_t consumed() { return _phys_alloc.consumed(); }
size_t overhead(size_t size) { return _phys_alloc.overhead(size); }
bool need_size_for_free() const override {
return _phys_alloc.need_size_for_free(); }
};
}
bool need_size_for_free() const override {
return _phys_alloc.need_size_for_free(); }
};
#endif /* _CORE__INCLUDE__CORE_MEM_ALLOC_H_ */

View File

@ -37,7 +37,7 @@ namespace Genode {
/**
* Allocator of core-local mapped virtual memory
*/
virtual Allocator *core_mem_alloc() = 0;
virtual Range_allocator *core_mem_alloc() = 0;
/**
* Allocator of physical memory

View File

@ -23,6 +23,7 @@
/* core includes */
#include <dataspace_component.h>
#include <util.h>
namespace Genode {
@ -37,9 +38,10 @@ namespace Genode {
class Invalid_dataspace : public Exception { };
enum { SBS = 1024 }; /* slab block size */
static constexpr size_t SBS = get_page_size();
typedef Synchronized_allocator<Tslab<Dataspace_component, SBS> > Ds_slab;
using Ds_slab = Synchronized_allocator<Tslab<Dataspace_component,
SBS> >;
Rpc_entrypoint *_ds_ep;
Rpc_entrypoint *_ram_session_ep;