heap: release some memory to the RAM session

With this patch, allocations >= 64K are fulfilled by dedicated
dataspaces, which are released to the RAM session once they are
no longer needed.

Fixes #1267
commit 4539eb7512 (parent 53d471aa22)
Author:    Christian Prochaska, 2014-10-01 13:20:41 +02:00
Committer: Christian Helmuth

2 changed files with 206 additions and 106 deletions
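
In a nutshell: small requests are carved out of shared, exponentially
growing chunks, while requests of BIG_ALLOCATION_THRESHOLD (64K) bytes or
more get a page-aligned dataspace of their own, which can later be handed
back to the RAM session as a whole. The following standalone sketch models
just this size policy (illustrative only: 'backing_store_size' is a
hypothetical helper; the constants and 'align_addr' mirror the diff below):

    /*
     * Standalone model of the size policy introduced by this patch
     * (not code from the commit).
     */
    #include <cstddef>
    #include <algorithm>

    enum : std::size_t {
        META_DATA_SIZE           = 1024,    /* in bytes */
        BIG_ALLOCATION_THRESHOLD = 64*1024  /* in bytes */
    };

    /* round 'value' up to the next 2^bits boundary (bits = 12: 4K pages) */
    static std::size_t align_addr(std::size_t value, unsigned bits)
    {
        return (value + (std::size_t(1) << bits) - 1)
               & ~((std::size_t(1) << bits) - 1);
    }

    /* size of the dataspace backing an allocation of 'size' bytes */
    std::size_t backing_store_size(std::size_t size, std::size_t chunk_size_bytes)
    {
        if (size >= BIG_ALLOCATION_THRESHOLD)
            /* big allocation: dedicated dataspace, page-aligned, no meta
               data, released to the RAM session as a whole on free() */
            return align_addr(size, 12);

        /* small allocation: shared chunk that also holds allocator meta data */
        return std::max(align_addr(size + META_DATA_SIZE, 12), chunk_size_bytes);
    }

Only dataspaces from the first branch can be released individually on
'free()', which is why the patch starts tracking each dataspace's size.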

File 1: heap.h

@@ -34,7 +34,19 @@ namespace Genode {
         enum {
             MIN_CHUNK_SIZE =   4*1024, /* in machine words */
-            MAX_CHUNK_SIZE = 256*1024
+            MAX_CHUNK_SIZE = 256*1024,
+
+            /*
+             * Meta data includes the Dataspace structure and meta data of
+             * the AVL allocator.
+             */
+            META_DATA_SIZE = 1024, /* in bytes */
+
+            /*
+             * Allocation sizes >= this value are considered as big
+             * allocations, which get their own dataspace. In contrast
+             * to smaller allocations, this memory is released to
+             * the RAM session when 'free()' is called.
+             */
+            BIG_ALLOCATION_THRESHOLD = 64*1024 /* in bytes */
         };

         class Dataspace : public List<Dataspace>::Element
@@ -43,50 +55,35 @@ namespace Genode {
             Ram_dataspace_capability cap;
             void *local_addr;
+            size_t size;

-            Dataspace(Ram_dataspace_capability c, void *a)
-            : cap(c), local_addr(a) {}
+            Dataspace(Ram_dataspace_capability c, void *local_addr, size_t size)
+            : cap(c), local_addr(local_addr), size(size) { }

             inline void * operator new(Genode::size_t, void* addr) {
                 return addr; }
             inline void operator delete(void*) { }
         };

-        class Dataspace_pool : public List<Dataspace>
+        /*
+         * This structure exists only to make sure that the dataspaces are
+         * destroyed after the AVL allocator.
+         */
+        struct Dataspace_pool : public List<Dataspace>
         {
-            private:
-
-                Ram_session *_ram_session;  /* ram session for backing store */
-                Rm_session  *_rm_session;   /* region manager */
-
-            public:
-
-                /**
-                 * Constructor
-                 */
-                Dataspace_pool(Ram_session *ram_session, Rm_session *rm_session):
-                    _ram_session(ram_session), _rm_session(rm_session) { }
-
-                /**
-                 * Destructor
-                 */
-                ~Dataspace_pool();
-
-                /**
-                 * Expand dataspace by specified size
-                 *
-                 * \param size      number of bytes to add to the dataspace pool
-                 * \param md_alloc  allocator to expand. This allocator is also
-                 *                  used for meta data allocation (only after
-                 *                  being successfully expanded).
-                 * \throw           Rm_session::Invalid_dataspace,
-                 *                  Rm_session::Region_conflict
-                 * \return          0 on success or negative error code
-                 */
-                int expand(size_t size, Range_allocator *alloc);
-
-                void reassign_resources(Ram_session *ram, Rm_session *rm) {
-                    _ram_session = ram, _rm_session = rm; }
+            Ram_session *ram_session; /* ram session for backing store */
+            Rm_session  *rm_session;  /* region manager */
+
+            /**
+             * Constructor
+             */
+            Dataspace_pool(Ram_session *ram_session, Rm_session *rm_session)
+            : ram_session(ram_session), rm_session(rm_session) { }
+
+            /**
+             * Destructor
+             */
+            ~Dataspace_pool();
+
+            void reassign_resources(Ram_session *ram, Rm_session *rm) {
+                ram_session = ram, rm_session = rm; }
         };

         /*
@@ -101,16 +98,33 @@ namespace Genode {
             size_t _quota_used;
             size_t _chunk_size;

+            /**
+             * Allocate a new dataspace of the specified size
+             *
+             * \param size  number of bytes to allocate
+             * \param enforce_separate_metadata  if true, the new dataspace
+             *                                   will not contain any meta data
+             * \throw   Rm_session::Invalid_dataspace,
+             *          Rm_session::Region_conflict
+             * \return  allocated dataspace, or 0 on error
+             */
+            Heap::Dataspace *_allocate_dataspace(size_t size, bool enforce_separate_metadata);
+
             /**
              * Try to allocate block at our local allocator
              *
              * \return true on success
              *
-             * This function is a utility used by 'alloc' to avoid
-             * code duplication.
+             * This function is a utility used by '_unsynchronized_alloc' to
+             * avoid code duplication.
              */
             bool _try_local_alloc(size_t size, void **out_addr);

+            /**
+             * Unsynchronized implementation of 'alloc'
+             */
+            bool _unsynchronized_alloc(size_t size, void **out_addr);
+
         public:

             enum { UNLIMITED = ~0 };

File 2: heap.cc

@@ -32,48 +32,19 @@ Heap::Dataspace_pool::~Dataspace_pool()
         */
        Ram_dataspace_capability ds_cap = ds->cap;
+       void *ds_local_addr = ds->local_addr;

        remove(ds);
+
+       /* have the destructor of the 'cap' member called */
        delete ds;
-       _rm_session->detach(ds->local_addr);
-       _ram_session->free(ds_cap);
+
+       rm_session->detach(ds_local_addr);
+       ram_session->free(ds_cap);
    }
 }


-int Heap::Dataspace_pool::expand(size_t size, Range_allocator *alloc)
-{
-   Ram_dataspace_capability new_ds_cap;
-   void *local_addr, *ds_addr = 0;
-
-   /* make new ram dataspace available at our local address space */
-   try {
-       new_ds_cap = _ram_session->alloc(size);
-       local_addr = _rm_session->attach(new_ds_cap);
-   } catch (Ram_session::Alloc_failed) {
-       return -2;
-   } catch (Rm_session::Attach_failed) {
-       _ram_session->free(new_ds_cap);
-       return -3;
-   }
-
-   /* add new local address range to our local allocator */
-   alloc->add_range((addr_t)local_addr, size);
-
-   /* now that we have new backing store, allocate Dataspace structure */
-   if (alloc->alloc_aligned(sizeof(Dataspace), &ds_addr, 2).is_error()) {
-       PWRN("could not allocate meta data - this should never happen");
-       return -1;
-   }
-
-   /* add dataspace information to list of dataspaces */
-   Dataspace *ds = new (ds_addr) Dataspace(new_ds_cap, local_addr);
-   insert(ds);
-
-   return 0;
-}
-
-
 int Heap::quota_limit(size_t new_quota_limit)
 {
    if (new_quota_limit < _quota_used) return -1;
@@ -82,6 +53,55 @@ int Heap::quota_limit(size_t new_quota_limit)
 }


+Heap::Dataspace *Heap::_allocate_dataspace(size_t size, bool enforce_separate_metadata)
+{
+   Ram_dataspace_capability new_ds_cap;
+   void *ds_addr = 0;
+   void *ds_meta_data_addr = 0;
+   Heap::Dataspace *ds = 0;
+
+   /* make new ram dataspace available at our local address space */
+   try {
+       new_ds_cap = _ds_pool.ram_session->alloc(size);
+       ds_addr = _ds_pool.rm_session->attach(new_ds_cap);
+   } catch (Ram_session::Alloc_failed) {
+       PWRN("could not allocate new dataspace of size %zu", size);
+       return 0;
+   } catch (Rm_session::Attach_failed) {
+       PWRN("could not attach dataspace");
+       _ds_pool.ram_session->free(new_ds_cap);
+       return 0;
+   }
+
+   if (enforce_separate_metadata) {
+
+       /* allocate the 'Dataspace' structure */
+       if (!_unsynchronized_alloc(sizeof(Heap::Dataspace), &ds_meta_data_addr)) {
+           PWRN("could not allocate dataspace meta data");
+           return 0;
+       }
+
+   } else {
+
+       /* add new local address range to our local allocator */
+       _alloc.add_range((addr_t)ds_addr, size);
+
+       /* allocate the 'Dataspace' structure */
+       if (_alloc.alloc_aligned(sizeof(Heap::Dataspace), &ds_meta_data_addr, 2).is_error()) {
+           PWRN("could not allocate dataspace meta data - this should never happen");
+           return 0;
+       }
+   }
+
+   ds = new (ds_meta_data_addr) Heap::Dataspace(new_ds_cap, ds_addr, size);
+   _ds_pool.insert(ds);
+
+   return ds;
+}
+
+
 bool Heap::_try_local_alloc(size_t size, void **out_addr)
 {
    if (_alloc.alloc_aligned(size, out_addr, 2).is_error())
@@ -92,6 +112,79 @@ bool Heap::_try_local_alloc(size_t size, void **out_addr)
 }


+bool Heap::_unsynchronized_alloc(size_t size, void **out_addr)
+{
+   size_t dataspace_size;
+
+   if (size >= BIG_ALLOCATION_THRESHOLD) {
+
+       /*
+        * big allocation
+        *
+        * in this case, we allocate one dataspace without any meta data in it
+        * and return its local address without going through the allocator.
+        */
+
+       /* align to 4K page */
+       dataspace_size = align_addr(size, 12);
+
+       Heap::Dataspace *ds = _allocate_dataspace(dataspace_size, true);
+
+       if (!ds) {
+           PWRN("could not allocate dataspace");
+           return false;
+       }
+
+       _quota_used += ds->size;
+
+       *out_addr = ds->local_addr;
+
+       return true;
+
+   } else {
+
+       /* try allocation at our local allocator */
+       if (_try_local_alloc(size, out_addr))
+           return true;
+
+       /*
+        * Calculate block size of needed backing store. The block must hold
+        * the requested 'size' and we add some space for meta data
+        * ('Dataspace' structures, AVL nodes). Finally, we align the size
+        * to a 4K page.
+        */
+       dataspace_size = size + META_DATA_SIZE;
+
+       if (dataspace_size < _chunk_size * sizeof(umword_t)) {
+
+           /*
+            * '_chunk_size' is a multiple of 4K, so 'dataspace_size' becomes
+            * 4K-aligned, too.
+            */
+           dataspace_size = _chunk_size * sizeof(umword_t);
+
+           /*
+            * Exponentially increase chunk size with each allocated chunk
+            * until we hit 'MAX_CHUNK_SIZE'.
+            */
+           _chunk_size = min(2*_chunk_size, (size_t)MAX_CHUNK_SIZE);
+
+       } else {
+
+           /* align to 4K page */
+           dataspace_size = align_addr(dataspace_size, 12);
+       }
+
+       _allocate_dataspace(dataspace_size, false);
+
+       /* allocate originally requested block */
+       return _try_local_alloc(size, out_addr);
+   }
+}
@@ -101,34 +194,7 @@ bool Heap::alloc(size_t size, void **out_addr)
 {
    /* serialize access of heap functions */
    Lock::Guard lock_guard(_lock);

    if (size + _quota_used > _quota_limit)
        return false;

-   /* try allocation at our local allocator */
-   if (_try_local_alloc(size, out_addr))
-       return true;
-
-   /*
-    * Calculate block size of needed backing store. The block must hold the
-    * requested 'size' and a new Dataspace structure if the allocation above
-    * failed. Finally, we align the size to a 4K page.
-    */
-   size_t request_size = size + 1024;
-
-   if (request_size < _chunk_size*sizeof(umword_t)) {
-       request_size = _chunk_size*sizeof(umword_t);
-
-       /*
-        * Exponentially increase chunk size with each allocated chunk until
-        * we hit 'MAX_CHUNK_SIZE'.
-        */
-       _chunk_size = min(2*_chunk_size, (size_t)MAX_CHUNK_SIZE);
-   }
-
-   if (_ds_pool.expand(align_addr(request_size, 12), &_alloc) < 0) {
-       PWRN("could not expand dataspace pool");
-       return 0;
-   }
-
-   /* allocate originally requested block */
-   return _try_local_alloc(size, out_addr);
+   return _unsynchronized_alloc(size, out_addr);
 }
@@ -137,13 +203,33 @@ void Heap::free(void *addr, size_t size)
    /* serialize access of heap functions */
    Lock::Guard lock_guard(_lock);

-   /* forward request to our local allocator */
-   _alloc.free(addr, size);
-
-   _quota_used -= size;
+   if (size >= BIG_ALLOCATION_THRESHOLD) {
+
+       Heap::Dataspace *ds;
+
+       /*
+        * We could check for completely unused dataspaces...
+        * Yes, we could...
+        */
+       for (ds = _ds_pool.first(); ds; ds = ds->next())
+           if (((addr_t)addr >= (addr_t)ds->local_addr) &&
+               ((addr_t)addr <= (addr_t)ds->local_addr + ds->size - 1))
+               break;
+
+       _ds_pool.remove(ds);
+       _ds_pool.rm_session->detach(ds->local_addr);
+       _ds_pool.ram_session->free(ds->cap);
+
+       _quota_used -= ds->size;
+
+       /* have the destructor of the 'cap' member called */
+       delete ds;
+       _alloc.free(ds);
+
+   } else {
+
+       /* forward request to our local allocator */
+       _alloc.free(addr, size);
+       _quota_used -= size;
+   }
 }
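
For completeness, the change seen from a client component (a sketch
against the 2014-era Genode API used in this diff, not code from the
commit):

    #include <base/env.h>
    #include <base/heap.h>

    void example()
    {
        /* heap backed by the environment's RAM and RM sessions */
        Genode::Heap heap(Genode::env()->ram_session(), Genode::env()->rm_session());

        void *addr = 0;

        /* >= 64K, so the block is served by a dedicated dataspace */
        if (heap.alloc(64*1024, &addr)) {

            /*
             * Before this patch, the backing store stayed in the heap's
             * dataspace pool; now it is detached and freed at the RAM
             * session right here.
             */
            heap.free(addr, 64*1024);
        }
    }

Before the patch, the 64K block would merely return to the heap's local
allocator; with it, the backing dataspace itself is freed at the RAM
session, making the memory available to other components again.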