heap: release ds pool meta data when destructed

This patch makes sure that the dataspace pool is flushed before
destructing the heap-local allocator-avl instance. With the original
destruction order, the allocator was destructed while still containing
dangling allocations made on behalf of the dataspace pool. In practice,
this caused no problem because the underlying backing store is
eventually freed when the pool itself is destructed. But it now
triggers a runtime warning because the allocator has become more strict
with regard to dangling allocations.
Norman Feske 2016-04-05 11:58:24 +02:00 committed by Christian Helmuth
parent 357dbdd64b
commit 8971bb25ce
2 changed files with 57 additions and 38 deletions
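For context on the ordering problem: C++ destructs non-static data members in the reverse order of their declaration, so with the old member order the heap-local allocator was torn down while the dataspace pool still referenced metadata blocks allocated from it. The following standalone sketch (plain C++ with made-up class names, not Genode code) reproduces the effect that caused the warning:

```cpp
#include <cstdio>
#include <cstdlib>

/* stand-in for the heap-local AVL allocator */
struct Meta_data_allocator
{
	int outstanding = 0;

	void *alloc(std::size_t n) { ++outstanding; return std::malloc(n); }
	void  free(void *p)        { --outstanding; std::free(p); }

	~Meta_data_allocator()
	{
		if (outstanding)   /* corresponds to the allocator's runtime warning */
			std::printf("warning: %d dangling allocation(s)\n", outstanding);
	}
};

/* stand-in for the dataspace pool: holds one metadata block */
struct Pool
{
	void *meta = nullptr;

	/* backing store released here, but the allocator is never told */
	~Pool() { std::free(meta); }
};

struct Old_heap
{
	/*
	 * Destructors run in reverse declaration order: '_alloc' is destructed
	 * first, while '_pool' still holds a block allocated from it.
	 */
	Pool                _pool;
	Meta_data_allocator _alloc;

	/* in the real heap this allocation happens lazily during operation */
	Old_heap() { _pool.meta = _alloc.alloc(64); }
};

int main() { Old_heap heap; }   /* warning printed when 'heap' goes out of scope */
```

The patch below resolves this with an explicit 'Heap' destructor that first releases the metadata blocks at the allocator and then destructs the allocator before the pool.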

include/base/heap.h

@@ -15,6 +15,7 @@
 #define _INCLUDE__BASE__HEAP_H_
 #include <util/list.h>
+#include <util/volatile_object.h>
 #include <ram_session/ram_session.h>
 #include <rm_session/rm_session.h>
 #include <base/allocator_avl.h>
@@ -64,10 +65,6 @@ class Genode::Heap : public Allocator
 Dataspace(Ram_dataspace_capability c, void *local_addr, size_t size)
 : cap(c), local_addr(local_addr), size(size) { }
-inline void * operator new(Genode::size_t, void* addr) {
-return addr; }
-inline void operator delete(void*) { }
 };
 /*
@@ -77,31 +74,23 @@ class Genode::Heap : public Allocator
 struct Dataspace_pool : public List<Dataspace>
 {
 Ram_session *ram_session; /* RAM session for backing store */
-Rm_session *rm_session; /* region manager */
+Rm_session *rm_session;
-Dataspace_pool(Ram_session *ram_session, Rm_session *rm_session)
-: ram_session(ram_session), rm_session(rm_session) { }
+Dataspace_pool(Ram_session *ram, Rm_session *rm)
+: ram_session(ram), rm_session(rm) { }
 /**
 * Destructor
 */
 ~Dataspace_pool();
 void reassign_resources(Ram_session *ram, Rm_session *rm) {
 ram_session = ram, rm_session = rm; }
 };
-/*
- * NOTE: The order of the member variables is important for
- * the calling order of the destructors!
- */
-Lock _lock;
-Dataspace_pool _ds_pool; /* list of dataspaces */
-Allocator_avl _alloc; /* local allocator */
-size_t _quota_limit;
-size_t _quota_used;
-size_t _chunk_size;
+Lock _lock;
+Volatile_object<Allocator_avl> _alloc; /* local allocator */
+Dataspace_pool _ds_pool; /* list of dataspaces */
+size_t _quota_limit;
+size_t _quota_used;
+size_t _chunk_size;
 /**
 * Allocate a new dataspace of the specified size
@@ -140,15 +129,17 @@ class Genode::Heap : public Allocator
 void *static_addr = 0,
 size_t static_size = 0)
 :
+_alloc(nullptr),
 _ds_pool(ram_session, rm_session),
-_alloc(0),
 _quota_limit(quota_limit), _quota_used(0),
 _chunk_size(MIN_CHUNK_SIZE)
 {
 if (static_addr)
-_alloc.add_range((addr_t)static_addr, static_size);
+_alloc->add_range((addr_t)static_addr, static_size);
 }
+~Heap();
 /**
 * Reconfigure quota limit
 *
@@ -171,7 +162,7 @@ class Genode::Heap : public Allocator
 bool alloc(size_t, void **) override;
 void free(void *, size_t) override;
 size_t consumed() const override { return _quota_used; }
-size_t overhead(size_t size) const override { return _alloc.overhead(size); }
+size_t overhead(size_t size) const override { return _alloc->overhead(size); }
 bool need_size_for_free() const override { return false; }
 };
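The new member `Volatile_object<Allocator_avl> _alloc` (from the added util/volatile_object.h include) wraps the allocator so that the 'Heap' destructor can destruct it at a well-defined point via `_alloc.destruct()`, while regular code accesses it through `_alloc->...`. As a rough illustration of the idea, here is a hypothetical, simplified wrapper; `Managed_object` is a made-up name and this is not Genode's actual `Volatile_object` implementation, which offers more (for example, re-construction):

```cpp
#include <new>      /* placement new */
#include <utility>  /* std::forward */

/* simplified stand-in for a Volatile_object-style wrapper */
template <typename T>
class Managed_object
{
	private:

		alignas(T) char _space[sizeof(T)];   /* in-place storage for T */
		bool            _constructed = false;

	public:

		template <typename... ARGS>
		Managed_object(ARGS &&... args)
		{
			new (_space) T(std::forward<ARGS>(args)...);
			_constructed = true;
		}

		/* explicit destruction at a point chosen by the owner */
		void destruct()
		{
			if (_constructed) {
				reinterpret_cast<T *>(_space)->~T();
				_constructed = false;
			}
		}

		~Managed_object() { destruct(); }   /* idempotent */

		T *operator -> () { return  reinterpret_cast<T *>(_space); }
		T &operator * ()  { return *reinterpret_cast<T *>(_space); }
};
```

With such a wrapper, the heap keeps `_alloc` as a regular member yet can tear it down explicitly in `~Heap()` before the destructor of `_ds_pool` runs, independently of the member declaration order.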

src/base/heap.cc

@@ -11,9 +11,9 @@
 * under the terms of the GNU General Public License version 2.
 */
+#include <util/construct_at.h>
 #include <base/env.h>
 #include <base/printf.h>
-#include <rm_session/rm_session.h>
 #include <base/heap.h>
 #include <base/lock.h>
@@ -36,8 +36,13 @@ Heap::Dataspace_pool::~Dataspace_pool()
 remove(ds);
-/* have the destructor of the 'cap' member called */
-delete ds;
+/*
+ * Call 'Dataspace' destructor to properly release the RAM dataspace
+ * capabilities. Note that we don't free the 'Dataspace' object at the
+ * local allocator because this is already done by the 'Heap'
+ * destructor prior executing the 'Dataspace_pool' destructor.
+ */
+ds->~Dataspace();
 rm_session->detach(ds_local_addr);
 ram_session->free(ds_cap);
@@ -84,17 +89,17 @@ Heap::Dataspace *Heap::_allocate_dataspace(size_t size, bool enforce_separate_me
 } else {
 /* add new local address range to our local allocator */
-_alloc.add_range((addr_t)ds_addr, size);
+_alloc->add_range((addr_t)ds_addr, size);
 /* allocate the Dataspace structure */
-if (_alloc.alloc_aligned(sizeof(Heap::Dataspace), &ds_meta_data_addr, log2(sizeof(addr_t))).is_error()) {
+if (_alloc->alloc_aligned(sizeof(Heap::Dataspace), &ds_meta_data_addr, log2(sizeof(addr_t))).is_error()) {
 PWRN("could not allocate dataspace meta data - this should never happen");
 return 0;
 }
 }
-ds = new (ds_meta_data_addr) Heap::Dataspace(new_ds_cap, ds_addr, size);
+ds = construct_at<Dataspace>(ds_meta_data_addr, new_ds_cap, ds_addr, size);
 _ds_pool.insert(ds);
@@ -104,7 +109,7 @@ Heap::Dataspace *Heap::_allocate_dataspace(size_t size, bool enforce_separate_me
 bool Heap::_try_local_alloc(size_t size, void **out_addr)
 {
-if (_alloc.alloc_aligned(size, out_addr, log2(sizeof(addr_t))).is_error())
+if (_alloc->alloc_aligned(size, out_addr, log2(sizeof(addr_t))).is_error())
 return false;
 _quota_used += size;
@@ -215,18 +220,41 @@ void Heap::free(void *addr, size_t size)
 _quota_used -= ds->size;
-/* have the destructor of the 'cap' member called */
-delete ds;
-_alloc.free(ds);
+destroy(*_alloc, ds);
 } else {
 /*
- * forward request to our local allocator
- */
-_alloc.free(addr, size);
+ * forward request to our local allocator
+ */
+_alloc->free(addr, size);
 _quota_used -= size;
 }
 }
+Heap::~Heap()
+{
+/*
+ * Revert allocations of heap-internal 'Dataspace' objects. Otherwise, the
+ * subsequent destruction of the 'Allocator_avl' would detect those blocks
+ * as dangling allocations.
+ *
+ * Since no new allocations can occur at the destruction time of the
+ * 'Heap', it is safe to release the 'Dataspace' objects at the allocator
+ * yet still access them afterwards during the destruction of the
+ * 'Allocator_avl'.
+ */
+for (Heap::Dataspace *ds = _ds_pool.first(); ds; ds = ds->next())
+_alloc->free(ds, sizeof(Dataspace));
+/*
+ * Destruct 'Allocator_avl' before destructing the dataspace pool. This
+ * order is important because some dataspaces of the dataspace pool are
+ * used as backing store for the allocator's meta data. If we destroyed
+ * the object pool before the allocator, the subsequent attempt to destruct
+ * the allocator would access no-longer-present backing store.
+ */
+_alloc.destruct();
+}
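The switch from the class-local 'operator new/delete' to Genode's `construct_at` (from the added util/construct_at.h include) and the explicit `ds->~Dataspace()` call follows the standard placement-new pattern: construct an object in memory obtained elsewhere, and later run its destructor without deallocating through 'delete'. A minimal plain-C++ sketch of that pattern, illustrative only and using standard placement new rather than Genode's `construct_at`, with `Dataspace_meta` as a made-up stand-in type:

```cpp
#include <cstdlib>
#include <new>      /* placement new */

struct Dataspace_meta
{
	unsigned long addr;
	unsigned long size;

	Dataspace_meta(unsigned long addr, unsigned long size)
	: addr(addr), size(size) { }
};

int main()
{
	/* memory obtained from some allocator (here simply malloc) */
	void *buf = std::malloc(sizeof(Dataspace_meta));

	/* construct the object in place, like 'construct_at<Dataspace>(...)' */
	Dataspace_meta *ds = new (buf) Dataspace_meta(0x1000, 4096);

	/* run the destructor explicitly, like 'ds->~Dataspace()' ... */
	ds->~Dataspace_meta();

	/* ... and return the memory to its allocator as a separate step */
	std::free(buf);
}
```

In the patched heap these two steps are even performed by different parties: the `~Heap` body returns the metadata memory to the AVL allocator up front, while `Dataspace_pool::~Dataspace_pool` runs the 'Dataspace' destructors afterwards, which is safe because no further allocations can happen during teardown.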