/*
 * \brief  Core implementation of the RAM session interface
 * \author Norman Feske
 * \date   2006-05-19
 */

/*
 * Copyright (C) 2006-2013 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */
|
|
|
|
|
|
|
|
/* Genode includes */
|
|
|
|
#include <base/printf.h>
|
|
|
|
#include <util/arg_string.h>
|
|
|
|
|
|
|
|
/* core includes */
|
|
|
|
#include <ram_session_component.h>
|
|
|
|
|
|
|
|
using namespace Genode;


/* set to 'true' to log quota-accounting diagnostics in 'alloc' and 'transfer_quota' */
static const bool verbose = false;
|
|
|
|
|
|
|
|
|
2012-05-28 16:57:09 +02:00
|
|
|
/**
 * Return physical address of the given RAM dataspace
 *
 * \throw Invalid_dataspace  if the capability does not refer to a
 *                           dataspace known to the dataspace entrypoint
 */
addr_t Ram_session_component::phys_addr(Ram_dataspace_capability ds)
{
	/* look up dataspace, keeping it locked for the duration of the call */
	Object_pool<Dataspace_component>::Guard dataspace(_ds_ep->lookup_and_lock(ds));

	if (!dataspace)
		throw Invalid_dataspace();

	return dataspace->phys_addr();
}
|
|
|
|
|
|
|
|
|
2011-12-22 16:19:25 +01:00
|
|
|
/**
 * Release a RAM dataspace and return its backing store to the allocator
 *
 * \param ds  dataspace to free, may be 0 (treated as a no-op)
 *
 * Dataspaces not owned by this session are silently ignored, so a client
 * cannot free a foreign dataspace through a capability obtained elsewhere.
 * The teardown order below matters: the dataspace is dissolved from the
 * entrypoint and its native representation revoked before the physical
 * backing store is handed back and the meta data is destructed.
 */
void Ram_session_component::_free_ds(Dataspace_component *ds)
{
	if (!ds) return;

	/* only the owning RAM session may free the dataspace */
	if (!ds->owner(this)) return;

	/* remember size before the meta data is destroyed below */
	size_t ds_size = ds->size();

	/* tell entry point to forget the dataspace */
	_ds_ep->dissolve(ds);

	/* destroy native shared memory representation */
	_revoke_ram_ds(ds);

	/* XXX: remove dataspace from all RM sessions */

	/* free physical memory that was backing the dataspace */
	_ram_alloc->free((void *)ds->phys_addr(), ds_size);

	/* call dataspace destructors and free memory */
	destroy(&_ds_slab, ds);

	/* adjust payload, serialized with the other '_payload' updates */
	Lock::Guard lock_guard(_ref_members_lock);
	_payload -= ds_size;
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Transfer quota from this session to 'dst'
 *
 * \param dst     recipient session, must stand in a reference-account
 *                relationship with this session
 * \param amount  number of bytes to transfer
 *
 * \return  0 on success,
 *         -1 if 'dst' is not a valid session,
 *         -2 if no reference-account relationship exists,
 *         -3 if the remaining quota of this session is insufficient
 */
int Ram_session_component::_transfer_quota(Ram_session_component *dst, size_t amount)
{
	/* check if recipient is a valid Ram_session_component */
	if (!dst) return -1;

	/* check for reference account relationship */
	if ((ref_account() != dst) && (dst->ref_account() != this))
		return -2;

	/*
	 * Decrease quota limit of this session - check against used quota.
	 *
	 * The available quota is computed by subtraction instead of testing
	 * '_quota_limit < amount + _payload' because that addition can wrap
	 * around for a huge 'amount' and wrongly pass the check. The guard
	 * against '_payload' exceeding '_quota_limit' also keeps the value
	 * printed in the warning from underflowing.
	 */
	size_t const avail = (_quota_limit > _payload) ? _quota_limit - _payload : 0;
	if (amount > avail) {
		PWRN("Insufficient quota for transfer: %s", _label);
		PWRN("  have %zu, need %zu", avail, amount);
		return -3;
	}

	_quota_limit -= amount;

	/* increase quota_limit of recipient */
	dst->_quota_limit += amount;

	return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Add 'new_member' to the set of sessions that use this session as
 * their reference account, and point the member back at us
 */
void Ram_session_component::_register_ref_account_member(Ram_session_component *new_member)
{
	Lock::Guard guard(_ref_members_lock);

	_ref_members.insert(new_member);
	new_member->_ref_account = this;
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Detach 'member' from this reference account
 *
 * The caller must hold '_ref_members_lock'.
 */
void Ram_session_component::_unsynchronized_remove_ref_account_member(Ram_session_component *member)
{
	/* invalidate the member's back reference before unlisting it */
	member->_ref_account = 0;
	_ref_members.remove(member);
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Lock-taking wrapper around '_unsynchronized_remove_ref_account_member'
 */
void Ram_session_component::_remove_ref_account_member(Ram_session_component *member)
{
	Lock::Guard guard(_ref_members_lock);

	_unsynchronized_remove_ref_account_member(member);
}
|
|
|
|
|
|
|
|
|
2014-06-19 16:37:31 +02:00
|
|
|
/**
 * Allocate a new RAM dataspace
 *
 * \param ds_size  requested size in bytes, rounded up to page granularity
 * \param cached   cacheability attribute recorded in the dataspace
 *
 * \return  capability of the new dataspace, or an invalid capability if
 *          'ds_size' is zero
 *
 * \throw Quota_exceeded   if the session quota does not cover the request,
 *                         if no suitable physical range is free, or if
 *                         exporting the native representation fails
 * \throw Out_of_metadata  if allocating the dataspace meta data fails
 *
 * On each failure path, resources acquired up to that point are rolled
 * back before the exception is thrown.
 */
Ram_dataspace_capability Ram_session_component::alloc(size_t ds_size, Cache_attribute cached)
{
	/* zero-sized dataspaces are not allowed */
	if (!ds_size) return Ram_dataspace_capability();

	/* dataspace allocation granularity is page size */
	ds_size = align_addr(ds_size, 12);

	/*
	 * Check quota!
	 *
	 * In the worst case, we need to allocate a new slab block for the
	 * meta data of the dataspace to be created - therefore, we add
	 * the slab block size here.
	 */
	if (used_quota() + SBS + ds_size > _quota_limit) {

		if (verbose) {
			PWRN("Quota exceeded: %s", _label);
			PWRN("  memory for slab: %zu", _ds_slab.consumed());
			PWRN("  used quota: %zu", used_quota());
			PWRN("  ds_size: %zu", ds_size);
			PWRN("  sizeof(Ram_session_component): %zu", sizeof(Ram_session_component));
			PWRN("  quota_limit: %zu", _quota_limit);
		}

		throw Quota_exceeded();
	}

	/*
	 * Allocate physical backing store
	 *
	 * As an optimization for the use of large mapping sizes, we try to
	 * align the dataspace in physical memory naturally (size-aligned).
	 * If this does not work, we subsequently weaken the alignment constraint
	 * until the allocation succeeds.
	 *
	 * The loop terminates because 'ds_size' is page-aligned above, so
	 * 'log2(ds_size)' is at least 12.
	 */
	void *ds_addr = 0;
	bool alloc_succeeded = false;
	for (size_t align_log2 = log2(ds_size); align_log2 >= 12; align_log2--) {
		if (_ram_alloc->alloc_aligned(ds_size, &ds_addr, align_log2,
		                              _phys_start, _phys_end).is_ok()) {
			alloc_succeeded = true;
			break;
		}
	}

	/*
	 * Normally, init's quota equals the size of physical memory and this quota
	 * is distributed among the processes. As we check the quota before
	 * allocating, the allocation should always succeed in theory. However,
	 * fragmentation could cause a failing allocation.
	 */
	if (!alloc_succeeded) {
		PERR("We ran out of physical memory while allocating %zu bytes", ds_size);
		throw Quota_exceeded();
	}

	Dataspace_component *ds;
	try {
		/*
		 * For non-cached RAM dataspaces, we mark the dataspace as write
		 * combined and expect the pager to evaluate this dataspace property
		 * when resolving page faults.
		 */
		ds = new (&_ds_slab)
			Dataspace_component(ds_size, (addr_t)ds_addr, cached, true, this);
	} catch (Allocator::Out_of_memory) {
		PWRN("Could not allocate metadata");
		/* cleanup unneeded resources: return the physical backing store */
		_ram_alloc->free(ds_addr);

		throw Out_of_metadata();
	}

	/*
	 * Fill new dataspaces with zeros. For non-cached RAM dataspaces, this
	 * function must also make sure to flush all cache lines related to the
	 * address range used by the dataspace.
	 */
	_clear_ds(ds);

	/* create native shared memory representation of dataspace */
	try {
		_export_ram_ds(ds);
	} catch (Out_of_metadata) {
		PWRN("could not export RAM dataspace of size 0x%zx", ds->size());
		/* cleanup unneeded resources: meta data first, then backing store */
		destroy(&_ds_slab, ds);
		_ram_alloc->free(ds_addr);

		throw Quota_exceeded();
	}

	if (verbose)
		PDBG("ds_size=%zu, used_quota=%zu quota_limit=%zu",
		     ds_size, used_quota(), _quota_limit);

	/* make the dataspace accessible via its capability */
	Dataspace_capability result = _ds_ep->manage(ds);

	/* keep track of the used quota for actual payload */
	Lock::Guard lock_guard(_ref_members_lock);
	_payload += ds_size;

	return static_cap_cast<Ram_dataspace>(result);
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Free a RAM dataspace previously allocated by this session
 *
 * Invalid capabilities are silently ignored. The actual teardown is
 * delegated to '_free_ds', which also rejects dataspaces not owned by
 * this session.
 *
 * NOTE(review): 'lookup_and_lock' returns the object in locked state and
 * no explicit unlock appears here; presumably the lock is released as a
 * side effect of '_ds_ep->dissolve' inside '_free_ds' - confirm against
 * the 'Object_pool'/'Rpc_entrypoint' contract.
 */
void Ram_session_component::free(Ram_dataspace_capability ds_cap)
{
	Dataspace_component * ds =
		dynamic_cast<Dataspace_component *>(_ds_ep->lookup_and_lock(ds_cap));
	if (!ds)
		return;

	_free_ds(ds);
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Assign the reference account of this session
 *
 * \return  0 on success,
 *         -1 if the capability is no valid RAM session,
 *         -2 if a reference account was already assigned,
 *         -3 if the session refers to itself
 */
int Ram_session_component::ref_account(Ram_session_capability ram_session_cap)
{
	/* the reference account cannot be defined twice */
	if (_ref_account)
		return -2;

	Object_pool<Ram_session_component>::Guard
		session(_ram_session_ep->lookup_and_lock(ram_session_cap));

	/* check if recipient is a valid Ram_session_component */
	if (!session)
		return -1;

	/*
	 * Deny the usage of the ram session as its own ref account.
	 * XXX also check for cycles along the tree of ref accounts
	 */
	if (session == this)
		return -3;

	_ref_account = session;
	_ref_account->_register_ref_account_member(this);
	return 0;
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Transfer quota to the RAM session referred to by 'ram_session_cap'
 *
 * Error codes are those of '_transfer_quota'.
 */
int Ram_session_component::transfer_quota(Ram_session_capability ram_session_cap,
                                          size_t amount)
{
	if (verbose)
		PDBG("amount=%zu", amount);

	/* resolve recipient, keeping it locked while the transfer is performed */
	Object_pool<Ram_session_component>::Guard
		recipient(_ram_session_ep->lookup_and_lock(ram_session_cap));

	return _transfer_quota(recipient, amount);
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Constructor
 *
 * The session arguments may constrain allocations to a physical address
 * window via the 'phys_start' and 'phys_size' arguments; 'phys_size' of
 * zero selects the maximum physical address as upper bound.
 */
Ram_session_component::Ram_session_component(Rpc_entrypoint *ds_ep,
                                             Rpc_entrypoint *ram_session_ep,
                                             Range_allocator *ram_alloc,
                                             Allocator *md_alloc,
                                             const char *args,
                                             size_t quota_limit)
:
	_ds_ep(ds_ep), _ram_session_ep(ram_session_ep), _ram_alloc(ram_alloc),
	_quota_limit(quota_limit), _payload(0),
	_md_alloc(md_alloc, Arg_string::find_arg(args, "ram_quota").ulong_value(0)),
	_ds_slab(&_md_alloc), _ref_account(0),
	_phys_start(Arg_string::find_arg(args, "phys_start").ulong_value(0))
{
	Arg_string::find_arg(args, "label").string(_label, sizeof(_label), "");

	size_t const window = Arg_string::find_arg(args, "phys_size").ulong_value(0);

	/* sanitize overflow and interpret phys_size==0 as maximum phys address */
	bool const wraps = (_phys_start + window <= _phys_start);
	_phys_end = wraps ? ~0UL : _phys_start + window - 1;
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Destructor
 *
 * Frees all dataspaces of the session, returns the remaining quota to
 * the reference account, and re-parents all sessions that used this
 * session as their reference account to our original reference account.
 */
Ram_session_component::~Ram_session_component()
{
	/* destroy all dataspaces */
	for (Dataspace_component *ds; (ds = _ds_slab.raw()->first_object()); _free_ds(ds));

	/* after freeing all dataspaces, a non-zero payload indicates a leak */
	if (_payload != 0)
		PWRN("Remaining payload of %zu in ram session to destroy", _payload);

	/* without a reference account, there is no quota to hand back */
	if (!_ref_account) return;

	/* transfer remaining quota to reference account */
	_transfer_quota(_ref_account, _quota_limit);

	/* remember our original reference account */
	Ram_session_component *orig_ref_account = _ref_account;

	/* remove reference to us from the reference account */
	_ref_account->_remove_ref_account_member(this);

	/*
	 * Now, the '_ref_account' member has become invalid.
	 */

	Lock::Guard lock_guard(_ref_members_lock);

	/* assign all sub accounts to our original reference account */
	for (Ram_session_component *rsc; (rsc = _ref_members.first()); ) {

		_unsynchronized_remove_ref_account_member(rsc);

		/*
		 * This function grabs the '_ref_account_lock' of the '_ref_account',
		 * which is never identical to ourself. Hence, deadlock cannot happen
		 * here.
		 */
		orig_ref_account->_register_ref_account_member(rsc);
	}

	_ref_account = 0;
}
|