genode/repos/base/src/core/ram_session_component.cc
Christian Prochaska 0fd53c7fe4 Extract numeric string arguments with the correct signedness
There are many places where a numeric argument of an argument string
gets extracted as a signed long value and then assigned to an unsigned
long variable. If the value in the string was negative, it would not be
detected as invalid (and replaced by the default value), but would become
a bogus positive value.

With this patch, numeric values which are supposed to be unsigned get
extracted with the 'ulong_value()' function, which returns the default
value for negative numbers.

Fixes #1472
2015-04-13 14:18:15 +02:00
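
The pattern the commit addresses can be illustrated with a small, hedged sketch (the argument string, variable names, and helper function are made up for illustration; 'long_value()' is assumed to be the accessor the earlier code used, while 'ulong_value()' is the one used in the constructor further below):

#include <util/arg_string.h>

using namespace Genode;

/* illustrative only - not part of the original file */
static void signedness_example(const char *args) /* e.g. "phys_size=-4096" */
{
    /* old pattern: the signed extraction accepts the negative value, and the
     * implicit conversion to size_t turns it into a huge positive number */
    size_t bogus = Arg_string::find_arg(args, "phys_size").long_value(0);

    /* fixed pattern: 'ulong_value()' returns the default (here 0) for
     * negative input, so the bogus value never reaches the allocator */
    size_t sane = Arg_string::find_arg(args, "phys_size").ulong_value(0);

    (void)bogus; (void)sane;
}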

/*
 * \brief  Core implementation of the RAM session interface
 * \author Norman Feske
 * \date   2006-05-19
 */

/*
 * Copyright (C) 2006-2013 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */

/* Genode includes */
#include <base/printf.h>
#include <util/arg_string.h>

/* core includes */
#include <ram_session_component.h>

using namespace Genode;

static const bool verbose = false;


addr_t Ram_session_component::phys_addr(Ram_dataspace_capability ds)
{
    Object_pool<Dataspace_component>::Guard dsc(_ds_ep->lookup_and_lock(ds));
    if (!dsc) throw Invalid_dataspace();
    return dsc->phys_addr();
}


void Ram_session_component::_free_ds(Dataspace_component *ds)
{
    if (!ds) return;
    if (!ds->owner(this)) return;

    size_t ds_size = ds->size();

    /* tell entry point to forget the dataspace */
    _ds_ep->dissolve(ds);

    /* destroy native shared memory representation */
    _revoke_ram_ds(ds);

    /* XXX: remove dataspace from all RM sessions */

    /* free physical memory that was backing the dataspace */
    _ram_alloc->free((void *)ds->phys_addr(), ds_size);

    /* call dataspace destructors and free memory */
    destroy(&_ds_slab, ds);

    /* adjust payload */
    Lock::Guard lock_guard(_ref_members_lock);
    _payload -= ds_size;
}


int Ram_session_component::_transfer_quota(Ram_session_component *dst, size_t amount)
{
    /* check if recipient is a valid Ram_session_component */
    if (!dst) return -1;

    /* check for reference account relationship */
    if ((ref_account() != dst) && (dst->ref_account() != this))
        return -2;

    /* decrease quota limit of this session - check against used quota */
    if (_quota_limit < amount + _payload) {
        PWRN("Insufficient quota for transfer: %s", _label);
        PWRN(" have %zu, need %zu", _quota_limit - _payload, amount);
        return -3;
    }
    _quota_limit -= amount;

    /* increase quota_limit of recipient */
    dst->_quota_limit += amount;
    return 0;
}


void Ram_session_component::_register_ref_account_member(Ram_session_component *new_member)
{
    Lock::Guard lock_guard(_ref_members_lock);
    _ref_members.insert(new_member);
    new_member->_ref_account = this;
}


void Ram_session_component::_unsynchronized_remove_ref_account_member(Ram_session_component *member)
{
    member->_ref_account = 0;
    _ref_members.remove(member);
}


void Ram_session_component::_remove_ref_account_member(Ram_session_component *member)
{
    Lock::Guard lock_guard(_ref_members_lock);
    _unsynchronized_remove_ref_account_member(member);
}


Ram_dataspace_capability Ram_session_component::alloc(size_t ds_size, Cache_attribute cached)
{
    /* zero-sized dataspaces are not allowed */
    if (!ds_size) return Ram_dataspace_capability();

    /* dataspace allocation granularity is page size */
    ds_size = align_addr(ds_size, 12);

    /*
     * Check quota!
     *
     * In the worst case, we need to allocate a new slab block for the
     * meta data of the dataspace to be created - therefore, we add
     * the slab block size here.
     */
    if (used_quota() + SBS + ds_size > _quota_limit) {

        if (verbose) {
            PWRN("Quota exceeded: %s", _label);
            PWRN(" memory for slab: %zu", _ds_slab.consumed());
            PWRN(" used quota: %zu", used_quota());
            PWRN(" ds_size: %zu", ds_size);
            PWRN(" sizeof(Ram_session_component): %zu", sizeof(Ram_session_component));
            PWRN(" quota_limit: %zu", _quota_limit);
        }
        throw Quota_exceeded();
    }

    /*
     * Allocate physical backing store
     *
     * As an optimization for the use of large mapping sizes, we try to
     * align the dataspace in physical memory naturally (size-aligned).
     * If this does not work, we subsequently weaken the alignment constraint
     * until the allocation succeeds.
     */
    void *ds_addr = 0;
    bool alloc_succeeded = false;
    for (size_t align_log2 = log2(ds_size); align_log2 >= 12; align_log2--) {
        if (_ram_alloc->alloc_aligned(ds_size, &ds_addr, align_log2,
                                      _phys_start, _phys_end).is_ok()) {
            alloc_succeeded = true;
            break;
        }
    }

    /*
     * Normally, init's quota equals the size of physical memory and this quota
     * is distributed among the processes. As we check the quota before
     * allocating, the allocation should always succeed in theory. However,
     * fragmentation could cause a failing allocation.
     */
    if (!alloc_succeeded) {
        PERR("We ran out of physical memory while allocating %zu bytes", ds_size);
        throw Quota_exceeded();
    }

    Dataspace_component *ds;
    try {
        /*
         * For non-cached RAM dataspaces, we mark the dataspace as write
         * combined and expect the pager to evaluate this dataspace property
         * when resolving page faults.
         */
        ds = new (&_ds_slab)
            Dataspace_component(ds_size, (addr_t)ds_addr, cached, true, this);
    } catch (Allocator::Out_of_memory) {
        PWRN("Could not allocate metadata");

        /* cleanup unneeded resources */
        _ram_alloc->free(ds_addr);

        throw Out_of_metadata();
    }

    /*
     * Fill new dataspaces with zeros. For non-cached RAM dataspaces, this
     * function must also make sure to flush all cache lines related to the
     * address range used by the dataspace.
     */
    _clear_ds(ds);

    /* create native shared memory representation of dataspace */
    try {
        _export_ram_ds(ds);
    } catch (Out_of_metadata) {
        PWRN("could not export RAM dataspace of size 0x%zx", ds->size());

        /* cleanup unneeded resources */
        destroy(&_ds_slab, ds);
        _ram_alloc->free(ds_addr);

        throw Quota_exceeded();
    }

    if (verbose)
        PDBG("ds_size=%zu, used_quota=%zu quota_limit=%zu",
             ds_size, used_quota(), _quota_limit);

    Dataspace_capability result = _ds_ep->manage(ds);

    Lock::Guard lock_guard(_ref_members_lock);

    /* keep track of the used quota for actual payload */
    _payload += ds_size;

    return static_cap_cast<Ram_dataspace>(result);
}


void Ram_session_component::free(Ram_dataspace_capability ds_cap)
{
    Dataspace_component *ds =
        dynamic_cast<Dataspace_component *>(_ds_ep->lookup_and_lock(ds_cap));
    if (!ds)
        return;

    _free_ds(ds);
}


int Ram_session_component::ref_account(Ram_session_capability ram_session_cap)
{
    /* the reference account cannot be defined twice */
    if (_ref_account) return -2;

    Object_pool<Ram_session_component>::Guard ref(_ram_session_ep->lookup_and_lock(ram_session_cap));

    /* check if recipient is a valid Ram_session_component */
    if (!ref) return -1;

    /* deny the usage of the ram session as its own ref account */
    /* XXX also check for cycles along the tree of ref accounts */
    if (ref == this) return -3;

    _ref_account = ref;
    _ref_account->_register_ref_account_member(this);
    return 0;
}


int Ram_session_component::transfer_quota(Ram_session_capability ram_session_cap,
                                          size_t amount)
{
    if (verbose)
        PDBG("amount=%zu", amount);

    Object_pool<Ram_session_component>::Guard dst(_ram_session_ep->lookup_and_lock(ram_session_cap));
    return _transfer_quota(dst, amount);
}


Ram_session_component::Ram_session_component(Rpc_entrypoint  *ds_ep,
                                             Rpc_entrypoint  *ram_session_ep,
                                             Range_allocator *ram_alloc,
                                             Allocator       *md_alloc,
                                             const char      *args,
                                             size_t           quota_limit)
:
    _ds_ep(ds_ep), _ram_session_ep(ram_session_ep), _ram_alloc(ram_alloc),
    _quota_limit(quota_limit), _payload(0),
    _md_alloc(md_alloc, Arg_string::find_arg(args, "ram_quota").ulong_value(0)),
    _ds_slab(&_md_alloc), _ref_account(0),
    _phys_start(Arg_string::find_arg(args, "phys_start").ulong_value(0))
{
    Arg_string::find_arg(args, "label").string(_label, sizeof(_label), "");

    size_t phys_size = Arg_string::find_arg(args, "phys_size").ulong_value(0);

    /* sanitize overflow and interpret phys_size==0 as maximum phys address */
    if (_phys_start + phys_size <= _phys_start)
        _phys_end = ~0UL;
    else
        _phys_end = _phys_start + phys_size - 1;
}


Ram_session_component::~Ram_session_component()
{
    /* destroy all dataspaces */
    for (Dataspace_component *ds; (ds = _ds_slab.raw()->first_object()); _free_ds(ds));

    if (_payload != 0)
        PWRN("Remaining payload of %zu in ram session to destroy", _payload);

    if (!_ref_account) return;

    /* transfer remaining quota to reference account */
    _transfer_quota(_ref_account, _quota_limit);

    /* remember our original reference account */
    Ram_session_component *orig_ref_account = _ref_account;

    /* remove reference to us from the reference account */
    _ref_account->_remove_ref_account_member(this);

    /*
     * Now, the '_ref_account' member has become invalid.
     */

    Lock::Guard lock_guard(_ref_members_lock);

    /* assign all sub accounts to our original reference account */
    for (Ram_session_component *rsc; (rsc = _ref_members.first()); ) {

        _unsynchronized_remove_ref_account_member(rsc);

        /*
         * This function grabs the '_ref_account_lock' of the '_ref_account',
         * which is never identical to ourself. Hence, deadlock cannot happen
         * here.
         */
        orig_ref_account->_register_ref_account_member(rsc);
    }

    _ref_account = 0;
}
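
For context, a rough sketch (not part of this file, and only a hedged assumption about typical client code of this Genode generation) of how a component would exercise the RAM-session interface implemented above, using the classic 'env()' accessors; error handling is omitted:

#include <base/env.h>

static void client_side_usage_sketch()
{
    using namespace Genode;

    /* request 16 KiB of RAM from our RAM session (served by the code above) */
    Ram_dataspace_capability ds = env()->ram_session()->alloc(16*1024);

    /* make the dataspace visible in the local address space and use it */
    char *ptr = env()->rm_session()->attach(ds);
    ptr[0] = 42;

    /* detach the mapping and hand the backing store back to the session */
    env()->rm_session()->detach(ptr);
    env()->ram_session()->free(ds);
}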