base: redesign object pool using lambda interface

Instead of returning pointers to locked objects via a lookup function,
the new object pool implementation restricts object access to
functors or lambda expressions that are applied to the objects
within the pool itself.

Fix #884
Fix #1658
This commit is contained in:
Stefan Kalkowski 2015-08-10 13:34:16 +02:00 committed by Christian Helmuth
parent 555835c95b
commit 458b4d6fc4
66 changed files with 1616 additions and 1709 deletions

View File

@ -30,39 +30,41 @@ Core_rm_session::attach(Dataspace_capability ds_cap, size_t size,
{
using namespace Codezero;
Object_pool<Dataspace_component>::Guard ds(_ds_ep->lookup_and_lock(ds_cap));
if (!ds)
throw Invalid_dataspace();
auto lambda = [&] (Dataspace_component *ds) -> Local_addr {
if (!ds)
throw Invalid_dataspace();
if (size == 0)
size = ds->size();
if (size == 0)
size = ds->size();
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
size_t num_pages = page_rounded_size >> get_page_size_log2();
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
size_t num_pages = page_rounded_size >> get_page_size_log2();
if (use_local_addr) {
PERR("Parameter 'use_local_addr' not supported within core");
return 0;
}
if (use_local_addr) {
PERR("Parameter 'use_local_addr' not supported within core");
return nullptr;
}
if (offset) {
PERR("Parameter 'offset' not supported within core");
return 0;
}
if (offset) {
PERR("Parameter 'offset' not supported within core");
return nullptr;
}
/* allocate range in core's virtual address space */
void *virt_addr;
if (!platform()->region_alloc()->alloc(page_rounded_size, &virt_addr)) {
PERR("Could not allocate virtual address range in core of size %zd\n",
page_rounded_size);
return false;
}
/* allocate range in core's virtual address space */
void *virt_addr;
if (!platform()->region_alloc()->alloc(page_rounded_size, &virt_addr)) {
PERR("Could not allocate virtual address range in core of size %zd\n",
page_rounded_size);
return nullptr;
}
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages)) {
PERR("core-local memory mapping failed virt=%lx, phys=%lx\n",
(addr_t)virt_addr, ds->phys_addr());
return 0;
}
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages)) {
PERR("core-local memory mapping failed virt=%lx, phys=%lx\n",
(addr_t)virt_addr, ds->phys_addr());
return nullptr;
}
return virt_addr;
return virt_addr;
};
return _ds_ep->apply(ds_cap, lambda);
}

View File

@ -176,7 +176,7 @@ void Ipc_pager::acknowledge_wakeup()
** Pager entrypoint **
**********************/
Untyped_capability Pager_entrypoint::_manage(Pager_object *obj)
Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
{
return Untyped_capability(_tid.l4id, obj->badge());
return Untyped_capability(_tid.l4id, badge);
}

View File

@ -78,7 +78,7 @@ void Ipc_pager::acknowledge_wakeup()
** Pager Entrypoint **
**********************/
Untyped_capability Pager_entrypoint::_manage(Pager_object *obj)
Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
{
return Untyped_capability(_tid.l4id, obj->badge());
return Untyped_capability(_tid.l4id, badge);
}

View File

@ -70,24 +70,13 @@ void Rpc_entrypoint::entry()
continue;
}
/* atomically lookup and lock referenced object */
Object_pool<Rpc_object_base>::Guard curr_obj(lookup_and_lock(srv.badge()));
if (!curr_obj)
continue;
apply(srv.badge(), [&] (Rpc_object_base *obj) {
if (!obj) return;
{
Lock::Guard lock_guard(_curr_obj_lock);
_curr_obj = curr_obj;
}
/* dispatch request */
try { srv.ret(_curr_obj->dispatch(opcode, srv, srv)); }
catch (Blocking_canceled) { }
{
Lock::Guard lock_guard(_curr_obj_lock);
_curr_obj = 0;
}
try {
srv.ret(obj->dispatch(opcode, srv, srv));
} catch(Blocking_canceled&) { }
});
}
/* answer exit call, thereby wake up '~Rpc_entrypoint' */
@ -95,6 +84,4 @@ void Rpc_entrypoint::entry()
/* defer the destruction of 'Ipc_server' until '~Rpc_entrypoint' is ready */
_delay_exit.lock();
}

View File

@ -40,14 +40,16 @@ void Genode::Cpu_session_component::enable_vcpu(Genode::Thread_capability thread
using namespace Genode;
using namespace Fiasco;
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return;
auto lambda = [&] (Cpu_thread_component *thread) {
if (!thread) return;
Native_thread tid = thread->platform_thread()->thread().local.dst();
Native_thread tid = thread->platform_thread()->thread().local.dst();
l4_msgtag_t tag = l4_thread_vcpu_control(tid, vcpu_state);
if (l4_msgtag_has_error(tag))
PWRN("l4_thread_vcpu_control failed");
l4_msgtag_t tag = l4_thread_vcpu_control(tid, vcpu_state);
if (l4_msgtag_has_error(tag))
PWRN("l4_thread_vcpu_control failed");
};
_thread_ep->apply(thread_cap, lambda);
}
@ -56,10 +58,11 @@ Genode::Cpu_session_component::native_cap(Genode::Thread_capability cap)
{
using namespace Genode;
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(cap));
if (!thread) return Native_capability();
return thread->platform_thread()->thread().local;
auto lambda = [&] (Cpu_thread_component *thread) {
return (!thread) ? Native_capability()
: thread->platform_thread()->thread().local;
};
return _thread_ep->apply(cap, lambda);
}
@ -97,15 +100,17 @@ void Genode::Cpu_session_component::single_step(Genode::Thread_capability thread
{
using namespace Genode;
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return;
auto lambda = [&] (Cpu_thread_component *thread) {
if (!thread) return;
Native_thread tid = thread->platform_thread()->thread().local.dst();
Native_thread tid = thread->platform_thread()->thread().local.dst();
enum { THREAD_SINGLE_STEP = 0x40000 };
int flags = enable ? THREAD_SINGLE_STEP : 0;
enum { THREAD_SINGLE_STEP = 0x40000 };
int flags = enable ? THREAD_SINGLE_STEP : 0;
Fiasco::l4_thread_ex_regs(tid, ~0UL, ~0UL, flags);
Fiasco::l4_thread_ex_regs(tid, ~0UL, ~0UL, flags);
};
_thread_ep->apply(thread_cap, lambda);
}

View File

@ -42,96 +42,95 @@ void Pager_entrypoint::entry()
reply_pending = false;
/* lookup referenced object */
Object_pool<Pager_object>::Guard obj(lookup_and_lock(_pager.badge()));
apply(_pager.badge(), [&] (Pager_object *obj) {
/* the pager_object might be destroyed, while we got the message */
if (!obj) {
PWRN("No pager object found!");
return;
}
/* the pager_object might be destroyed, while we got the message */
if (!obj) {
PWRN("No pager object found!");
continue;
}
switch (_pager.msg_type()) {
case Ipc_pager::PAGEFAULT:
case Ipc_pager::EXCEPTION:
{
if (_pager.is_exception()) {
Lock::Guard guard(obj->state.lock);
_pager.get_regs(&obj->state);
obj->state.exceptions++;
obj->state.in_exception = true;
obj->submit_exception_signal();
return;
}
switch (_pager.msg_type()) {
/* handle request */
if (obj->pager(_pager)) {
/* could not resolv - leave thread in pagefault */
PDBG("Could not resolve pf=%p ip=%p",
(void*)_pager.fault_addr(), (void*)_pager.fault_ip());
} else {
_pager.set_reply_dst(obj->badge());
reply_pending = true;
return;
}
break;
}
case Ipc_pager::PAGEFAULT:
case Ipc_pager::EXCEPTION:
{
if (_pager.is_exception()) {
case Ipc_pager::WAKE_UP:
{
/*
* We got a request from one of cores region-manager sessions
* to answer the pending page fault of a resolved region-manager
* client, or to resume a previously paused thread. Hence, we
* have to send a reply to the specified thread and answer the
* call.
*/
/* send reply to the caller */
_pager.set_reply_dst(Native_thread());
_pager.acknowledge_wakeup();
{
Lock::Guard guard(obj->state.lock);
/* revert exception flag */
obj->state.in_exception = false;
/* set new register contents */
_pager.set_regs(obj->state);
}
/* send wake up message to requested thread */
_pager.set_reply_dst(obj->badge());
_pager.acknowledge_exception();
break;
}
/*
* Handle exceptions that are artificially generated by the pause
* function of the CPU service.
*/
case Ipc_pager::PAUSE:
{
Lock::Guard guard(obj->state.lock);
_pager.get_regs(&obj->state);
obj->state.exceptions++;
obj->state.in_exception = true;
obj->submit_exception_signal();
continue;
/*
* It might occur that the thread raises an exception,
* after it already got resumed by the cpu_session, in
* that case we unblock it immediately.
*/
if (!obj->state.paused) {
_pager.set_reply_dst(obj->badge());
reply_pending = true;
}
break;
}
/* handle request */
if (obj->pager(_pager)) {
/* could not resolv - leave thread in pagefault */
PDBG("Could not resolve pf=%p ip=%p",
(void*)_pager.fault_addr(), (void*)_pager.fault_ip());
} else {
_pager.set_reply_dst(obj->badge());
reply_pending = true;
continue;
}
break;
default:
PERR("Got unknown message type %x!", _pager.msg_type());
}
case Ipc_pager::WAKE_UP:
{
/*
* We got a request from one of cores region-manager sessions
* to answer the pending page fault of a resolved region-manager
* client, or to resume a previously paused thread. Hence, we
* have to send a reply to the specified thread and answer the
* call.
*/
/* send reply to the caller */
_pager.set_reply_dst(Native_thread());
_pager.acknowledge_wakeup();
{
Lock::Guard guard(obj->state.lock);
/* revert exception flag */
obj->state.in_exception = false;
/* set new register contents */
_pager.set_regs(obj->state);
}
/* send wake up message to requested thread */
_pager.set_reply_dst(obj->badge());
_pager.acknowledge_exception();
break;
}
/*
* Handle exceptions that are artificially generated by the pause
* function of the CPU service.
*/
case Ipc_pager::PAUSE:
{
Lock::Guard guard(obj->state.lock);
_pager.get_regs(&obj->state);
obj->state.exceptions++;
obj->state.in_exception = true;
/*
* It might occur that the thread raises an exception,
* after it already got resumed by the cpu_session, in
* that case we unblock it immediately.
*/
if (!obj->state.paused) {
_pager.set_reply_dst(obj->badge());
reply_pending = true;
}
break;
}
default:
PERR("Got unknown message type %x!", _pager.msg_type());
}
});
};
}
@ -141,7 +140,7 @@ void Pager_entrypoint::dissolve(Pager_object *obj)
/* cleanup at cap session */
_cap_session->free(obj->Object_pool<Pager_object>::Entry::cap());
remove_locked(obj);
remove(obj);
}

View File

@ -72,23 +72,13 @@ void Rpc_entrypoint::entry()
srv.ret(Ipc_client::ERR_INVALID_OBJECT);
/* atomically lookup and lock referenced object */
Object_pool<Rpc_object_base>::Guard curr_obj(lookup_and_lock(srv.badge()));
if (!curr_obj)
continue;
apply(srv.badge(), [&] (Rpc_object_base *curr_obj) {
if (!curr_obj) return;
{
Lock::Guard lock_guard(_curr_obj_lock);
_curr_obj = curr_obj;
}
/* dispatch request */
try { srv.ret(_curr_obj->dispatch(opcode, srv, srv)); }
catch (Blocking_canceled) { }
{
Lock::Guard lock_guard(_curr_obj_lock);
_curr_obj = 0;
}
/* dispatch request */
try { srv.ret(curr_obj->dispatch(opcode, srv, srv)); }
catch (Blocking_canceled) { }
});
}
/* answer exit call, thereby wake up '~Rpc_entrypoint' */

View File

@ -28,42 +28,44 @@ Core_rm_session::attach(Dataspace_capability ds_cap, size_t size,
off_t offset, bool use_local_addr,
Rm_session::Local_addr, bool executable)
{
Object_pool<Dataspace_component>::Guard ds(_ds_ep->lookup_and_lock(ds_cap));
if (!ds)
throw Invalid_dataspace();
auto lambda = [&] (Dataspace_component *ds) -> Local_addr {
if (!ds)
throw Invalid_dataspace();
if (size == 0)
size = ds->size();
if (size == 0)
size = ds->size();
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
if (use_local_addr) {
PERR("Parameter 'use_local_addr' not supported within core");
return 0UL;
}
if (use_local_addr) {
PERR("Parameter 'use_local_addr' not supported within core");
return nullptr;
}
if (offset) {
PERR("Parameter 'offset' not supported within core");
return 0UL;
}
if (offset) {
PERR("Parameter 'offset' not supported within core");
return nullptr;
}
/* allocate range in core's virtual address space */
void *virt_addr;
if (!platform()->region_alloc()->alloc_aligned(page_rounded_size,
&virt_addr,
get_page_size_log2()).is_ok()) {
PERR("Could not allocate virtual address range in core of size %zd\n",
page_rounded_size);
return false;
}
/* allocate range in core's virtual address space */
void *virt_addr;
if (!platform()->region_alloc()->alloc_aligned(page_rounded_size,
&virt_addr,
get_page_size_log2()).is_ok()) {
PERR("Could not allocate virtual address range in core of size %zd\n",
page_rounded_size);
return nullptr;
}
/* map the dataspace's physical pages to corresponding virtual addresses */
unsigned num_pages = page_rounded_size >> get_page_size_log2();
Page_flags const flags = Page_flags::apply_mapping(ds.object()->writable(),
ds.object()->cacheability(),
ds.object()->is_io_mem());
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages, flags))
return 0UL;
/* map the dataspace's physical pages to corresponding virtual addresses */
unsigned num_pages = page_rounded_size >> get_page_size_log2();
Page_flags const flags = Page_flags::apply_mapping(ds->writable(),
ds->cacheability(),
ds->is_io_mem());
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages, flags))
return nullptr;
return virt_addr;
return virt_addr;
};
return _ds_ep->apply(ds_cap, lambda);
}

View File

@ -25,10 +25,11 @@ Ram_dataspace_capability
Cpu_session_component::utcb(Thread_capability thread_cap)
{
/* look up requested UTCB dataspace */
Object_pool<Cpu_thread_component>::Guard
t(_thread_ep->lookup_and_lock(thread_cap));
if (!t) return Ram_dataspace_capability();
return t->platform_thread()->utcb();
auto lambda = [] (Cpu_thread_component *t) {
if (!t) return Ram_dataspace_capability();
return t->platform_thread()->utcb();
};
return _thread_ep->apply(thread_cap, lambda);
}

View File

@ -104,7 +104,7 @@ Pager_object::Pager_object(unsigned const badge, Affinity::Location)
void Pager_entrypoint::dissolve(Pager_object * const o)
{
remove_locked(o);
remove(o);
}

View File

@ -129,24 +129,26 @@ int Platform_thread::start(void * const ip, void * const sp)
if (_main_thread) {
/* lookup dataspace component for physical address */
Rpc_entrypoint * ep = core_env()->entrypoint();
Object_pool<Dataspace_component>::Guard dsc(ep->lookup_and_lock(_utcb));
if (!dsc) return -1;
auto lambda = [&] (Dataspace_component *dsc) {
if (!dsc) return -1;
/* lock the address space */
Locked_ptr<Address_space> locked_ptr(_address_space);
if (!locked_ptr.is_valid()) {
PERR("invalid RM client");
return -1;
/* lock the address space */
Locked_ptr<Address_space> locked_ptr(_address_space);
if (!locked_ptr.is_valid()) {
PERR("invalid RM client");
return -1;
};
Page_flags const flags = Page_flags::apply_mapping(true, CACHED, false);
_utcb_pd_addr = utcb_main_thread();
Hw::Address_space * as = static_cast<Hw::Address_space*>(&*locked_ptr);
if (!as->insert_translation((addr_t)_utcb_pd_addr, dsc->phys_addr(),
sizeof(Native_utcb), flags)) {
PERR("failed to attach UTCB");
return -1;
}
return 0;
};
Page_flags const flags = Page_flags::apply_mapping(true, CACHED, false);
_utcb_pd_addr = utcb_main_thread();
Hw::Address_space * as = static_cast<Hw::Address_space*>(&*locked_ptr);
if (!as->insert_translation((addr_t)_utcb_pd_addr, dsc->phys_addr(),
sizeof(Native_utcb), flags)) {
PERR("failed to attach UTCB");
return -1;
}
if (core_env()->entrypoint()->apply(_utcb, lambda)) return -1;
}
/* initialize thread registers */

View File

@ -60,40 +60,42 @@ void Pager_entrypoint::entry()
{
/* receive fault */
if (Kernel::await_signal(_cap.dst())) continue;
Pager_object * po =
*(Pager_object**)Thread_base::myself()->utcb()->base();
Untyped_capability cap =
(*(Pager_object**)Thread_base::myself()->utcb()->base())->cap();
/*
* Synchronize access and ensure that the object is still managed
*
* FIXME: The implicit lookup of the object isn't needed.
*/
unsigned const pon = po->cap().local_name();
Object_pool<Pager_object>::Guard pog(lookup_and_lock(pon));
if (!pog) continue;
auto lambda = [&] (Pager_object *po) {
if (!po) return;
/* fetch fault data */
Platform_thread * const pt = (Platform_thread *)pog->badge();
if (!pt) {
PWRN("failed to get platform thread of faulter");
continue;
}
/* fetch fault data */
Platform_thread * const pt = (Platform_thread *)po->badge();
if (!pt) {
PWRN("failed to get platform thread of faulter");
return;
}
_fault.pd = pt->kernel_object()->fault_pd();
_fault.ip = pt->kernel_object()->ip;
_fault.addr = pt->kernel_object()->fault_addr();
_fault.writes = pt->kernel_object()->fault_writes();
_fault.signal = pt->kernel_object()->fault_signal();
_fault.pd = pt->kernel_object()->fault_pd();
_fault.ip = pt->kernel_object()->ip;
_fault.addr = pt->kernel_object()->fault_addr();
_fault.writes = pt->kernel_object()->fault_writes();
_fault.signal = pt->kernel_object()->fault_signal();
/* try to resolve fault directly via local region managers */
if (pog->pager(*this)) { continue; }
/* try to resolve fault directly via local region managers */
if (po->pager(*this)) return;
/* apply mapping that was determined by the local region managers */
if (apply_mapping()) {
PWRN("failed to apply mapping");
continue;
}
/* let pager object go back to no-fault state */
pog->wake_up();
/* apply mapping that was determined by the local region managers */
if (apply_mapping()) {
PWRN("failed to apply mapping");
return;
}
/* let pager object go back to no-fault state */
po->wake_up();
};
apply(cap, lambda);
}
}

View File

@ -52,14 +52,16 @@ Signal_receiver_capability Signal_session_component::alloc_receiver()
void Signal_session_component::free_receiver(Signal_receiver_capability cap)
{
/* look up ressource info */
Receiver::Pool::Guard r(_receivers.lookup_and_lock(cap));
if (!r) {
PERR("unknown signal receiver");
throw Kill_receiver_failed();
}
/* release resources */
_receivers.remove_locked(r);
destroy(&_receivers_slab, r.object());
auto lambda = [&] (Receiver *r) {
if (!r) {
PERR("unknown signal receiver");
throw Kill_receiver_failed();
}
/* release resources */
_receivers.remove(r);
destroy(&_receivers_slab, r);
};
_receivers.apply(cap, lambda);
}
@ -68,35 +70,39 @@ Signal_session_component::alloc_context(Signal_receiver_capability src,
unsigned const imprint)
{
/* look up ressource info */
Receiver::Pool::Guard r(_receivers.lookup_and_lock(src));
if (!r) {
PERR("unknown signal receiver");
throw Create_context_failed();
}
auto lambda = [&] (Receiver *r) {
if (!r) {
PERR("unknown signal receiver");
throw Create_context_failed();
}
try {
Context * c = new (_contexts_slab) Context(*r.object(), imprint);
_contexts.insert(c);
return reinterpret_cap_cast<Signal_context>(c->cap());
} catch (Allocator::Out_of_memory&) {
PERR("failed to allocate signal-context resources");
throw Out_of_metadata();
}
return reinterpret_cap_cast<Signal_context>(Untyped_capability());
try {
Context * c = new (_contexts_slab) Context(*r, imprint);
_contexts.insert(c);
return reinterpret_cap_cast<Signal_context>(c->cap());
} catch (Allocator::Out_of_memory&) {
PERR("failed to allocate signal-context resources");
throw Out_of_metadata();
}
return reinterpret_cap_cast<Signal_context>(Untyped_capability());
};
return _receivers.apply(src, lambda);
}
void Signal_session_component::free_context(Signal_context_capability cap)
{
/* look up ressource info */
Context::Pool::Guard c(_contexts.lookup_and_lock(cap));
if (!c) {
PERR("unknown signal context");
throw Kill_context_failed();
}
/* release resources */
_contexts.remove_locked(c);
destroy(&_contexts_slab, c.object());
auto lambda = [&] (Context *c) {
if (!c) {
PERR("unknown signal context");
throw Kill_context_failed();
}
/* release resources */
_contexts.remove(c);
destroy(&_contexts_slab, c);
};
_contexts.apply(cap, lambda);
}
@ -108,12 +114,8 @@ Signal_session_component::Signal_session_component(Allocator * const allocator,
Signal_session_component::~Signal_session_component()
{
while (Context * const c = _contexts.first_locked()) {
_contexts.remove_locked(c);
destroy(&_contexts_slab, c);
}
while (Receiver * const r = _receivers.first_locked()) {
_receivers.remove_locked(r);
destroy(&_receivers_slab, r);
}
_contexts.remove_all([this] (Context * c) {
destroy(&_contexts_slab, c);});
_receivers.remove_all([this] (Receiver * r) {
destroy(&_receivers_slab, r);});
}

View File

@ -53,10 +53,11 @@ void Vm_session_component::_attach(addr_t phys_addr, addr_t vm_addr, size_t size
void Vm_session_component::attach(Dataspace_capability ds_cap, addr_t vm_addr)
{
/* check dataspace validity */
Object_pool<Dataspace_component>::Guard dsc(_ds_ep->lookup_and_lock(ds_cap));
if (!dsc) throw Invalid_dataspace();
_ds_ep->apply(ds_cap, [&] (Dataspace_component *dsc) {
if (!dsc) throw Invalid_dataspace();
_attach(dsc->phys_addr(), vm_addr, dsc->size());
_attach(dsc->phys_addr(), vm_addr, dsc->size());
});
}

View File

@ -15,35 +15,34 @@ using namespace Genode;
void Cpu_session_component::thread_id(Thread_capability thread_cap, int pid, int tid)
{
Object_pool<Cpu_thread_component>::Guard
thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return;
thread->platform_thread()->thread_id(pid, tid);
_thread_ep->apply(thread_cap, [&] (Cpu_thread_component *thread) {
if (thread) thread->platform_thread()->thread_id(pid, tid); });
}
Untyped_capability Cpu_session_component::server_sd(Thread_capability thread_cap)
{
Object_pool<Cpu_thread_component>::Guard
thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return Untyped_capability();
auto lambda = [] (Cpu_thread_component *thread) {
if (!thread) return Untyped_capability();
enum { DUMMY_LOCAL_NAME = 0 };
typedef Native_capability::Dst Dst;
return Untyped_capability(Dst(thread->platform_thread()->server_sd()),
DUMMY_LOCAL_NAME);
enum { DUMMY_LOCAL_NAME = 0 };
typedef Native_capability::Dst Dst;
return Untyped_capability(Dst(thread->platform_thread()->server_sd()),
DUMMY_LOCAL_NAME);
};
return _thread_ep->apply(thread_cap, lambda);
}
Untyped_capability Cpu_session_component::client_sd(Thread_capability thread_cap)
{
Object_pool<Cpu_thread_component>::Guard
thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return Untyped_capability();
auto lambda = [] (Cpu_thread_component *thread) {
if (!thread) return Untyped_capability();
enum { DUMMY_LOCAL_NAME = 0 };
typedef Native_capability::Dst Dst;
return Untyped_capability(Dst(thread->platform_thread()->client_sd()),
DUMMY_LOCAL_NAME);
enum { DUMMY_LOCAL_NAME = 0 };
typedef Native_capability::Dst Dst;
return Untyped_capability(Dst(thread->platform_thread()->client_sd()),
DUMMY_LOCAL_NAME);
};
return _thread_ep->apply(thread_cap, lambda);
}

View File

@ -40,16 +40,15 @@ namespace Genode {
*/
Thread_capability thread_cap() { return _thread_cap; } const
void thread_cap(Thread_capability cap) { _thread_cap = cap; }
/* required by lookup_and_lock, provided by Object_pool::Entry normally */
void release() { }
};
struct Pager_entrypoint
{
Pager_entrypoint(Cap_session *) { }
Pager_object *lookup_and_lock(Pager_capability) { return 0; }
template <typename FUNC>
auto apply(Pager_capability, FUNC f) -> decltype(f(nullptr)) {
return f(nullptr); }
};
}

View File

@ -24,6 +24,7 @@
namespace Genode {
class Dataspace_component;
class Pd_session_component : public Rpc_object<Linux_pd_session, Pd_session_component>
{
private:
@ -39,6 +40,8 @@ namespace Genode {
Parent_capability _parent;
Rpc_entrypoint *_ds_ep;
void _start(Dataspace_component *ds);
public:
/**

View File

@ -305,6 +305,98 @@ static const char *get_env(const char *key)
** PD session interface **
**************************/
void Pd_session_component::_start(Dataspace_component *ds)
{
const char *tmp_filename = "temporary_executable_elf_dataspace_file_for_execve";
if (!ds) {
PERR("could not lookup binary, aborted PD startup");
return; /* XXX reflect error to client */
}
/* we need 's' on stack to make it an lvalue with an lvalue member we use the pointer to */
Linux_dataspace::Filename s = ds->fname();
const char *filename = s.buf;
/*
* In order to be executable via 'execve', a program must be represented as
* a file on the Linux file system. However, this is not the case for a
* plain RAM dataspace that contains an ELF image. In this case, we copy
* the dataspace content into a temporary file whose path is passed to
* 'execve()'.
*/
if (strcmp(filename, "") == 0) {
filename = tmp_filename;
int tmp_binary_fd = lx_open(filename, O_CREAT | O_EXCL | O_WRONLY, S_IRWXU);
if (tmp_binary_fd < 0) {
PERR("Could not create file '%s'", filename);
return; /* XXX reflect error to client */
}
char buf[4096];
int num_bytes = 0;
while ((num_bytes = lx_read(ds->fd().dst().socket, buf, sizeof(buf))) != 0)
lx_write(tmp_binary_fd, buf, num_bytes);
lx_close(tmp_binary_fd);
}
/* pass parent capability as environment variable to the child */
enum { ENV_STR_LEN = 256 };
static char envbuf[5][ENV_STR_LEN];
Genode::snprintf(envbuf[1], ENV_STR_LEN, "parent_local_name=%lu",
_parent.local_name());
Genode::snprintf(envbuf[2], ENV_STR_LEN, "DISPLAY=%s",
get_env("DISPLAY"));
Genode::snprintf(envbuf[3], ENV_STR_LEN, "HOME=%s",
get_env("HOME"));
Genode::snprintf(envbuf[4], ENV_STR_LEN, "LD_LIBRARY_PATH=%s",
get_env("LD_LIBRARY_PATH"));
char *env[] = { &envbuf[0][0], &envbuf[1][0], &envbuf[2][0],
&envbuf[3][0], &envbuf[4][0], 0 };
/* prefix name of Linux program (helps killing some zombies) */
char const *prefix = "[Genode] ";
char pname_buf[sizeof(_label) + sizeof(prefix)];
snprintf(pname_buf, sizeof(pname_buf), "%s%s", prefix, _label);
char *argv_buf[2];
argv_buf[0] = pname_buf;
argv_buf[1] = 0;
/*
* We cannot create the new process via 'fork()' because all our used
* memory including stack memory is backed by dataspaces, which had been
* mapped with the 'MAP_SHARED' flag. Therefore, after being created, the
* new process starts using the stack with the same physical memory pages
* as used by parent process. This would ultimately lead to stack
* corruption. To prevent both processes from concurrently accessing the
* same stack, we pause the execution of the parent until the child calls
* 'execve'. From then on, the child has its private memory layout. The
* desired behaviour is normally provided by 'vfork' but we use the more
* modern 'clone' call for this purpose.
*/
enum { STACK_SIZE = 4096 };
static char stack[STACK_SIZE]; /* initial stack used by the child until
calling 'execve' */
/*
* Argument frame as passed to 'clone'. Because, we can only pass a single
* pointer, all arguments are embedded within the 'execve_args' struct.
*/
Execve_args arg(filename, _root, argv_buf, env, _uid, _gid,
_parent.dst().socket);
_pid = lx_create_process((int (*)(void *))_exec_child,
stack + STACK_SIZE - sizeof(umword_t), &arg);
if (strcmp(filename, tmp_filename) == 0)
lx_unlink(filename);
}
Pd_session_component::Pd_session_component(Rpc_entrypoint * ep,
Allocator * md_alloc,
const char * args)
@ -366,94 +458,7 @@ int Pd_session_component::assign_parent(Parent_capability parent)
void Pd_session_component::start(Capability<Dataspace> binary)
{
const char *tmp_filename = "temporary_executable_elf_dataspace_file_for_execve";
/* lookup binary dataspace */
Object_pool<Dataspace_component>::Guard ds(_ds_ep->lookup_and_lock(binary));
if (!ds) {
PERR("could not lookup binary, aborted PD startup");
return; /* XXX reflect error to client */
}
/* we need 's' on stack to make it an lvalue with an lvalue member we use the pointer to */
Linux_dataspace::Filename s = ds->fname();
const char *filename = s.buf;
/*
* In order to be executable via 'execve', a program must be represented as
* a file on the Linux file system. However, this is not the case for a
* plain RAM dataspace that contains an ELF image. In this case, we copy
* the dataspace content into a temporary file whose path is passed to
* 'execve()'.
*/
if (strcmp(filename, "") == 0) {
filename = tmp_filename;
int tmp_binary_fd = lx_open(filename, O_CREAT | O_EXCL | O_WRONLY, S_IRWXU);
if (tmp_binary_fd < 0) {
PERR("Could not create file '%s'", filename);
return; /* XXX reflect error to client */
}
char buf[4096];
int num_bytes = 0;
while ((num_bytes = lx_read(ds->fd().dst().socket, buf, sizeof(buf))) != 0)
lx_write(tmp_binary_fd, buf, num_bytes);
lx_close(tmp_binary_fd);
}
/* pass parent capability as environment variable to the child */
enum { ENV_STR_LEN = 256 };
static char envbuf[5][ENV_STR_LEN];
Genode::snprintf(envbuf[1], ENV_STR_LEN, "parent_local_name=%lu",
_parent.local_name());
Genode::snprintf(envbuf[2], ENV_STR_LEN, "DISPLAY=%s",
get_env("DISPLAY"));
Genode::snprintf(envbuf[3], ENV_STR_LEN, "HOME=%s",
get_env("HOME"));
Genode::snprintf(envbuf[4], ENV_STR_LEN, "LD_LIBRARY_PATH=%s",
get_env("LD_LIBRARY_PATH"));
char *env[] = { &envbuf[0][0], &envbuf[1][0], &envbuf[2][0],
&envbuf[3][0], &envbuf[4][0], 0 };
/* prefix name of Linux program (helps killing some zombies) */
char const *prefix = "[Genode] ";
char pname_buf[sizeof(_label) + sizeof(prefix)];
snprintf(pname_buf, sizeof(pname_buf), "%s%s", prefix, _label);
char *argv_buf[2];
argv_buf[0] = pname_buf;
argv_buf[1] = 0;
/*
* We cannot create the new process via 'fork()' because all our used
* memory including stack memory is backed by dataspaces, which had been
* mapped with the 'MAP_SHARED' flag. Therefore, after being created, the
* new process starts using the stack with the same physical memory pages
* as used by parent process. This would ultimately lead to stack
* corruption. To prevent both processes from concurrently accessing the
* same stack, we pause the execution of the parent until the child calls
* 'execve'. From then on, the child has its private memory layout. The
* desired behaviour is normally provided by 'vfork' but we use the more
* modern 'clone' call for this purpose.
*/
enum { STACK_SIZE = 4096 };
static char stack[STACK_SIZE]; /* initial stack used by the child until
calling 'execve' */
/*
* Argument frame as passed to 'clone'. Because, we can only pass a single
* pointer, all arguments are embedded within the 'execve_args' struct.
*/
Execve_args arg(filename, _root, argv_buf, env, _uid, _gid,
_parent.dst().socket);
_pid = lx_create_process((int (*)(void *))_exec_child,
stack + STACK_SIZE - sizeof(umword_t), &arg);
if (strcmp(filename, tmp_filename) == 0)
lx_unlink(filename);
_ds_ep->apply(binary, [&] (Dataspace_component *ds) {
_start(ds); });
};

View File

@ -193,10 +193,8 @@ Platform_env_base::Rm_session_mmap::_dataspace_size(Capability<Dataspace> ds_cap
}
/* use local function call if called from the entrypoint */
Object_pool<Rpc_object_base>::Guard
ds_rpc(core_env()->entrypoint()->lookup_and_lock(ds_cap));
Dataspace * ds = dynamic_cast<Dataspace *>(&*ds_rpc);
return ds ? ds->size() : 0;
return core_env()->entrypoint()->apply(ds_cap, [] (Dataspace *ds) {
return ds ? ds->size() : 0; });
}
@ -212,10 +210,6 @@ int Platform_env_base::Rm_session_mmap::_dataspace_fd(Capability<Dataspace> ds_c
Capability<Linux_dataspace> lx_ds_cap = static_cap_cast<Linux_dataspace>(ds_cap);
Object_pool<Rpc_object_base>::Guard
ds_rpc(core_env()->entrypoint()->lookup_and_lock(lx_ds_cap));
Linux_dataspace * ds = dynamic_cast<Linux_dataspace *>(&*ds_rpc);
/*
* Return a duplicate of the dataspace file descriptor, which will be freed
* immediately after mmap'ing the file (see 'Rm_session_mmap').
@ -225,7 +219,8 @@ int Platform_env_base::Rm_session_mmap::_dataspace_fd(Capability<Dataspace> ds_c
* socket descriptor during the RPC handling). When later destroying the
* dataspace, the descriptor would unexpectedly be closed again.
*/
return ds ? lx_dup(ds->fd().dst().socket) : -1;
return core_env()->entrypoint()->apply(lx_ds_cap, [] (Linux_dataspace *ds) {
return ds ? lx_dup(ds->fd().dst().socket) : -1; });
}
@ -239,9 +234,6 @@ bool Platform_env_base::Rm_session_mmap::_dataspace_writable(Dataspace_capabilit
return writable;
}
Object_pool<Rpc_object_base>::Guard
ds_rpc(core_env()->entrypoint()->lookup_and_lock(ds_cap));
Dataspace * ds = dynamic_cast<Dataspace *>(&*ds_rpc);
return ds ? ds->writable() : false;
return core_env()->entrypoint()->apply(ds_cap, [] (Dataspace *ds) {
return ds ? ds->writable() : false; });
}

View File

@ -89,19 +89,7 @@ void Rpc_entrypoint::_dissolve(Rpc_object_base *obj)
Nova::revoke(Nova::Obj_crd(obj->cap().local_name(), 0), true);
/* make sure nobody is able to find this object */
remove_locked(obj);
/*
* The activation may execute a blocking operation in a dispatch function.
* Before resolving the corresponding object, we need to ensure that it is
* no longer used by an activation. Therefore, we to need cancel an
* eventually blocking operation and let the activation leave the context
* of the object.
*/
_leave_server_object(obj);
/* wait until nobody is inside dispatch */
obj->acquire();
remove(obj);
}
void Rpc_entrypoint::_activation_entry()
@ -115,10 +103,9 @@ void Rpc_entrypoint::_activation_entry()
Rpc_entrypoint *ep = static_cast<Rpc_entrypoint *>(Thread_base::myself());
/* delay start if requested so */
if (ep->_curr_obj) {
ep->_delay_start.lock();
ep->_delay_start.unlock();
{
/* potentially delay start */
Lock::Guard lock_guard(ep->_delay_start);
}
/* required to decrease ref count of capability used during last reply */
@ -134,30 +121,25 @@ void Rpc_entrypoint::_activation_entry()
srv.ret(Ipc_client::ERR_INVALID_OBJECT);
/* atomically lookup and lock referenced object */
ep->_curr_obj = ep->lookup_and_lock(id_pt);
if (!ep->_curr_obj) {
auto lambda = [&] (Rpc_object_base *obj) {
if (!obj) {
/*
* Badge is used to suppress error message solely.
* It's non zero during cleanup call of an
* rpc_object_base object, see _leave_server_object.
*/
if (!srv.badge())
PERR("could not look up server object, "
" return from call id_pt=%lx",
id_pt);
} else {
/*
* Badge is used to suppress error message solely.
* It's non zero during cleanup call of an
* rpc_object_base object, see _leave_server_object.
*/
if (!srv.badge())
PERR("could not look up server object, "
" return from call id_pt=%lx", id_pt);
return;
}
/* dispatch request */
try { srv.ret(ep->_curr_obj->dispatch(opcode, srv, srv)); }
try { srv.ret(obj->dispatch(opcode, srv, srv)); }
catch (Blocking_canceled) { }
Rpc_object_base * tmp = ep->_curr_obj;
ep->_curr_obj = 0;
tmp->release();
}
};
ep->apply(id_pt, lambda);
if (!ep->_rcv_buf.prepare_rcv_window((Nova::Utcb *)ep->utcb()))
PWRN("out of capability selectors for handling server requests");
@ -174,30 +156,6 @@ void Rpc_entrypoint::entry()
}
void Rpc_entrypoint::_leave_server_object(Rpc_object_base *)
{
using namespace Nova;
Utcb *utcb = reinterpret_cast<Utcb *>(Thread_base::myself()->utcb());
/* don't call ourself */
if (utcb == reinterpret_cast<Utcb *>(this->utcb()))
return;
/*
* Required outside of core. E.g. launchpad needs it to forcefully kill
* a client which blocks on a session opening request where the service
* is not up yet.
*/
cancel_blocking();
utcb->msg[0] = 0xdead;
utcb->set_msg_word(1);
if (uint8_t res = call(_cap.local_name()))
PERR("%8p - could not clean up entry point of thread 0x%p - res %u",
utcb, this->utcb(), res);
}
void Rpc_entrypoint::_block_until_cap_valid() { }
@ -220,7 +178,6 @@ Rpc_entrypoint::Rpc_entrypoint(Cap_session *cap_session, size_t stack_size,
Affinity::Location location)
:
Thread_base(Cpu_session::DEFAULT_WEIGHT, name, stack_size),
_curr_obj(start_on_construction ? 0 : (Rpc_object_base *)~0UL),
_delay_start(Lock::LOCKED),
_cap_session(cap_session)
{
@ -260,13 +217,10 @@ Rpc_entrypoint::~Rpc_entrypoint()
{
typedef Object_pool<Rpc_object_base> Pool;
if (Pool::first()) {
Pool::remove_all([&] (Rpc_object_base *obj) {
PWRN("Object pool not empty in %s", __func__);
/* dissolve all objects - objects are not destroyed! */
while (Rpc_object_base *obj = Pool::first())
_dissolve(obj);
}
_dissolve(obj);
});
if (!_cap.valid())
return;

View File

@ -32,20 +32,22 @@ Core_rm_session::attach(Dataspace_capability ds_cap, size_t size,
Rm_session::Local_addr local_addr,
bool executable)
{
Object_pool<Dataspace_component>::Guard ds(_ds_ep->lookup_and_lock(ds_cap));
if (!ds)
throw Invalid_dataspace();
auto lambda = [&] (Dataspace_component *ds) -> Local_addr {
if (!ds)
throw Invalid_dataspace();
if (use_local_addr) {
PERR("Parameter 'use_local_addr' not supported within core");
return 0UL;
}
if (use_local_addr) {
PERR("Parameter 'use_local_addr' not supported within core");
return nullptr;
}
if (offset) {
PERR("Parameter 'offset' not supported within core");
return 0UL;
}
if (offset) {
PERR("Parameter 'offset' not supported within core");
return nullptr;
}
/* allocate range in core's virtual address space */
return ds->core_local_addr();
/* allocate range in core's virtual address space */
return ds->core_local_addr();
};
return _ds_ep->apply(ds_cap, lambda);
}

View File

@ -23,12 +23,13 @@ using namespace Genode;
Native_capability
Cpu_session_component::pause_sync(Thread_capability thread_cap)
{
Object_pool<Cpu_thread_component>::Guard
thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread || !thread->platform_thread())
return Native_capability();
auto lambda = [] (Cpu_thread_component *thread) {
if (!thread || !thread->platform_thread())
return Native_capability();
return thread->platform_thread()->pause();
return thread->platform_thread()->pause();
};
return _thread_ep->apply(thread_cap, lambda);
}
@ -37,12 +38,13 @@ Cpu_session_component::single_step_sync(Thread_capability thread_cap, bool enabl
{
using namespace Genode;
Object_pool<Cpu_thread_component>::Guard
thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread || !thread->platform_thread())
return Native_capability();
auto lambda = [enable] (Cpu_thread_component *thread) {
if (!thread || !thread->platform_thread())
return Native_capability();
return thread->platform_thread()->single_step(enable);
return thread->platform_thread()->single_step(enable);
};
return _thread_ep->apply(thread_cap, lambda);
}

View File

@ -170,7 +170,6 @@ namespace Genode {
*/
void assign_pd(addr_t pd_sel) { _pd = pd_sel; }
addr_t pd_sel() const { return _pd; }
void dump_kernel_quota_usage(Pager_object * = (Pager_object *)~0UL);
void exception(uint8_t exit_id);
@ -386,12 +385,6 @@ namespace Genode {
*/
void ep(Pager_entrypoint *ep) { _ep = ep; }
/*
* Used for diagnostic/debugging purposes
* - see Pager_object::dump_kernel_quota_usage
*/
Pager_object * pager_head();
/**
* Thread interface
*/

View File

@ -500,29 +500,6 @@ Exception_handlers::Exception_handlers(Pager_object *obj)
******************/
void Pager_object::dump_kernel_quota_usage(Pager_object *obj)
{
if (obj == (Pager_object *)~0UL) {
unsigned use_cpu = location.xpos();
obj = pager_threads[use_cpu]->pager_head();
PINF("-- kernel memory usage of Genode PDs --");
}
if (!obj)
return;
addr_t limit = 0; addr_t usage = 0;
Nova::pd_ctrl_debug(obj->pd_sel(), limit, usage);
char const * thread_name = reinterpret_cast<char const *>(obj->badge());
PINF("pd=0x%lx pager=%p thread='%s' limit=0x%lx usage=0x%lx",
obj->pd_sel(), obj, thread_name, limit, usage);
dump_kernel_quota_usage(static_cast<Pager_object *>(obj->child(Genode::Avl_node_base::LEFT)));
dump_kernel_quota_usage(static_cast<Pager_object *>(obj->child(Genode::Avl_node_base::RIGHT)));
}
Pager_object::Pager_object(unsigned long badge, Affinity::Location location)
:
_badge(badge),
@ -847,9 +824,6 @@ Pager_activation_base::Pager_activation_base(const char *name, size_t stack_size
void Pager_activation_base::entry() { }
Pager_object * Pager_activation_base::pager_head() {
return _ep ? _ep->first() : nullptr; }
/**********************
** Pager entrypoint **
**********************/
@ -918,7 +892,7 @@ void Pager_entrypoint::dissolve(Pager_object *obj)
/* revoke cap selector locally */
revoke(pager_obj.dst(), true);
/* remove object from pool */
remove_locked(obj);
remove(obj);
/* take care that no faults are in-flight */
obj->cleanup_call();
}

View File

@ -146,9 +146,6 @@ int Platform_thread::start(void *ip, void *sp)
KEEP_FREE_PAGES_NOT_AVAILABLE_FOR_UPGRADE, UPPER_LIMIT_PAGES);
if (res != NOVA_OK) {
PERR("create_pd returned %d", res);
_pager->dump_kernel_quota_usage();
goto cleanup_pd;
}

View File

@ -87,19 +87,20 @@ Signal_context_capability Signal_session_component::alloc_context(long imprint)
void Signal_session_component::free_context(Signal_context_capability context_cap)
{
Signal_context_component * context =
dynamic_cast<Signal_context_component *>(_signal_queue.lookup_and_lock(context_cap.local_name()));
if (!context) {
PWRN("%p - specified signal-context capability has wrong type %lx",
this, context_cap.local_name());
return;
}
auto lambda = [&] (Signal_context_component *context) {
if (!context) {
PWRN("%p - specified signal-context capability has wrong type %lx",
this, context_cap.local_name());
return;
}
_signal_queue.remove_locked(context);
destroy(&_contexts_slab, context);
_signal_queue.remove(context);
destroy(&_contexts_slab, context);
Nova::revoke(Nova::Obj_crd(context_cap.local_name(), 0));
cap_map()->remove(context_cap.local_name(), 0);
Nova::revoke(Nova::Obj_crd(context_cap.local_name(), 0));
cap_map()->remove(context_cap.local_name(), 0);
};
_signal_queue.apply(context_cap, lambda);
}

View File

@ -26,37 +26,40 @@ Core_rm_session::attach(Dataspace_capability ds_cap, size_t size,
{
using namespace Okl4;
Object_pool<Dataspace_component>::Guard ds(_ds_ep->lookup_and_lock(ds_cap));
if (!ds)
throw Invalid_dataspace();
auto lambda = [&] (Dataspace_component *ds) -> void* {
if (!ds)
throw Invalid_dataspace();
if (size == 0)
size = ds->size();
if (size == 0)
size = ds->size();
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
size_t page_rounded_size = (size + get_page_size() - 1)
& get_page_mask();
if (use_local_addr) {
PERR("Parameter 'use_local_addr' not supported within core");
return 0;
}
if (use_local_addr) {
PERR("Parameter 'use_local_addr' not supported within core");
return nullptr;
}
if (offset) {
PERR("Parameter 'offset' not supported within core");
return 0;
}
if (offset) {
PERR("Parameter 'offset' not supported within core");
return nullptr;
}
/* allocate range in core's virtual address space */
void *virt_addr;
if (!platform()->region_alloc()->alloc(page_rounded_size, &virt_addr)) {
PERR("Could not allocate virtual address range in core of size %zd\n",
page_rounded_size);
return false;
}
/* allocate range in core's virtual address space */
void *virt_addr;
if (!platform()->region_alloc()->alloc(page_rounded_size, &virt_addr)) {
PERR("Could not allocate virtual address range in core of size %zd\n",
page_rounded_size);
return nullptr;
}
/* map the dataspace's physical pages to corresponding virtual addresses */
unsigned num_pages = page_rounded_size >> get_page_size_log2();
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages))
return 0;
/* map the dataspace's physical pages to corresponding virtual addresses */
unsigned num_pages = page_rounded_size >> get_page_size_log2();
if (!map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages))
return nullptr;
return virt_addr;
};
return virt_addr;
return _ds_ep->apply(ds_cap, lambda);
}

View File

@ -21,8 +21,9 @@ using namespace Genode;
void Pd_session_component::space_pager(Thread_capability thread)
{
Object_pool<Cpu_thread_component>::Guard
cpu_thread(_thread_ep->lookup_and_lock(thread));
if (!cpu_thread) return;
_pd.space_pager(cpu_thread->platform_thread());
_thread_ep->apply(thread, [this] (Cpu_thread_component *cpu_thread)
{
if (!cpu_thread) return;
_pd.space_pager(cpu_thread->platform_thread());
});
}

View File

@ -149,7 +149,7 @@ void Ipc_pager::acknowledge_wakeup()
** Pager entrypoint **
**********************/
Untyped_capability Pager_entrypoint::_manage(Pager_object *obj)
Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
{
return Untyped_capability(_tid.l4id, obj->badge());
return Untyped_capability(_tid.l4id, badge);
}

View File

@ -140,7 +140,7 @@ void Ipc_pager::acknowledge_wakeup()
** Pager entrypoint **
**********************/
Untyped_capability Pager_entrypoint::_manage(Pager_object *obj)
Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
{
return Untyped_capability(_tid.l4id, obj->badge());
return Untyped_capability(_tid.l4id, badge);
}

View File

@ -17,6 +17,7 @@
*/
/* Genode includes */
#include <internal/capability_space_sel4.h>
#include <base/rpc_server.h>
using namespace Genode;
@ -65,23 +66,14 @@ void Rpc_entrypoint::entry()
srv.ret(Ipc_client::ERR_INVALID_OBJECT);
/* atomically lookup and lock referenced object */
Object_pool<Rpc_object_base>::Guard curr_obj(lookup_and_lock(srv.badge()));
if (!curr_obj)
continue;
auto lambda = [&] (Rpc_object_base *obj) {
if (!obj) return;
{
Lock::Guard lock_guard(_curr_obj_lock);
_curr_obj = curr_obj;
}
/* dispatch request */
try { srv.ret(_curr_obj->dispatch(opcode, srv, srv)); }
catch (Blocking_canceled) { }
{
Lock::Guard lock_guard(_curr_obj_lock);
_curr_obj = 0;
}
/* dispatch request */
try { srv.ret(obj->dispatch(opcode, srv, srv)); }
catch (Blocking_canceled) { }
};
apply(srv.badge(), lambda);
}
/* answer exit call, thereby wake up '~Rpc_entrypoint' */
@ -89,6 +81,4 @@ void Rpc_entrypoint::entry()
/* defer the destruction of 'Ipc_server' until '~Rpc_entrypoint' is ready */
_delay_exit.lock();
}

View File

@ -28,36 +28,39 @@ Core_rm_session::attach(Dataspace_capability ds_cap, size_t size,
Rm_session::Local_addr local_addr,
bool executable)
{
Object_pool<Dataspace_component>::Guard ds(_ds_ep->lookup_and_lock(ds_cap));
if (!ds)
throw Invalid_dataspace();
auto lambda = [&] (Dataspace_component *ds) -> Local_addr {
if (!ds)
throw Invalid_dataspace();
if (size == 0)
size = ds->size();
if (size == 0)
size = ds->size();
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
size_t page_rounded_size = (size + get_page_size() - 1) & get_page_mask();
if (use_local_addr) {
PERR("Parameter 'use_local_addr' not supported within core");
return 0;
}
if (use_local_addr) {
PERR("Parameter 'use_local_addr' not supported within core");
return nullptr;
}
if (offset) {
PERR("Parameter 'offset' not supported within core");
return 0;
}
if (offset) {
PERR("Parameter 'offset' not supported within core");
return nullptr;
}
/* allocate range in core's virtual address space */
void *virt_addr;
if (!platform()->region_alloc()->alloc(page_rounded_size, &virt_addr)) {
PERR("Could not allocate virtual address range in core of size %zd\n",
page_rounded_size);
return false;
}
/* allocate range in core's virtual address space */
void *virt_addr;
if (!platform()->region_alloc()->alloc(page_rounded_size, &virt_addr)) {
PERR("Could not allocate virtual address range in core of size %zd\n",
page_rounded_size);
return nullptr;
}
/* map the dataspace's physical pages to core-local virtual addresses */
size_t num_pages = page_rounded_size >> get_page_size_log2();
map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages);
/* map the dataspace's physical pages to core-local virtual addresses */
size_t num_pages = page_rounded_size >> get_page_size_log2();
map_local(ds->phys_addr(), (addr_t)virt_addr, num_pages);
return virt_addr;
return virt_addr;
};
return _ds_ep->apply(ds_cap, lambda);
}

View File

@ -120,14 +120,14 @@ void Pager_object::unresolved_page_fault_occurred()
** Pager entrypoint **
**********************/
Untyped_capability Pager_entrypoint::_manage(Pager_object *obj)
Untyped_capability Pager_entrypoint::_pager_object_cap(unsigned long badge)
{
/*
* Create minted endpoint capability of the pager entrypoint.
* The badge of the page-fault message is used to find the pager
* object for faulted thread.
*/
Rpc_obj_key rpc_obj_key((addr_t)obj->badge());
Rpc_obj_key rpc_obj_key((addr_t)badge);
Untyped_capability ep_cap(Capability_space::create_ep_cap(*this));
return Capability_space::create_rpc_obj_cap(ep_cap, nullptr, rpc_obj_key);

View File

@ -224,6 +224,8 @@ class Genode::Child : protected Rpc_object<Parent>
*/
void _remove_session(Session *s);
void _close(Session *s);
/**
* Return service interface targetting the parent
*

View File

@ -1,7 +1,8 @@
/*
* \brief Object pool - map ids to objects
* \brief Object pool - map capabilities to objects
* \author Norman Feske
* \author Alexander Boettcher
* \author Stafen Kalkowski
* \date 2006-06-26
*/
@ -23,7 +24,7 @@ namespace Genode { template <typename> class Object_pool; }
/**
* Map object ids to local objects
* Map capabilities to local objects
*
* \param OBJ_TYPE object type (must be inherited from Object_pool::Entry)
*
@ -35,66 +36,25 @@ class Genode::Object_pool
{
public:
class Guard
{
private:
OBJ_TYPE * _object;
public:
operator OBJ_TYPE*() const { return _object; }
OBJ_TYPE * operator->() const { return _object; }
OBJ_TYPE * object() const { return _object; }
template <class X>
explicit Guard(X * object) {
_object = dynamic_cast<OBJ_TYPE *>(object); }
~Guard()
{
if (!_object) return;
_object->release();
}
};
class Entry : public Avl_node<Entry>
{
private:
Untyped_capability _cap;
short int _ref;
bool _dead;
Lock _entry_lock;
Lock _lock;
inline unsigned long _obj_id() { return _cap.local_name(); }
friend class Object_pool;
friend class Avl_tree<Entry>;
/*
* Support methods for atomic lookup and lock functionality of
* class Object_pool.
*/
void lock() { _entry_lock.lock(); };
void unlock() { _entry_lock.unlock(); };
void add_ref() { _ref += 1; }
void del_ref() { _ref -= 1; }
bool is_dead(bool set_dead = false) {
return (set_dead ? (_dead = true) : _dead); }
bool is_ref_zero() { return _ref <= 0; }
public:
/**
* Constructors
*/
Entry() : _ref(0), _dead(false) { }
Entry(Untyped_capability cap) : _cap(cap), _ref(0), _dead(false) { }
Entry() { }
Entry(Untyped_capability cap) : _cap(cap) { }
/**
* Avl_node interface
@ -120,12 +80,7 @@ class Genode::Object_pool
void cap(Untyped_capability c) { _cap = c; }
Untyped_capability const cap() const { return _cap; }
/**
* Function used - ideally - solely by the Guard.
*/
void release() { del_ref(); unlock(); }
void acquire() { lock(); add_ref(); }
Lock& lock() { return _lock; }
};
private:
@ -133,6 +88,58 @@ class Genode::Object_pool
Avl_tree<Entry> _tree;
Lock _lock;
OBJ_TYPE* _obj_by_capid(unsigned long capid)
{
Entry *ret = _tree.first() ? _tree.first()->find_by_obj_id(capid)
: nullptr;
return static_cast<OBJ_TYPE*>(ret);
}
template <typename FUNC, typename RET>
struct Apply_functor
{
RET operator()(OBJ_TYPE *obj, FUNC f)
{
using Functor = Trait::Functor<decltype(&FUNC::operator())>;
using Object_pointer = typename Functor::template Argument<0>::Type;
try {
auto ret = f(dynamic_cast<Object_pointer>(obj));
if (obj) obj->_lock.unlock();
return ret;
} catch(...) {
if (obj) obj->_lock.unlock();
throw;
}
}
};
template <typename FUNC>
struct Apply_functor<FUNC, void>
{
void operator()(OBJ_TYPE *obj, FUNC f)
{
using Functor = Trait::Functor<decltype(&FUNC::operator())>;
using Object_pointer = typename Functor::template Argument<0>::Type;
try {
f(dynamic_cast<Object_pointer>(obj));
if (obj) obj->_lock.unlock();
} catch(...) {
if (obj) obj->_lock.unlock();
throw;
}
}
};
protected:
bool empty()
{
Lock::Guard lock_guard(_lock);
return _tree.first() == nullptr;
}
public:
void insert(OBJ_TYPE *obj)
@ -141,74 +148,60 @@ class Genode::Object_pool
_tree.insert(obj);
}
void remove_locked(OBJ_TYPE *obj)
void remove(OBJ_TYPE *obj)
{
obj->is_dead(true);
obj->del_ref();
while (true) {
obj->unlock();
{
Lock::Guard lock_guard(_lock);
if (obj->is_ref_zero()) {
_tree.remove(obj);
return;
}
}
obj->lock();
}
Lock::Guard lock_guard(_lock);
_tree.remove(obj);
}
/**
* Lookup object
*/
OBJ_TYPE *lookup_and_lock(addr_t obj_id)
template <typename FUNC>
auto apply(unsigned long capid, FUNC func)
-> typename Trait::Functor<decltype(&FUNC::operator())>::Return_type
{
OBJ_TYPE * obj_typed;
using Functor = Trait::Functor<decltype(&FUNC::operator())>;
OBJ_TYPE * obj;
{
Lock::Guard lock_guard(_lock);
Entry *obj = _tree.first();
if (!obj) return 0;
obj_typed = (OBJ_TYPE *)obj->find_by_obj_id(obj_id);
if (!obj_typed || obj_typed->is_dead())
return 0;
obj = _obj_by_capid(capid);
obj_typed->add_ref();
if (obj) obj->_lock.lock();
}
obj_typed->lock();
return obj_typed;
Apply_functor<FUNC, typename Functor::Return_type> hf;
return hf(obj, func);
}
OBJ_TYPE *lookup_and_lock(Untyped_capability cap)
template <typename FUNC>
auto apply(Untyped_capability cap, FUNC func)
-> typename Trait::Functor<decltype(&FUNC::operator())>::Return_type
{
return lookup_and_lock(cap.local_name());
return apply(cap.local_name(), func);
}
/**
* Return first element of tree
*
* This function is used for removing tree elements step by step.
*/
OBJ_TYPE *first()
template <typename FUNC>
void remove_all(FUNC func)
{
Lock::Guard lock_guard(_lock);
return (OBJ_TYPE *)_tree.first();
}
for (;;) {
OBJ_TYPE * obj;
/**
* Return first element of tree locked
*
* This function is used for removing tree elements step by step.
*/
OBJ_TYPE *first_locked()
{
Lock::Guard lock_guard(_lock);
OBJ_TYPE * const obj_typed = (OBJ_TYPE *)_tree.first();
if (!obj_typed) { return 0; }
obj_typed->lock();
return obj_typed;
{
Lock::Guard lock_guard(_lock);
obj = (OBJ_TYPE*) _tree.first();
if (!obj) return;
{
Lock::Guard object_guard(obj->_lock);
_tree.remove(obj);
}
}
func(obj);
}
}
};

View File

@ -275,8 +275,6 @@ class Genode::Rpc_entrypoint : Thread_base, public Object_pool<Rpc_object_base>
protected:
Ipc_server *_ipc_server;
Rpc_object_base *_curr_obj; /* currently dispatched RPC object */
Lock _curr_obj_lock; /* for the protection of '_curr_obj' */
Lock _cap_valid; /* thread startup synchronization */
Lock _delay_start; /* delay start of request dispatching */
Lock _delay_exit; /* delay destructor until server settled */
@ -298,13 +296,6 @@ class Genode::Rpc_entrypoint : Thread_base, public Object_pool<Rpc_object_base>
*/
void _dissolve(Rpc_object_base *obj);
/**
* Force activation to cancel dispatching the specified server object
*
* \noapi
*/
void _leave_server_object(Rpc_object_base *obj);
/**
* Wait until the entrypoint activation is initialized
*

View File

@ -248,26 +248,29 @@ class Genode::Root_component : public Rpc_object<Typed_root<SESSION_TYPE> >,
{
if (!args.is_valid_string()) throw Root::Invalid_args();
typedef typename Object_pool<SESSION_TYPE>::Guard Object_guard;
Object_guard s(_ep->lookup_and_lock(session));
if (!s) return;
_ep->apply(session, [&] (SESSION_TYPE *s) {
if (!s) return;
_upgrade_session(s, args.string());
_upgrade_session(s, args.string());
});
}
void close(Session_capability session) override
void close(Session_capability session_cap) override
{
SESSION_TYPE * s =
dynamic_cast<SESSION_TYPE *>(_ep->lookup_and_lock(session));
if (!s) return;
SESSION_TYPE * session;
/* let the entry point forget the session object */
_ep->dissolve(s);
_ep->apply(session_cap, [&] (SESSION_TYPE *s) {
session = s;
_destroy_session(s);
/* let the entry point forget the session object */
if (session) _ep->dissolve(session);
});
if (!session) return;
_destroy_session(session);
POLICY::release();
return;
}
};

View File

@ -43,6 +43,23 @@ namespace Genode {
namespace Meta {
/***********************************
** Variadic template type access **
***********************************/
template <unsigned long N, typename HEAD, typename... TAIL>
struct Variadic_type_tuple
{
using Type = typename Variadic_type_tuple<N-1, TAIL...>::Type;
};
template <typename HEAD, typename... TAIL>
struct Variadic_type_tuple<0, HEAD, TAIL...>
{
using Type = HEAD;
};
/***************
** Type list **
***************/
@ -643,6 +660,28 @@ namespace Genode {
template <bool VALUE> struct Bool_to_type { enum { V = VALUE }; };
} /* namespace Meta */
namespace Trait {
template<typename T> struct Functor;
template<typename RET, typename T, typename... ARGS>
struct Functor<RET (T::*)(ARGS...) const>
{
static constexpr unsigned long argument_count = sizeof...(ARGS);
using Return_type = RET;
template <unsigned long N>
struct Argument
{
static_assert(N < argument_count, "Invalid index");
using Type =
typename Meta::Variadic_type_tuple<N, ARGS...>::Type;
};
};
} /* namespace Trait */
}
#endif /* _INCLUDE__BASE__UTIL__META_H_ */

View File

@ -198,7 +198,6 @@ void Child::_add_session(Child::Session const &s)
void Child::_remove_session(Child::Session *s)
{
/* forget about this session */
_session_pool.remove_locked(s);
_session_list.remove(s);
/* return session quota to the ram session of the child */
@ -216,6 +215,51 @@ Service *Child::_parent_service()
}
void Child::_close(Session* s)
{
if (!s) {
PWRN("no session structure found");
return;
}
/*
* There is a chance that the server is not responding to the 'close' call,
* making us block infinitely. However, by using core's cancel-blocking
* mechanism, we can cancel the 'close' call by another (watchdog) thread
* that invokes 'cancel_blocking' at our thread after a timeout. The
* unblocking is reflected at the API level as an 'Blocking_canceled'
* exception. We catch this exception to proceed with normal operation
* after being unblocked.
*/
try { s->service()->close(s->cap()); }
catch (Blocking_canceled) {
PDBG("Got Blocking_canceled exception during %s->close call\n",
s->ident()); }
/*
* If the session was provided by a child of us,
* 'server()->ram_session_cap()' returns the RAM session of the
* corresponding child. Since the session to the server is closed now, we
* expect that the server released all donated resources and we can
* decrease the servers' quota.
*
* If this goes wrong, the server is misbehaving.
*/
if (s->service()->ram_session_cap().valid()) {
Ram_session_client server_ram(s->service()->ram_session_cap());
if (server_ram.transfer_quota(env()->ram_session_cap(),
s->donated_ram_quota())) {
PERR("Misbehaving server '%s'!", s->service()->name());
}
}
{
Lock::Guard lock_guard(_lock);
_remove_session(s);
}
}
void Child::revoke_server(Server const *server)
{
Lock::Guard lock_guard(_lock);
@ -228,6 +272,8 @@ void Child::revoke_server(Server const *server)
/* if no matching session exists, we are done */
if (!s) return;
_session_pool.apply(s->cap(), [&] (Session *s) {
if (s) _session_pool.remove(s); });
_remove_session(s);
}
}
@ -329,41 +375,43 @@ void Child::upgrade(Session_capability to_session, Parent::Upgrade_args const &a
targeted_service = &_pd_service;
/* check if upgrade refers to server */
Object_pool<Session>::Guard session(_session_pool.lookup_and_lock(to_session));
if (session)
targeted_service = session->service();
_session_pool.apply(to_session, [&] (Session *session)
{
if (session)
targeted_service = session->service();
if (!targeted_service) {
PWRN("could not lookup service for session upgrade");
return;
}
if (!targeted_service) {
PWRN("could not lookup service for session upgrade");
return;
}
if (!args.is_valid_string()) {
PWRN("no valid session-upgrade arguments");
return;
}
if (!args.is_valid_string()) {
PWRN("no valid session-upgrade arguments");
return;
}
size_t const ram_quota =
Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0);
size_t const ram_quota =
Arg_string::find_arg(args.string(), "ram_quota").ulong_value(0);
/* transfer quota from client to ourself */
Transfer donation_from_child(ram_quota, _ram,
env()->ram_session_cap());
/* transfer quota from client to ourself */
Transfer donation_from_child(ram_quota, _ram,
env()->ram_session_cap());
/* transfer session quota from ourself to the service provider */
Transfer donation_to_service(ram_quota, env()->ram_session_cap(),
targeted_service->ram_session_cap());
/* transfer session quota from ourself to the service provider */
Transfer donation_to_service(ram_quota, env()->ram_session_cap(),
targeted_service->ram_session_cap());
try { targeted_service->upgrade(to_session, args.string()); }
catch (Service::Quota_exceeded) { throw Quota_exceeded(); }
try { targeted_service->upgrade(to_session, args.string()); }
catch (Service::Quota_exceeded) { throw Quota_exceeded(); }
/* remember new amount attached to the session */
if (session)
session->upgrade_ram_quota(ram_quota);
/* remember new amount attached to the session */
if (session)
session->upgrade_ram_quota(ram_quota);
/* finish transaction */
donation_from_child.acknowledge();
donation_to_service.acknowledge();
/* finish transaction */
donation_from_child.acknowledge();
donation_to_service.acknowledge();
});
}
@ -376,46 +424,13 @@ void Child::close(Session_capability session_cap)
|| session_cap.local_name() == _pd.local_name())
return;
Session *s = _session_pool.lookup_and_lock(session_cap);
if (!s) {
PWRN("no session structure found");
return;
}
/*
* There is a chance that the server is not responding to the 'close' call,
* making us block infinitely. However, by using core's cancel-blocking
* mechanism, we can cancel the 'close' call by another (watchdog) thread
* that invokes 'cancel_blocking' at our thread after a timeout. The
* unblocking is reflected at the API level as an 'Blocking_canceled'
* exception. We catch this exception to proceed with normal operation
* after being unblocked.
*/
try { s->service()->close(s->cap()); }
catch (Blocking_canceled) {
PDBG("Got Blocking_canceled exception during %s->close call\n",
s->ident()); }
/*
* If the session was provided by a child of us,
* 'server()->ram_session_cap()' returns the RAM session of the
* corresponding child. Since the session to the server is closed now, we
* expect that the server released all donated resources and we can
* decrease the servers' quota.
*
* If this goes wrong, the server is misbehaving.
*/
if (s->service()->ram_session_cap().valid()) {
Ram_session_client server_ram(s->service()->ram_session_cap());
if (server_ram.transfer_quota(env()->ram_session_cap(),
s->donated_ram_quota())) {
PERR("Misbehaving server '%s'!", s->service()->name());
}
}
Lock::Guard lock_guard(_lock);
_remove_session(s);
Session *session = nullptr;
_session_pool.apply(session_cap, [&] (Session *s)
{
session = s;
if (s) _session_pool.remove(s);
});
_close(session);
}
@ -495,7 +510,6 @@ Child::~Child()
_entrypoint->dissolve(this);
_policy->unregister_services();
for (Session *s; (s = _session_pool.first()); )
close(s->cap());
_session_pool.remove_all([&] (Session *s) { _close(s); });
}

View File

@ -22,18 +22,7 @@ using namespace Genode;
void Rpc_entrypoint::_dissolve(Rpc_object_base *obj)
{
/* make sure nobody is able to find this object */
remove_locked(obj);
/*
* The activation may execute a blocking operation in a dispatch function.
* Before resolving the corresponding object, we need to ensure that it is
* no longer used. Therefore, we to need cancel an eventually blocking
* operation and let the activation leave the context of the object.
*/
_leave_server_object(obj);
/* wait until nobody is inside dispatch */
obj->acquire();
remove(obj);
_cap_session->free(obj->cap());
@ -41,15 +30,6 @@ void Rpc_entrypoint::_dissolve(Rpc_object_base *obj)
}
void Rpc_entrypoint::_leave_server_object(Rpc_object_base *obj)
{
Lock::Guard lock_guard(_curr_obj_lock);
if (obj == _curr_obj)
cancel_blocking();
}
void Rpc_entrypoint::_block_until_cap_valid()
{
_cap_valid.lock();
@ -104,7 +84,7 @@ Rpc_entrypoint::Rpc_entrypoint(Cap_session *cap_session, size_t stack_size,
:
Thread_base(Cpu_session::DEFAULT_WEIGHT, name, stack_size),
_cap(Untyped_capability()),
_curr_obj(0), _cap_valid(Lock::LOCKED), _delay_start(Lock::LOCKED),
_cap_valid(Lock::LOCKED), _delay_start(Lock::LOCKED),
_delay_exit(Lock::LOCKED),
_cap_session(cap_session)
{
@ -124,8 +104,6 @@ Rpc_entrypoint::Rpc_entrypoint(Cap_session *cap_session, size_t stack_size,
Rpc_entrypoint::~Rpc_entrypoint()
{
typedef Object_pool<Rpc_object_base> Pool;
/*
* We have to make sure the server loop is running which is only the case
* if the Rpc_entrypoint was actived before we execute the RPC call.
@ -137,14 +115,9 @@ Rpc_entrypoint::~Rpc_entrypoint()
dissolve(&_exit_handler);
if (Pool::first()) {
if (!empty())
PWRN("Object pool not empty in %s", __func__);
/* dissolve all objects - objects are not destroyed! */
while (Rpc_object_base *obj = Pool::first())
_dissolve(obj);
}
/*
* Now that we finished the 'dissolve' steps above (which need a working
* 'Ipc_server' in the context of the entrypoint thread), we can allow the

View File

@ -42,6 +42,8 @@ Untyped_capability Rpc_entrypoint::_manage(Rpc_object_base *obj)
void Rpc_entrypoint::entry()
{
using Pool = Object_pool<Rpc_object_base>;
Ipc_server srv(&_snd_buf, &_rcv_buf);
_ipc_server = &srv;
_cap = srv;
@ -65,24 +67,13 @@ void Rpc_entrypoint::entry()
/* set default return value */
srv.ret(Ipc_client::ERR_INVALID_OBJECT);
/* atomically lookup and lock referenced object */
Object_pool<Rpc_object_base>::Guard curr_obj(lookup_and_lock(srv.badge()));
if (!curr_obj)
continue;
Pool::apply(srv.badge(), [&] (Rpc_object_base *obj)
{
Lock::Guard lock_guard(_curr_obj_lock);
_curr_obj = curr_obj;
}
/* dispatch request */
try { srv.ret(_curr_obj->dispatch(opcode, srv, srv)); }
catch (Blocking_canceled) { }
{
Lock::Guard lock_guard(_curr_obj_lock);
_curr_obj = 0;
}
if (!obj) { return;}
try {
srv.ret(obj->dispatch(opcode, srv, srv));
} catch(Blocking_canceled&) { }
});
}
/* answer exit call, thereby wake up '~Rpc_entrypoint' */

View File

@ -103,91 +103,108 @@ void Cpu_session_component::_unsynchronized_kill_thread(Cpu_thread_component *th
void Cpu_session_component::kill_thread(Thread_capability thread_cap)
{
Cpu_thread_component * thread =
dynamic_cast<Cpu_thread_component *>(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return;
auto lambda = [this] (Cpu_thread_component *thread) {
if (!thread) return;
Lock::Guard lock_guard(_thread_list_lock);
_unsynchronized_kill_thread(thread);
Lock::Guard lock_guard(_thread_list_lock);
_unsynchronized_kill_thread(thread);
};
_thread_ep->apply(thread_cap, lambda);
}
int Cpu_session_component::set_pager(Thread_capability thread_cap,
Pager_capability pager_cap)
{
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return -1;
auto lambda = [&] (Cpu_thread_component *thread) {
if (!thread) return -1;
Object_pool<Pager_object>::Guard p(_pager_ep->lookup_and_lock(pager_cap));
if (!p) return -2;
auto p_lambda = [&] (Pager_object *p) {
if (!p) return -2;
thread->platform_thread()->pager(p);
thread->platform_thread()->pager(p);
p->thread_cap(thread->cap());
return 0;
p->thread_cap(thread->cap());
return 0;
};
return _pager_ep->apply(pager_cap, p_lambda);
};
return _thread_ep->apply(thread_cap, lambda);
}
int Cpu_session_component::start(Thread_capability thread_cap,
addr_t ip, addr_t sp)
{
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return -1;
auto lambda = [&] (Cpu_thread_component *thread) {
if (!thread) return -1;
/*
* If an exception handler was installed prior to the call of 'set_pager',
* we need to update the pager object with the current exception handler.
*/
thread->update_exception_sigh();
/*
* If an exception handler was installed prior to the call of 'set_pager',
* we need to update the pager object with the current exception handler.
*/
thread->update_exception_sigh();
return thread->platform_thread()->start((void *)ip, (void *)sp);
return thread->platform_thread()->start((void *)ip, (void *)sp);
};
return _thread_ep->apply(thread_cap, lambda);
}
void Cpu_session_component::pause(Thread_capability thread_cap)
{
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return;
auto lambda = [this] (Cpu_thread_component *thread) {
if (!thread) return;
thread->platform_thread()->pause();
thread->platform_thread()->pause();
};
_thread_ep->apply(thread_cap, lambda);
}
void Cpu_session_component::resume(Thread_capability thread_cap)
{
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return;
auto lambda = [this] (Cpu_thread_component *thread) {
if (!thread) return;
thread->platform_thread()->resume();
thread->platform_thread()->resume();
};
_thread_ep->apply(thread_cap, lambda);
}
void Cpu_session_component::cancel_blocking(Thread_capability thread_cap)
{
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return;
auto lambda = [this] (Cpu_thread_component *thread) {
if (!thread) return;
thread->platform_thread()->cancel_blocking();
thread->platform_thread()->cancel_blocking();
};
_thread_ep->apply(thread_cap, lambda);
}
Thread_state Cpu_session_component::state(Thread_capability thread_cap)
{
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) throw State_access_failed();
auto lambda = [this] (Cpu_thread_component *thread) {
if (!thread) throw State_access_failed();
return thread->platform_thread()->state();
return thread->platform_thread()->state();
};
return _thread_ep->apply(thread_cap, lambda);
}
void Cpu_session_component::state(Thread_capability thread_cap,
Thread_state const &state)
{
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) throw State_access_failed();
auto lambda = [&] (Cpu_thread_component *thread) {
if (!thread) throw State_access_failed();
thread->platform_thread()->state(state);
thread->platform_thread()->state(state);
};
_thread_ep->apply(thread_cap, lambda);
}
@ -212,10 +229,12 @@ Cpu_session_component::exception_handler(Thread_capability thread_cap,
sigh_cap = _default_exception_handler;
}
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return;
auto lambda = [&] (Cpu_thread_component *thread) {
if (!thread) return;
thread->sigh(sigh_cap);
thread->sigh(sigh_cap);
};
_thread_ep->apply(thread_cap, lambda);
}
@ -232,23 +251,25 @@ Affinity::Space Cpu_session_component::affinity_space() const
void Cpu_session_component::affinity(Thread_capability thread_cap,
Affinity::Location location)
{
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return;
auto lambda = [&] (Cpu_thread_component *thread) {
if (!thread) return;
/* convert session-local location to physical location */
int const x1 = location.xpos() + _location.xpos(),
y1 = location.ypos() + _location.ypos(),
x2 = location.xpos() + location.width(),
y2 = location.ypos() + location.height();
/* convert session-local location to physical location */
int const x1 = location.xpos() + _location.xpos(),
y1 = location.ypos() + _location.ypos(),
x2 = location.xpos() + location.width(),
y2 = location.ypos() + location.height();
int const clipped_x1 = max(_location.xpos(), x1),
clipped_y1 = max(_location.ypos(), y1),
clipped_x2 = max(_location.xpos() + (int)_location.width() - 1, x2),
clipped_y2 = max(_location.ypos() + (int)_location.height() - 1, y2);
int const clipped_x1 = max(_location.xpos(), x1),
clipped_y1 = max(_location.ypos(), y1),
clipped_x2 = max(_location.xpos() + (int)_location.width() - 1, x2),
clipped_y2 = max(_location.ypos() + (int)_location.height() - 1, y2);
thread->platform_thread()->affinity(Affinity::Location(clipped_x1, clipped_y1,
clipped_x2 - clipped_x1 + 1,
clipped_y2 - clipped_y1 + 1));
thread->platform_thread()->affinity(Affinity::Location(clipped_x1, clipped_y1,
clipped_x2 - clipped_x1 + 1,
clipped_y2 - clipped_y1 + 1));
};
_thread_ep->apply(thread_cap, lambda);
}
@ -260,28 +281,34 @@ Dataspace_capability Cpu_session_component::trace_control()
unsigned Cpu_session_component::trace_control_index(Thread_capability thread_cap)
{
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return 0;
auto lambda = [] (Cpu_thread_component *thread) -> unsigned {
if (!thread) return 0;
return thread->trace_control_index();
return thread->trace_control_index();
};
return _thread_ep->apply(thread_cap, lambda);
}
Dataspace_capability Cpu_session_component::trace_buffer(Thread_capability thread_cap)
{
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return Dataspace_capability();
auto lambda = [this] (Cpu_thread_component *thread) {
if (!thread) return Dataspace_capability();
return thread->trace_source()->buffer();
return thread->trace_source()->buffer();
};
return _thread_ep->apply(thread_cap, lambda);
}
Dataspace_capability Cpu_session_component::trace_policy(Thread_capability thread_cap)
{
Object_pool<Cpu_thread_component>::Guard thread(_thread_ep->lookup_and_lock(thread_cap));
if (!thread) return Dataspace_capability();
auto lambda = [this] (Cpu_thread_component *thread) {
if (!thread) return Dataspace_capability();
return thread->trace_source()->policy();
return thread->trace_source()->policy();
};
return _thread_ep->apply(thread_cap, lambda);
}
@ -309,29 +336,30 @@ int Cpu_session_component::transfer_quota(Cpu_session_capability dst_cap,
size_t amount)
{
/* lookup targeted CPU session */
Object_pool<Cpu_session_component>::Guard
dst(_session_ep->lookup_and_lock(dst_cap));
if (!dst) {
PWRN("Transfer CPU quota, %s, targeted session not found",
_label.string());
return -1;
}
/* check reference relationship */
if (dst->_ref != this && dst != _ref) {
PWRN("Transfer CPU quota, %s -> %s, no reference relation",
_label.string(), dst->_label.string());
return -2;
}
/* check quota availability */
size_t const quota = quota_lim_downscale(_quota, amount);
if (quota > _quota) {
PWRN("Transfer CPU quota, %s -> %s, insufficient quota %zu, need %zu",
_label.string(), dst->_label.string(), _quota, quota);
return -3;
}
/* transfer quota */
_transfer_quota(dst, quota);
return 0;
auto lambda = [&] (Cpu_session_component *dst) {
if (!dst) {
PWRN("Transfer CPU quota, %s, targeted session not found",
_label.string());
return -1;
}
/* check reference relationship */
if (dst->_ref != this && dst != _ref) {
PWRN("Transfer CPU quota, %s -> %s, no reference relation",
_label.string(), dst->_label.string());
return -2;
}
/* check quota availability */
size_t const quota = quota_lim_downscale(_quota, amount);
if (quota > _quota) {
PWRN("Transfer CPU quota, %s -> %s, insufficient quota %zu, need %zu",
_label.string(), dst->_label.string(), _quota, quota);
return -3;
}
/* transfer quota */
_transfer_quota(dst, quota);
return 0;
};
return _session_ep->apply(dst_cap, lambda);
}
@ -348,22 +376,23 @@ int Cpu_session_component::ref_account(Cpu_session_capability ref_cap)
return -2; }
/* lookup and check targeted CPU-session */
Object_pool<Cpu_session_component>::Guard
ref(_session_ep->lookup_and_lock(ref_cap));
if (!ref) {
PWRN("Set ref account, %s, targeted session not found",
_label.string());
return -1;
}
if (ref == this) {
PWRN("Set ref account, %s, self reference not allowed",
_label.string());
return -3;
}
/* establish ref-account relation from targeted CPU-session to us */
_ref = ref;
_ref->_insert_ref_member(this);
return 0;
auto lambda = [&] (Cpu_session_component *ref) {
if (!ref) {
PWRN("Set ref account, %s, targeted session not found",
_label.string());
return -1;
}
if (ref == this) {
PWRN("Set ref account, %s, self reference not allowed",
_label.string());
return -3;
}
/* establish ref-account relation from targeted CPU-session to us */
_ref = ref;
_ref->_insert_ref_member(this);
return 0;
};
return _session_ep->apply(ref_cap, lambda);
}

View File

@ -41,12 +41,13 @@ namespace Genode {
Local_addr local_addr = 0,
bool executable = false)
{
Object_pool<Dataspace_component>::Guard
ds(_ds_ep->lookup_and_lock(ds_cap));
if (!ds)
throw Invalid_dataspace();
auto lambda = [] (Dataspace_component *ds) {
if (!ds)
throw Invalid_dataspace();
return (void *)ds->phys_addr();
return (void *)ds->phys_addr();
};
return _ds_ep->apply(ds_cap, lambda);
}
void detach(Local_addr local_addr) { }

View File

@ -134,7 +134,7 @@ class Genode::Pager_entrypoint : public Object_pool<Pager_object>,
Ipc_pager _pager;
Cap_session *_cap_session;
Untyped_capability _manage(Pager_object *obj);
Untyped_capability _pager_object_cap(unsigned long badge);
public:

View File

@ -56,18 +56,20 @@ namespace Genode {
Session_capability cap = Root_component<Rm_session_component>::session(args, affinity);
/* lookup rm_session_component object */
Object_pool<Rm_session_component>::Guard rm_session(ep()->lookup_and_lock(cap));
if (!rm_session)
/* should never happen */
return cap;
auto lambda = [] (Rm_session_component *rm_session) {
if (!rm_session)
/* should never happen */
return;
/**
* Assign rm_session capability to dataspace component. It can
* not be done beforehand because the dataspace_component is
* constructed before the rm_session
*/
if (rm_session->dataspace_component())
rm_session->dataspace_component()->sub_rm_session(rm_session->cap());
/**
* Assign rm_session capability to dataspace component. It can
* not be done beforehand because the dataspace_component is
* constructed before the rm_session
*/
if (rm_session->dataspace_component())
rm_session->dataspace_component()->sub_rm_session(rm_session->cap());
};
ep()->apply(cap, lambda);
return cap;
}

View File

@ -282,6 +282,43 @@ namespace Genode {
Rm_dataspace_component _ds; /* dataspace representation of region map */
Dataspace_capability _ds_cap;
template <typename F>
auto _apply_to_dataspace(addr_t addr, F f, addr_t offset, unsigned level)
-> typename Trait::Functor<decltype(&F::operator())>::Return_type
{
using Functor = Trait::Functor<decltype(&F::operator())>;
using Return_type = typename Functor::Return_type;
Lock::Guard lock_guard(_lock);
/* skip further lookup when reaching the recursion limit */
if (!level) return f(this, nullptr, 0, 0);
/* lookup region and dataspace */
Rm_region *region = _map.metadata((void*)addr);
Dataspace_component *dsc = region ? region->dataspace()
: nullptr;
/* calculate offset in dataspace */
addr_t ds_offset = region ? (addr - region->base()
+ region->offset()) : 0;
/* check for nested dataspace */
Native_capability cap = dsc ? dsc->sub_rm_session()
: Native_capability();
if (!cap.valid()) return f(this, region, ds_offset, offset);
/* in case of a nested dataspace perform a recursive lookup */
auto lambda = [&] (Rm_session_component *rsc) -> Return_type
{
return (!rsc) ? f(nullptr, nullptr, ds_offset, offset)
: rsc->_apply_to_dataspace(ds_offset, f,
offset+region->base(),
--level);
};
return _session_ep->apply(cap, lambda);
}
public:
/**
@ -300,17 +337,6 @@ namespace Genode {
class Fault_area;
/**
* Reversely lookup dataspace and offset matching the specified address
*
* \return true lookup succeeded
*/
bool reverse_lookup(addr_t dst_base,
Fault_area *dst_fault_region,
Dataspace_component **src_dataspace,
Fault_area *src_fault_region,
Rm_session_component **sub_rm_session);
/**
* Register fault
*
@ -341,6 +367,20 @@ namespace Genode {
*/
void upgrade_ram_quota(size_t ram_quota) { _md_alloc.upgrade(ram_quota); }
/**
* Apply a function to dataspace attached at a given address
*
* /param addr address where the dataspace is attached
* /param f functor or lambda to apply
*/
template <typename F>
auto apply_to_dataspace(addr_t addr, F f)
-> typename Trait::Functor<decltype(&F::operator())>::Return_type
{
enum { RECURSION_LIMIT = 5 };
return _apply_to_dataspace(addr, f, 0, RECURSION_LIMIT);
}
/**************************************
** Region manager session interface **

View File

@ -1,5 +1,5 @@
/*
* \brief Generic implmentation of pager entrypoint
* \brief Generic implementation of pager entrypoint
* \author Norman Feske
* \author Stefan Kalkowski
* \date 2009-03-31
@ -20,6 +20,8 @@ using namespace Genode;
void Pager_entrypoint::entry()
{
using Pool = Object_pool<Pager_object>;
bool reply_pending = false;
while (1) {
@ -31,68 +33,63 @@ void Pager_entrypoint::entry()
reply_pending = false;
/* lookup referenced object */
Object_pool<Pager_object>::Guard _obj(lookup_and_lock(_pager.badge()));
Pager_object *obj = _obj;
/* handle request */
if (obj) {
if (_pager.is_exception()) {
obj->submit_exception_signal();
continue;
}
/* send reply if page-fault handling succeeded */
reply_pending = !obj->pager(_pager);
continue;
} else {
/*
* Prevent threads outside of core to mess with our wake-up
* interface. This condition can trigger if a process gets
* destroyed which triggered a page fault shortly before getting
* killed. In this case, 'wait_for_fault()' returns (because of
* the page fault delivery) but the pager-object lookup will fail
* (because core removed the process already).
*/
if (_pager.request_from_core()) {
Pool::apply(_pager.badge(), [&] (Pager_object *obj) {
if (obj) {
if (_pager.is_exception())
obj->submit_exception_signal();
else
/* send reply if page-fault handling succeeded */
reply_pending = !obj->pager(_pager);
} else {
/*
* We got a request from one of cores region-manager sessions
* to answer the pending page fault of a resolved region-manager
* client. Hence, we have to send the page-fault reply to the
* specified thread and answer the call of the region-manager
* session.
*
* When called from a region-manager session, we receive the
* core-local address of the targeted pager object via the
* first message word, which corresponds to the 'fault_ip'
* argument of normal page-fault messages.
* Prevent threads outside of core to mess with our wake-up
* interface. This condition can trigger if a process gets
* destroyed which triggered a page fault shortly before getting
* killed. In this case, 'wait_for_fault()' returns (because of
* the page fault delivery) but the pager-object lookup will fail
* (because core removed the process already).
*/
obj = reinterpret_cast<Pager_object *>(_pager.fault_ip());
if (_pager.request_from_core()) {
/* send reply to the calling region-manager session */
_pager.acknowledge_wakeup();
/*
* We got a request from one of cores region-manager sessions
* to answer the pending page fault of a resolved region-manager
* client. Hence, we have to send the page-fault reply to the
* specified thread and answer the call of the region-manager
* session.
*
* When called from a region-manager session, we receive the
* core-local address of the targeted pager object via the
* first message word, which corresponds to the 'fault_ip'
* argument of normal page-fault messages.
*/
obj = reinterpret_cast<Pager_object *>(_pager.fault_ip());
/* answer page fault of resolved pager object */
_pager.set_reply_dst(obj->cap());
_pager.acknowledge_wakeup();
/* send reply to the calling region-manager session */
_pager.acknowledge_wakeup();
/* answer page fault of resolved pager object */
_pager.set_reply_dst(obj->cap());
_pager.acknowledge_wakeup();
}
}
}
};
});
}
}
void Pager_entrypoint::dissolve(Pager_object *obj)
{
remove_locked(obj);
using Pool = Object_pool<Pager_object>;
if (obj) Pool::remove(obj);
}
Pager_capability Pager_entrypoint::manage(Pager_object *obj)
{
Native_capability cap = _manage(obj);
Native_capability cap = _pager_object_cap(obj->badge());
/* add server object to object pool */
obj->cap(cap);

View File

@ -26,23 +26,24 @@ using namespace Genode;
int Pd_session_component::bind_thread(Thread_capability thread)
{
Object_pool<Cpu_thread_component>::Guard cpu_thread(_thread_ep->lookup_and_lock(thread));
if (!cpu_thread) return -1;
return _thread_ep->apply(thread, [&] (Cpu_thread_component *cpu_thread) {
if (!cpu_thread) return -1;
if (cpu_thread->bound()) {
PWRN("rebinding of threads not supported");
return -2;
}
if (cpu_thread->bound()) {
PWRN("rebinding of threads not supported");
return -2;
}
Platform_thread *p_thread = cpu_thread->platform_thread();
Platform_thread *p_thread = cpu_thread->platform_thread();
int res = _pd.bind_thread(p_thread);
int res = _pd.bind_thread(p_thread);
if (res)
return res;
if (res)
return res;
cpu_thread->bound(true);
return 0;
cpu_thread->bound(true);
return 0;
});
}

View File

@ -26,9 +26,12 @@ static const bool verbose = false;
addr_t Ram_session_component::phys_addr(Ram_dataspace_capability ds)
{
Object_pool<Dataspace_component>::Guard dsc(_ds_ep->lookup_and_lock(ds));
if (!dsc) throw Invalid_dataspace();
return dsc->phys_addr();
auto lambda = [] (Dataspace_component *dsc) {
if (!dsc) throw Invalid_dataspace();
return dsc->phys_addr();
};
return _ds_ep->apply(ds, lambda);
}
@ -217,12 +220,12 @@ Ram_dataspace_capability Ram_session_component::alloc(size_t ds_size, Cache_attr
void Ram_session_component::free(Ram_dataspace_capability ds_cap)
{
Dataspace_component * ds =
dynamic_cast<Dataspace_component *>(_ds_ep->lookup_and_lock(ds_cap));
if (!ds)
return;
auto lambda = [this] (Dataspace_component *ds) {
if (!ds) return;
_free_ds(ds);
};
_free_ds(ds);
_ds_ep->apply(ds_cap, lambda);
}
@ -231,18 +234,21 @@ int Ram_session_component::ref_account(Ram_session_capability ram_session_cap)
/* the reference account cannot be defined twice */
if (_ref_account) return -2;
Object_pool<Ram_session_component>::Guard ref(_ram_session_ep->lookup_and_lock(ram_session_cap));
auto lambda = [this] (Ram_session_component *ref) {
/* check if recipient is a valid Ram_session_component */
if (!ref) return -1;
/* check if recipient is a valid Ram_session_component */
if (!ref) return -1;
/* deny the usage of the ram session as its own ref account */
/* XXX also check for cycles along the tree of ref accounts */
if (ref == this) return -3;
/* deny the usage of the ram session as its own ref account */
/* XXX also check for cycles along the tree of ref accounts */
if (ref == this) return -3;
_ref_account = ref;
_ref_account->_register_ref_account_member(this);
return 0;
_ref_account = ref;
_ref_account->_register_ref_account_member(this);
return 0;
};
return _ram_session_ep->apply(ram_session_cap, lambda);
}
@ -252,8 +258,9 @@ int Ram_session_component::transfer_quota(Ram_session_capability ram_session_cap
if (verbose)
PDBG("amount=%zu", amount);
Object_pool<Ram_session_component>::Guard dst(_ram_session_ep->lookup_and_lock(ram_session_cap));
return _transfer_quota(dst, amount);
auto lambda = [&] (Ram_session_component *dst) {
return _transfer_quota(dst, amount); };
return _ram_session_ep->apply(ram_session_cap, lambda);
}

View File

@ -165,6 +165,8 @@ namespace Genode {
int Rm_client::pager(Ipc_pager &pager)
{
using Fault_area = Rm_session_component::Fault_area;
Rm_session::Fault_type pf_type = pager.is_write_fault() ? Rm_session::WRITE_FAULT
: Rm_session::READ_FAULT;
addr_t pf_addr = pager.fault_addr();
@ -173,132 +175,87 @@ int Rm_client::pager(Ipc_pager &pager)
if (verbose_page_faults)
print_page_fault("page fault", pf_addr, pf_ip, pf_type, badge());
Rm_session_component *curr_rm_session = member_rm_session();
Rm_session_component *sub_rm_session = 0;
addr_t curr_rm_base = 0;
Dataspace_component *src_dataspace = 0;
Rm_session_component::Fault_area src_fault_area;
Rm_session_component::Fault_area dst_fault_area(pf_addr);
bool lookup;
auto lambda = [&] (Rm_session_component *rm_session,
Rm_region *region,
addr_t ds_offset,
addr_t region_offset) -> int
{
Dataspace_component * dsc = region ? region->dataspace() : nullptr;
if (!dsc) {
unsigned level;
enum { MAX_NESTING_LEVELS = 5 };
/*
* We found no attachment at the page-fault address and therefore have
* to reflect the page fault as region-manager fault. The signal
* handler is then expected to request the state of the region-manager
* session.
*/
/* helper guard to release the rm_session lock on return */
class Guard {
private:
/* print a warning if it's no managed-dataspace */
if (rm_session == member_rm_session())
print_page_fault("no RM attachment", pf_addr, pf_ip,
pf_type, badge());
Rm_session_component ** _release_session;
unsigned * _level;
/* register fault at responsible region-manager session */
if (rm_session)
rm_session->fault(this, pf_addr - region_offset, pf_type);
public:
explicit Guard(Rm_session_component ** rs, unsigned * level)
: _release_session(rs), _level(level) {}
~Guard() {
if ((*_level > 0) && (*_release_session))
(*_release_session)->release();
}
} release_guard(&curr_rm_session, &level);
/* traverse potentially nested dataspaces until we hit a leaf dataspace */
for (level = 0; level < MAX_NESTING_LEVELS; level++) {
lookup = curr_rm_session->reverse_lookup(curr_rm_base,
&dst_fault_area,
&src_dataspace,
&src_fault_area,
&sub_rm_session);
/* check if we need to traverse into a nested dataspace */
if (!sub_rm_session)
break;
if (!lookup) {
sub_rm_session->release();
break;
/* there is no attachment return an error condition */
return 1;
}
/* set up next iteration */
curr_rm_base = dst_fault_area.fault_addr()
- src_fault_area.fault_addr() + src_dataspace->map_src_addr();
if (level > 0)
curr_rm_session->release();
curr_rm_session = sub_rm_session;
sub_rm_session = 0;
}
if (level == MAX_NESTING_LEVELS) {
PWRN("Too many nesting levels of managed dataspaces");
return -1;
}
if (!lookup) {
addr_t ds_base = dsc->map_src_addr();
Fault_area src_fault_area(ds_base + ds_offset);
Fault_area dst_fault_area(pf_addr);
src_fault_area.constrain(ds_base, dsc->size());
dst_fault_area.constrain(region_offset + region->base(), region->size());
/*
* We found no attachment at the page-fault address and therefore have
* to reflect the page fault as region-manager fault. The signal
* handler is then expected to request the state of the region-manager
* session.
* Determine mapping size compatible with source and destination,
* and apply platform-specific constraint of mapping sizes.
*/
size_t map_size_log2 = dst_fault_area.common_size_log2(dst_fault_area,
src_fault_area);
map_size_log2 = constrain_map_size_log2(map_size_log2);
/* print a warning if it's no managed-dataspace */
if (curr_rm_session == member_rm_session())
print_page_fault("no RM attachment", pf_addr, pf_ip, pf_type, badge());
src_fault_area.constrain(map_size_log2);
dst_fault_area.constrain(map_size_log2);
if (!src_fault_area.valid() || !dst_fault_area.valid())
PERR("Invalid mapping");
/* register fault at responsible region-manager session */
curr_rm_session->fault(this, dst_fault_area.fault_addr() - curr_rm_base, pf_type);
/* there is no attachment return an error condition */
return 1;
}
/*
* Check if dataspace is compatible with page-fault type
*/
if (pf_type == Rm_session::WRITE_FAULT && !dsc->writable()) {
/*
* Determine mapping size compatible with source and destination,
* and apply platform-specific constraint of mapping sizes.
*/
size_t map_size_log2 = dst_fault_area.common_size_log2(dst_fault_area,
src_fault_area);
map_size_log2 = constrain_map_size_log2(map_size_log2);
/* attempted there is no attachment return an error condition */
print_page_fault("attempted write at read-only memory",
pf_addr, pf_ip, pf_type, badge());
src_fault_area.constrain(map_size_log2);
dst_fault_area.constrain(map_size_log2);
/* register fault at responsible region-manager session */
rm_session->fault(this, src_fault_area.fault_addr(), pf_type);
return 2;
}
/*
* Check if dataspace is compatible with page-fault type
*/
if (pf_type == Rm_session::WRITE_FAULT && !src_dataspace->writable()) {
Mapping mapping(dst_fault_area.base(), src_fault_area.base(),
dsc->cacheability(), dsc->is_io_mem(),
map_size_log2, dsc->writable());
/* attempted there is no attachment return an error condition */
print_page_fault("attempted write at read-only memory",
pf_addr, pf_ip, pf_type, badge());
/*
* On kernels with a mapping database, the 'dsc' dataspace is a leaf
* dataspace that corresponds to a virtual address range within core. To
* prepare the answer for the page fault, we make sure that this range is
* locally mapped in core. On platforms that support map operations of
* pages that are not locally mapped, the 'map_core_local' function may be
* empty.
*/
if (!dsc->is_io_mem())
mapping.prepare_map_operation();
/* register fault at responsible region-manager session */
curr_rm_session->fault(this, src_fault_area.fault_addr(), pf_type);
return 2;
}
Mapping mapping(dst_fault_area.base(),
src_fault_area.base(),
src_dataspace->cacheability(),
src_dataspace->is_io_mem(),
map_size_log2,
src_dataspace->writable());
/*
* On kernels with a mapping database, the 'dsc' dataspace is a leaf
* dataspace that corresponds to a virtual address range within core. To
* prepare the answer for the page fault, we make sure that this range is
* locally mapped in core. On platforms that support map operations of
* pages that are not locally mapped, the 'map_core_local' function may be
* empty.
*/
if (!src_dataspace->is_io_mem())
mapping.prepare_map_operation();
/* answer page fault with a flex-page mapping */
pager.set_reply_mapping(mapping);
return 0;
/* answer page fault with a flex-page mapping */
pager.set_reply_mapping(mapping);
return 0;
};
return member_rm_session()->apply_to_dataspace(pf_addr, lambda);
}
@ -357,107 +314,111 @@ Rm_session_component::attach(Dataspace_capability ds_cap, size_t size,
if (offset < 0 || align_addr(offset, get_page_size_log2()) != offset)
throw Invalid_args();
/* check dataspace validity */
Object_pool<Dataspace_component>::Guard dsc(_ds_ep->lookup_and_lock(ds_cap));
if (!dsc) throw Invalid_dataspace();
auto lambda = [&] (Dataspace_component *dsc) {
/* check dataspace validity */
if (!dsc) throw Invalid_dataspace();
if (!size)
size = dsc->size() - offset;
if (!size)
size = dsc->size() - offset;
/* work with page granularity */
size = align_addr(size, get_page_size_log2());
/* work with page granularity */
size = align_addr(size, get_page_size_log2());
/* deny creation of regions larger then the actual dataspace */
if (dsc->size() < size + offset)
throw Invalid_args();
/* deny creation of regions larger then the actual dataspace */
if (dsc->size() < size + offset)
throw Invalid_args();
/* allocate region for attachment */
void *r = 0;
if (use_local_addr) {
switch (_map.alloc_addr(size, local_addr).value) {
/* allocate region for attachment */
void *r = 0;
if (use_local_addr) {
switch (_map.alloc_addr(size, local_addr).value) {
case Range_allocator::Alloc_return::OUT_OF_METADATA:
throw Out_of_metadata();
case Range_allocator::Alloc_return::OUT_OF_METADATA:
throw Out_of_metadata();
case Range_allocator::Alloc_return::RANGE_CONFLICT:
throw Region_conflict();
case Range_allocator::Alloc_return::RANGE_CONFLICT:
throw Region_conflict();
case Range_allocator::Alloc_return::OK:
r = local_addr;
break;
}
} else {
/*
* Find optimal alignment for new region. First try natural alignment.
* If that is not possible, try again with successively less alignment
* constraints.
*/
size_t align_log2 = log2(size);
for (; align_log2 >= get_page_size_log2(); align_log2--) {
case Range_allocator::Alloc_return::OK:
r = local_addr;
break;
}
} else {
/*
* Don't use an aligment higher than the alignment of the backing
* store. The backing store would constrain the mapping size
* anyway such that a higher alignment of the region is of no use.
* Find optimal alignment for new region. First try natural alignment.
* If that is not possible, try again with successively less alignment
* constraints.
*/
if (((dsc->map_src_addr() + offset) & ((1UL << align_log2) - 1)) != 0)
continue;
size_t align_log2 = log2(size);
for (; align_log2 >= get_page_size_log2(); align_log2--) {
/* try allocating the align region */
Range_allocator::Alloc_return alloc_return =
_map.alloc_aligned(size, &r, align_log2);
/*
* Don't use an aligment higher than the alignment of the backing
* store. The backing store would constrain the mapping size
* anyway such that a higher alignment of the region is of no use.
*/
if (((dsc->map_src_addr() + offset) & ((1UL << align_log2) - 1)) != 0)
continue;
if (alloc_return.is_ok())
break;
else if (alloc_return.value == Range_allocator::Alloc_return::OUT_OF_METADATA) {
/* try allocating the align region */
Range_allocator::Alloc_return alloc_return =
_map.alloc_aligned(size, &r, align_log2);
if (alloc_return.is_ok())
break;
else if (alloc_return.value == Range_allocator::Alloc_return::OUT_OF_METADATA) {
_map.free(r);
throw Out_of_metadata();
}
}
if (align_log2 < get_page_size_log2()) {
_map.free(r);
throw Out_of_metadata();
throw Region_conflict();
}
}
if (align_log2 < get_page_size_log2()) {
/* store attachment info in meta data */
_map.metadata(r, Rm_region((addr_t)r, size, true, dsc, offset, this));
Rm_region *region = _map.metadata(r);
/* also update region list */
Rm_region_ref *p;
try { p = new(&_ref_slab) Rm_region_ref(region); }
catch (Allocator::Out_of_memory) {
_map.free(r);
throw Region_conflict();
}
}
/* store attachment info in meta data */
_map.metadata(r, Rm_region((addr_t)r, size, true, dsc, offset, this));
Rm_region *region = _map.metadata(r);
/* also update region list */
Rm_region_ref *p;
try { p = new(&_ref_slab) Rm_region_ref(region); }
catch (Allocator::Out_of_memory) {
_map.free(r);
throw Out_of_metadata();
}
_regions.insert(p);
/* inform dataspace about attachment */
dsc->attached_to(region);
if (verbose)
PDBG("attach ds %p (a=%lx,s=%zx,o=%lx) @ [%lx,%lx)",
(Dataspace_component *)dsc, dsc->phys_addr(), dsc->size(), offset, (addr_t)r, (addr_t)r + size);
/* check if attach operation resolves any faulting region-manager clients */
for (Rm_faulter *faulter = _faulters.head(); faulter; ) {
/* remember next pointer before possibly removing current list element */
Rm_faulter *next = faulter->next();
if (faulter->fault_in_addr_range((addr_t)r, size)) {
_faulters.remove(faulter);
faulter->continue_after_resolved_fault();
throw Out_of_metadata();
}
faulter = next;
}
_regions.insert(p);
return r;
/* inform dataspace about attachment */
dsc->attached_to(region);
if (verbose)
PDBG("attach ds %p (a=%lx,s=%zx,o=%lx) @ [%lx,%lx)",
(Dataspace_component *)dsc, dsc->phys_addr(), dsc->size(),
offset, (addr_t)r, (addr_t)r + size);
/* check if attach operation resolves any faulting region-manager clients */
for (Rm_faulter *faulter = _faulters.head(); faulter; ) {
/* remember next pointer before possibly removing current list element */
Rm_faulter *next = faulter->next();
if (faulter->fault_in_addr_range((addr_t)r, size)) {
_faulters.remove(faulter);
faulter->continue_after_resolved_fault();
}
faulter = next;
}
return r;
};
return _ds_ep->apply(ds_cap, lambda);
}
@ -612,19 +573,20 @@ Pager_capability Rm_session_component::add_client(Thread_capability thread)
{
/* lookup thread and setup correct parameters */
Object_pool<Cpu_thread_component>::Guard
cpu_thread(_thread_ep->lookup_and_lock(thread));
if (!cpu_thread) throw Invalid_thread();
auto lambda = [&] (Cpu_thread_component *cpu_thread) {
if (!cpu_thread) throw Invalid_thread();
/* determine identification of client when faulting */
badge = cpu_thread->platform_thread()->pager_object_badge();
/* determine identification of client when faulting */
badge = cpu_thread->platform_thread()->pager_object_badge();
/* determine cpu affinity of client thread */
location = cpu_thread->platform_thread()->affinity();
/* determine cpu affinity of client thread */
location = cpu_thread->platform_thread()->affinity();
address_space = cpu_thread->platform_thread()->address_space();
if (!Locked_ptr<Address_space>(address_space).is_valid())
throw Unbound_thread();
address_space = cpu_thread->platform_thread()->address_space();
if (!Locked_ptr<Address_space>(address_space).is_valid())
throw Unbound_thread();
};
_thread_ep->apply(thread, lambda);
}
/* serialize access */
@ -644,112 +606,45 @@ Pager_capability Rm_session_component::add_client(Thread_capability thread)
void Rm_session_component::remove_client(Pager_capability pager_cap)
{
	/*
	 * Remove a pager/RM client. The object-pool entry is detached inside the
	 * 'apply' lambda (which holds the pool's guarantees of exclusive access);
	 * the final destruction happens outside the lambda, see the comment below.
	 */
	Rm_client *client = nullptr;

	auto lambda = [&] (Rm_client *cl) {
		client = cl;
		if (!client) return;

		/* call platform specific dissolve routines */
		_pager_ep->dissolve(cl);

		/* detach client from this session's client list */
		{
			Lock::Guard lock_guard(_lock);
			_clients.remove(cl);
		}
	};
	_pager_ep->apply(pager_cap, lambda);

	/* capability did not refer to a client of this session */
	if (!client) return;

	/*
	 * Rm_client is derived from Pager_object. If the Pager_object is also
	 * derived from Thread_base then the Rm_client object must be
	 * destructed without holding the rm_session_object lock. The native
	 * platform specific Thread_base implementation has to take care that
	 * all in-flight page handling requests are finished before
	 * destruction. (Either by waiting until the end of or by
	 * <deadlock free> cancellation of the last in-flight request.
	 * This operation can also require taking the rm_session_object lock.
	 */
	{
		Lock::Guard lock_guard(_lock);
		client->dissolve_from_faulting_rm_session(this);
	}

	destroy(&_client_slab, client);
}
void Rm_session_component::fault(Rm_faulter *faulter, addr_t pf_addr,
Rm_session::Fault_type pf_type)
{
/* serialize access */
Lock::Guard lock_guard(_lock);
/* remember fault state in faulting thread */
faulter->fault(this, Rm_session::State(pf_type, pf_addr));
@ -869,13 +764,12 @@ Rm_session_component::~Rm_session_component()
_lock.unlock();
{
/* lookup thread and reset pager pointer */
Object_pool<Cpu_thread_component>::Guard
cpu_thread(_thread_ep->lookup_and_lock(thread_cap));
/* lookup thread and reset pager pointer */
auto lambda = [&] (Cpu_thread_component *cpu_thread) {
if (cpu_thread && (cpu_thread->platform_thread()->pager() == cl))
cpu_thread->platform_thread()->pager(0);
}
};
_thread_ep->apply(thread_cap, lambda);
destroy(&_client_slab, cl);

View File

@ -71,33 +71,33 @@ Signal_context_capability Signal_session_component::alloc_context(long imprint)
void Signal_session_component::free_context(Signal_context_capability context_cap)
{
	/*
	 * Access to the context object is confined to the lambda, which the
	 * entrypoint applies under the object-pool's protection.
	 */
	_context_ep->apply(context_cap, [this] (Signal_context_component *context) {
		if (!context) {
			PWRN("specified signal-context capability has wrong type");
			return;
		}

		_context_ep->dissolve(context);
		destroy(&_contexts_slab, context);
	});
}
void Signal_session_component::submit(Signal_context_capability context_cap,
                                      unsigned cnt)
{
	/* look up the context and deliver the signal within the pool lambda */
	_context_ep->apply(context_cap, [&] (Signal_context_component *context) {
		if (!context) {
			/*
			 * We do not use PWRN() to enable the build system to suppress this
			 * warning in release mode (SPECS += release).
			 */
			PDBG("invalid signal-context capability");
			return;
		}

		context->source()->submit(context, _ipc_ostream, cnt);
	});
}

View File

@ -540,13 +540,17 @@ void Backend_memory::free(Genode::Ram_dataspace_capability cap)
{
	using namespace Genode;

	/* extracted from the pool inside the lambda, destroyed afterwards */
	Memory_object_base *object = nullptr;

	auto lambda = [&] (Memory_object_base *o) {
		object = o;
		if (object) {
			object->free();
			memory_pool.remove(object);
		}
	};
	memory_pool.apply(cap, lambda);

	/* guard against an unknown capability (lambda received nullptr) */
	if (object)
		destroy(env()->heap(), object);
}

View File

@ -480,11 +480,14 @@ void Lx::backend_free(Genode::Ram_dataspace_capability cap)
{
	using namespace Genode;

	/* extracted from the pool inside the lambda, destroyed afterwards */
	Memory_object_base *object = nullptr;

	auto lambda = [&] (Memory_object_base *o) {
		object = o;
		if (object) {
			object->free();
			memory_pool.remove(object);
		}
	};
	memory_pool.apply(cap, lambda);

	/* guard against an unknown capability (lambda received nullptr) */
	if (object)
		destroy(env()->heap(), object);
}

View File

@ -807,12 +807,9 @@ class Wm::Nitpicker::Session_component : public Rpc_object<Nitpicker::Session>,
View_handle view_handle(View_capability view_cap, View_handle handle) override
{
	/*
	 * Resolve the view capability within the pool-guarded lambda; an
	 * unknown capability yields an invalid (default) handle.
	 */
	return _ep.rpc_ep().apply(view_cap, [&] (View *view) {
		return (view) ? _view_handle_registry.alloc(*view, handle)
		              : View_handle(); });
}
View_capability view_capability(View_handle handle) override
@ -1071,50 +1068,64 @@ class Wm::Nitpicker::Root : public Genode::Rpc_object<Genode::Typed_root<Session
{
	if (!args.is_valid_string()) throw Root::Invalid_args();

	/* the session may be a regular or a decorator nitpicker session */
	auto lambda = [&] (Rpc_object_base *session) {
		if (!session) {
			PDBG("session lookup failed");
			return;
		}

		Session_component *regular_session =
			dynamic_cast<Session_component *>(session);

		if (regular_session)
			regular_session->upgrade(args.string());

		Decorator_nitpicker_session *decorator_session =
			dynamic_cast<Decorator_nitpicker_session *>(session);

		if (decorator_session)
			decorator_session->upgrade(args.string());
	};
	_ep.rpc_ep().apply(session_cap, lambda);
}
void close(Genode::Session_capability session_cap) override
{
Rpc_object_base *session = _ep.rpc_ep().lookup_and_lock(session_cap);
Genode::Rpc_entrypoint &ep = _ep.rpc_ep();
Session_component *regular_session = dynamic_cast<Session_component *>(session);
Session_component *regular_session =
ep.apply(session_cap, [this] (Session_component *session) {
if (session) {
_sessions.remove(session);
_ep.dissolve(*session);
}
return session;
});
if (regular_session) {
_sessions.remove(regular_session);
_ep.dissolve(*regular_session);
Genode::destroy(_md_alloc, regular_session);
return;
}
if (session == _decorator_session) {
auto decorator_lambda = [this] (Decorator_nitpicker_session *session) {
_ep.dissolve(*_decorator_session);
Genode::destroy(_md_alloc, _decorator_session);
_decorator_session = nullptr;
return session;
};
if (ep.apply(session_cap, decorator_lambda) == _decorator_session) {
Genode::destroy(_md_alloc, _decorator_session);
return;
}
if (session == _layouter_session) {
auto layouter_lambda = [this] (Layouter_nitpicker_session *session) {
_ep.dissolve(*_layouter_session);
Genode::destroy(_md_alloc, _layouter_session);
_layouter_session = nullptr;
return session;
};
if (ep.apply(session_cap, layouter_lambda) == _layouter_session) {
Genode::destroy(_md_alloc, _layouter_session);
return;
}
}

View File

@ -479,102 +479,109 @@ namespace Platform {
Config_access config_access;
/* lookup device component for previous device */
Genode::Object_pool<Device_component>::Guard
prev(_ep->lookup_and_lock(prev_device));
auto lambda = [&] (Device_component *prev)
{
/*
* Start bus scanning after the previous device's location.
* If no valid device was specified for 'prev_device', start at
* the beginning.
*/
int bus = 0, device = 0, function = -1;
/*
* Start bus scanning after the previous device's location.
* If no valid device was specified for 'prev_device',
* start at the beginning.
*/
int bus = 0, device = 0, function = -1;
if (prev) {
Device_config config = prev->config();
bus = config.bus_number();
device = config.device_number();
function = config.function_number();
}
if (prev) {
Device_config config = prev->config();
bus = config.bus_number();
device = config.device_number();
function = config.function_number();
}
/*
* Scan buses for devices.
* If no device is found, return an invalid capability.
*/
Device_config config;
/*
* Scan buses for devices.
* If no device is found, return an invalid capability.
*/
Device_config config;
while (true) {
function += 1;
if (!_find_next(bus, device, function, &config, &config_access))
return Device_capability();
while (true) {
function += 1;
if (!_find_next(bus, device, function, &config,
&config_access))
return Device_capability();
/* get new bdf values */
bus = config.bus_number();
device = config.device_number();
function = config.function_number();
/* get new bdf values */
bus = config.bus_number();
device = config.device_number();
function = config.function_number();
/* if filter of driver don't match skip and continue */
if ((config.class_code() ^ device_class) & class_mask)
continue;
/* if filter of driver don't match skip and continue */
if ((config.class_code() ^ device_class) & class_mask)
continue;
/* check that policy permit access to the matched device */
if (permit_device(bus, device, function,
config.class_code()))
break;
}
/* check that policy permit access to the matched device */
if (permit_device(bus, device, function,
config.class_code()))
break;
}
/* lookup if we have a extended pci config space */
Genode::addr_t config_space = lookup_config_space(bus, device,
function);
/* lookup if we have a extended pci config space */
Genode::addr_t config_space =
lookup_config_space(bus, device, function);
/*
* A device was found. Create a new device component for the
* device and return its capability.
*/
try {
Device_component * dev = new (_device_slab) Device_component(config, config_space, _ep, this, msi_usage());
/*
* A device was found. Create a new device component for the
* device and return its capability.
*/
try {
Device_component * dev = new (_device_slab)
Device_component(config, config_space, _ep, this,
msi_usage());
/* if more than one driver uses the device - warn about */
if (bdf_in_use.get(Device_config::MAX_BUSES * bus +
Device_config::MAX_DEVICES * device +
function, 1))
PERR("Device %2x:%2x.%u is used by more than one "
"driver - session '%s'.", bus, device, function,
_label.string());
else
bdf_in_use.set(Device_config::MAX_BUSES * bus +
Device_config::MAX_DEVICES * device +
function, 1);
/* if more than one driver uses the device - warn about */
if (bdf_in_use.get(Device_config::MAX_BUSES * bus +
Device_config::MAX_DEVICES * device +
function, 1))
PERR("Device %2x:%2x.%u is used by more than one "
"driver - session '%s'.", bus, device, function,
_label.string());
else
bdf_in_use.set(Device_config::MAX_BUSES * bus +
Device_config::MAX_DEVICES * device +
function, 1);
_device_list.insert(dev);
return _ep->manage(dev);
} catch (Genode::Allocator::Out_of_memory) {
throw Device::Quota_exceeded();
}
_device_list.insert(dev);
return _ep->manage(dev);
} catch (Genode::Allocator::Out_of_memory) {
throw Device::Quota_exceeded();
}
};
return _ep->apply(prev_device, lambda);
}
void release_device(Device_capability device_cap)
{
	auto lambda = [&] (Device_component *device)
	{
		/* unknown capability - nothing to release */
		if (!device)
			return;

		unsigned const bus  = device->config().bus_number();
		unsigned const dev  = device->config().device_number();
		unsigned const func = device->config().function_number();

		/* mark bus:device.function as no longer claimed by a driver */
		bdf_in_use.clear(Device_config::MAX_BUSES * bus +
		                 Device_config::MAX_DEVICES * dev + func, 1);

		_device_list.remove(device);
		_ep->dissolve(device);

		/* the device slab is used for valid configs only */
		if (device->config().valid())
			destroy(_device_slab, device);
		else
			destroy(_md_alloc, device);
	};

	/* lookup device component and destroy it within the pool lambda */
	_ep->apply(device_cap, lambda);
}
Genode::Io_mem_dataspace_capability assign_device(Device_component * device)
@ -601,10 +608,8 @@ namespace Platform {
{
	using namespace Genode;

	/* resolve the device capability and delegate to the typed overload */
	return _ep->apply(device_cap, [&] (Device_component *device) {
		return assign_device(device); });
}
/**

View File

@ -52,7 +52,6 @@ class Loader::Session_component : public Rpc_object<Session>
void _close(Rom_session_component *rom)
{
	/*
	 * Callers are responsible for removing 'rom' from the entrypoint
	 * beforehand; this helper only unlinks and destroys the object.
	 */
	_rom_sessions.remove(rom);
	destroy(&_md_alloc, rom);
}
@ -73,7 +72,9 @@ class Loader::Session_component : public Rpc_object<Session>
Lock::Guard guard(_lock);
while (_rom_sessions.first()) {
_close(_rom_sessions.first()); }
_ep.remove(_rom_sessions.first());
_close(_rom_sessions.first());
}
}
Genode::Session_capability session(char const *args,
@ -108,10 +109,12 @@ class Loader::Session_component : public Rpc_object<Session>
{
Lock::Guard guard(_lock);
Rpc_object_base *rom = _ep.lookup_and_lock(session);
Rom_session_component *component;
Rom_session_component *component =
dynamic_cast<Rom_session_component *>(rom);
_ep.apply(session, [&] (Rom_session_component *rsc) {
component = rsc;
if (component) _ep.remove(component);
});
if (component) {
_close(component);

View File

@ -841,12 +841,12 @@ class Nitpicker::Session_component : public Genode::Rpc_object<Session>,
View_handle view_handle(View_capability view_cap, View_handle handle) override
{
	/*
	 * Resolve the view capability within the pool-guarded lambda; an
	 * unknown capability yields an invalid (default) handle.
	 */
	auto lambda = [&] (View *view)
	{
		return (view) ? _view_handle_registry.alloc(*view, handle)
		              : View_handle();
	};
	return _ep.apply(view_cap, lambda);
}
View_capability view_capability(View_handle handle) override
@ -924,15 +924,12 @@ class Nitpicker::Session_component : public Genode::Rpc_object<Session>,
return;
/* lookup targeted session object */
Session_component * const session =
session_cap.valid() ? dynamic_cast<Session_component *>(_ep.lookup_and_lock(session_cap)) : 0;
_mode.focused_session(session);
if (session)
session->release();
report_session(_focus_reporter, session);
auto lambda = [this] (Session_component *session)
{
_mode.focused_session(session);
report_session(_focus_reporter, session);
};
_ep.apply(session_cap, lambda);
}
void session_control(Label suffix, Session_control control) override

View File

@ -44,8 +44,8 @@ Genode::Session_capability Session_component::cap()
bool Session_component::belongs_to(Genode::Session_capability cap)
{
	/* the session belongs to this component iff 'cap' resolves to 'this' */
	return _ep.apply(cap, [this] (Session_component *session) {
		return session == this; });
}

View File

@ -204,36 +204,46 @@ namespace Gdb_monitor {
{
	using namespace Genode;

	/* resolve the session and forward the quota upgrade to the child */
	auto lambda = [&] (Child_session *session) {
		if (!session) {
			PERR("attempt to upgrade unknown session");
			return;
		}

		Genode::size_t ram_quota =
			Arg_string::find_arg(args.string(),
			                     "ram_quota").ulong_value(0);

		/* forward session quota to child */
		env()->ram_session()->transfer_quota(_child_ram, ram_quota);

		session->ram_quota += ram_quota;

		/* inform child about quota upgrade */
		_child_root.upgrade(session_cap, args);
	};

	_sessions.apply(session_cap, lambda);
}
void close(Session_capability session_cap)
{
using namespace Genode;
Child_session *session = _sessions.lookup_and_lock(session_cap);
if (!session) {
PERR("attempt to close unknown session");
return;
}
Child_session *session;
auto lambda = [&] (Child_session *s) {
session = s;
if (!session) {
PERR("attempt to close unknown session");
return;
}
_sessions.remove(session);
};
_sessions.apply(session_cap, lambda);
Genode::size_t ram_quota = session->ram_quota;
_sessions.remove_locked(session);
destroy(env()->heap(), session);
_child_root.close(session_cap);

View File

@ -42,13 +42,12 @@ Rm_session_component::Region *Rm_session_component::find_region(void *local_addr
*offset_in_region = ((addr_t)local_addr - (addr_t)region->start());
// PDBG("offset_in_region = %lx", *offset_in_region);
Object_pool<Dataspace_object>::Guard managed_ds_obj(_managed_ds_map->lookup_and_lock(region->ds_cap()));
if (managed_ds_obj) {
// PDBG("managed dataspace detected");
region = managed_ds_obj->rm_session_component()->find_region((void*)*offset_in_region, offset_in_region);
// if (region)
// PDBG("found sub region: start = %p, offset = %lx", region->start(), *offset_in_region);
}
_managed_ds_map->apply(region->ds_cap(), [&] (Dataspace_object *managed_ds_obj) {
if (managed_ds_obj)
region =
managed_ds_obj->rm_session_component()->find_region((void*)*offset_in_region,
offset_in_region);
});
return region;
}

View File

@ -118,12 +118,8 @@ namespace Noux {
};
class Dataspace_registry
class Dataspace_registry : public Object_pool<Dataspace_info>
{
private:
Object_pool<Dataspace_info> _pool;
public:
~Dataspace_registry()
@ -136,25 +132,8 @@ namespace Noux {
* created via 'Rm_dataspace_info::fork', are not handled by
* those destructors. So we have to clean them up here.
*/
while(Dataspace_info *info = _pool.first()) {
_pool.remove_locked(info);
destroy(env()->heap(), info);
}
}
void insert(Dataspace_info *info)
{
_pool.insert(info);
}
void remove(Dataspace_info *info)
{
_pool.remove_locked(info);
}
Dataspace_info *lookup_info(Dataspace_capability ds_cap)
{
return _pool.lookup_and_lock(ds_cap);
remove_all([&] (Dataspace_info *info) {
destroy(env()->heap(), info); });
}
};
@ -172,18 +151,17 @@ namespace Noux {
~Static_dataspace_info()
{
	/* unregister this info object via the registry's pool lambda */
	auto lambda = [this] (Static_dataspace_info *info) {
		if (!info) {
			PERR("lookup of binary ds info failed");
			return;
		}

		_ds_registry.remove(info);

		info->dissolve_users();
	};
	_ds_registry.apply(ds_cap(), lambda);
}
Dataspace_capability fork(Ram_session_capability,

View File

@ -126,26 +126,31 @@ namespace Noux {
void close(Genode::Session_capability session)
{
Rm_session_component * rm_session =
dynamic_cast<Rm_session_component *>(_ep.lookup_and_lock(session));
if (!rm_session) {
PWRN("Unexpected call of close with non-RM-session argument");
return;
}
Dataspace_info *info;
/* use RM dataspace as key to obtain the dataspace info object */
Dataspace_capability ds_cap = rm_session->dataspace();
auto lambda = [&] (Rm_session_component *rm_session) {
if (!rm_session) {
PWRN("Unexpected call of close with non-RM-session argument");
return;
}
/* release dataspace info */
Dataspace_info *info = _ds_registry.lookup_info(ds_cap);
if (!info) {
PWRN("Could not lookup dataspace info for local RM session");
return;
}
/* use RM dataspace as key to obtain the dataspace info object */
Dataspace_capability ds_cap = rm_session->dataspace();
_ds_registry.remove(info);
/* release dataspace info */
_ds_registry.apply(ds_cap, [&] (Dataspace_info *di) {
info = di;
if (!info) {
PWRN("Could not lookup dataspace info for local RM session");
return;
}
info->dissolve_users();
_ds_registry.remove(info);
info->dissolve_users();
});
};
_ep.apply(session, lambda);
/* 'rm_session' is deleted by deleting Rm_dataspace_info 'info' */
destroy(env()->heap(), info);

View File

@ -59,18 +59,20 @@ namespace Noux {
void close(Genode::Session_capability session)
{
	/* dissolved inside the lambda, destroyed afterwards */
	Rom_session_component *rom_session = nullptr;

	_ep.apply(session, [&] (Rom_session_component *rsc) {
		rom_session = rsc;

		if (!rom_session) {
			PWRN("Unexpected call of close with non-ROM-session argument");
			return;
		}

		_ep.dissolve(rom_session);
	});

	/* guard against a non-ROM-session capability (lambda got nullptr) */
	if (rom_session)
		destroy(env()->heap(), rom_session);
}
};
}

View File

@ -153,22 +153,26 @@ namespace Noux {
void free(Ram_dataspace_capability ds_cap)
{
	/* unlinked inside the lambda, destroyed afterwards */
	Ram_dataspace_info *ds_info = nullptr;

	auto lambda = [&] (Ram_dataspace_info *rdi) {
		ds_info = rdi;

		if (!ds_info) {
			PERR("RAM free: dataspace lookup failed");
			return;
		}

		_registry.remove(ds_info);

		ds_info->dissolve_users();

		/* account the released quota before freeing the dataspace */
		_list.remove(ds_info);
		_used_quota -= ds_info->size();

		env()->ram_session()->free(ds_cap);
	};
	_registry.apply(ds_cap, lambda);

	/* guard against an unknown capability (lambda received nullptr) */
	if (ds_info)
		destroy(env()->heap(), ds_info);
}

View File

@ -124,21 +124,22 @@ class Noux::Rm_session_component : public Rpc_object<Rm_session>
Region * const region = _lookup_region_by_addr(addr);
if (!region) { return cap(); }
/* if there is no info for the region it can't be a sub RM */
Dataspace_capability ds_cap = region->ds;
typedef Object_pool<Dataspace_info>::Guard Info_guard;
Info_guard info(_ds_registry.lookup_info(ds_cap));
if (!info) { return cap(); }
auto lambda = [&] (Dataspace_info *info)
{
/* if there is no info for the region it can't be a sub RM */
if (!info) { return cap(); }
/* ask the dataspace info for an appropriate sub RM */
addr_t const region_base = region->local_addr;
addr_t const region_off = region->offset;
addr_t const sub_addr = addr - region_base + region_off;
Rm_session_capability sub_rm = info->lookup_rm_session(sub_addr);
/* ask the dataspace info for an appropriate sub RM */
addr_t const region_base = region->local_addr;
addr_t const region_off = region->offset;
addr_t const sub_addr = addr - region_base + region_off;
Rm_session_capability sub_rm = info->lookup_rm_session(sub_addr);
/* if the result is invalid the dataspace is no sub RM */
if (!sub_rm.valid()) { return cap(); }
return sub_rm;
/* if the result is invalid the dataspace is no sub RM */
if (!sub_rm.valid()) { return cap(); }
return sub_rm;
};
return _ds_registry.apply(region->ds, lambda);
}
/**
@ -158,49 +159,49 @@ class Noux::Rm_session_component : public Rpc_object<Rm_session>
{
Lock::Guard guard(_region_lock);
for (Region *curr = _regions.first(); curr; curr = curr->next_region()) {
auto lambda = [&] (Dataspace_info *info)
{
Dataspace_capability ds;
if (info) {
Dataspace_capability ds;
ds = info->fork(dst_ram, ds_registry, ep);
Object_pool<Dataspace_info>::Guard info(_ds_registry.lookup_info(curr->ds));
/*
* XXX We could detect dataspaces that are attached
* more than once. For now, we create a new fork
* for each attachment.
*/
if (info) {
} else {
ds = info->fork(dst_ram, ds_registry, ep);
PWRN("replay: missing ds_info for dataspace at addr 0x%lx",
curr->local_addr);
/*
* XXX We could detect dataspaces that are attached
* more than once. For now, we create a new fork
* for each attachment.
*/
/*
* If the dataspace is not a RAM dataspace, assume that
* it's a ROM dataspace.
*
* XXX Handle ROM dataspaces explicitly. For once, we
* need to make sure that they remain available
* until the child process exits even if the parent
* process exits earlier. Furthermore, we would
* like to detect unexpected dataspaces.
*/
ds = curr->ds;
}
} else {
if (!ds.valid()) {
PERR("replay: Error while forking dataspace");
return;
}
PWRN("replay: missing ds_info for dataspace at addr 0x%lx",
curr->local_addr);
/*
* If the dataspace is not a RAM dataspace, assume that
* it's a ROM dataspace.
*
* XXX Handle ROM dataspaces explicitly. For once, we
* need to make sure that they remain available
* until the child process exits even if the parent
* process exits earlier. Furthermore, we would
* like to detect unexpected dataspaces.
*/
ds = curr->ds;
}
if (!ds.valid()) {
PERR("replay: Error while forking dataspace");
continue;
}
Rm_session_client(dst_rm).attach(ds, curr->size,
curr->offset,
true,
curr->local_addr);
}
Rm_session_client(dst_rm).attach(ds, curr->size,
curr->offset,
true,
curr->local_addr);
};
_ds_registry.apply(curr->ds, lambda);
};
}
void poke(addr_t dst_addr, void const *src, size_t len)
@ -235,14 +236,13 @@ class Noux::Rm_session_component : public Rpc_object<Rm_session>
local_addr = region->local_addr;
}
Object_pool<Dataspace_info>::Guard info(_ds_registry.lookup_info(ds_cap));
if (!info) {
PERR("attempt to write to unknown dataspace type");
for (;;);
return;
}
info->poke(dst_addr - local_addr, src, len);
_ds_registry.apply(ds_cap, [&] (Dataspace_info *info) {
if (!info) {
PERR("attempt to write to unknown dataspace type");
for (;;);
}
info->poke(dst_addr - local_addr, src, len);
});
}
@ -275,20 +275,21 @@ class Noux::Rm_session_component : public Rpc_object<Rm_session>
Region(*this, ds, size, offset, local_addr);
/* register region as user of RAM dataspaces */
auto lambda = [&] (Dataspace_info *info)
{
Object_pool<Dataspace_info>::Guard info(_ds_registry.lookup_info(ds));
if (info) {
info->register_user(*region);
} else {
if (verbose_attach) {
PWRN("Trying to attach unknown dataspace type");
PWRN(" ds_info@%p at 0x%lx size=%zd offset=0x%lx",
info.object(), (long)local_addr,
info, (long)local_addr,
Dataspace_client(ds).size(), (long)offset);
}
}
}
};
_ds_registry.apply(ds, lambda);
/*
* Record attachment for later replay (needed during
@ -315,10 +316,8 @@ class Noux::Rm_session_component : public Rpc_object<Rm_session>
_regions.remove(region);
}
{
Object_pool<Dataspace_info>::Guard info(_ds_registry.lookup_info(region->ds));
if (info) info->unregister_user(*region);
}
_ds_registry.apply(region->ds, [&] (Dataspace_info *info) {
if (info) info->unregister_user(*region); });
destroy(env()->heap(), region);

View File

@ -69,17 +69,16 @@ namespace Noux {
* Lookup and lock ds info instead of directly accessing
* the '_ds_info' member.
*/
Object_pool<Dataspace_info>::Guard
info(_ds_registry.lookup_info(_ds_info.ds_cap()));
if (!info) {
_ds_registry.apply(_ds_info.ds_cap(), [this] (Dataspace_info *info) {
if (!info) {
PERR("~Rom_session_component: unexpected !info");
return;
}
}
_ds_registry.remove(&_ds_info);
_ds_registry.remove(&_ds_info);
info->dissolve_users();
info->dissolve_users();
});
}