/*
 * \brief  Support code for the thread API
 * \author Norman Feske
 * \author Stefan Kalkowski
 * \date   2010-01-13
 */

/*
 * Copyright (C) 2010-2015 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */

/* Genode includes */
#include <base/thread.h>
#include <base/printf.h>
#include <base/synced_allocator.h>
#include <rm_session/rm_session.h>
#include <ram_session/ram_session.h>

/* base-internal includes */
#include <base/internal/stack_area.h>

/* local includes */
#include <platform.h>
#include <map_local.h>
#include <dataspace_component.h>

using namespace Genode;


/**
 * Region-manager session for allocating stacks
 *
 * This class corresponds to the managed dataspace that is normally
 * used for organizing stacks within the stack area.
 * In contrast to the ordinary implementation, core's version does
 * not split the allocation of memory from virtual-memory management.
 * Because no "real" dataspaces and capabilities referring to them are
 * available without an entrypoint in place, the allocation of a
 * dataspace has no effect. Instead, the attachment of the thereby
 * "empty" dataspace does both: allocation and attachment.
 */
class Stack_area_rm_session : public Rm_session
{
	private:

		using Ds_slab = Synced_allocator<Tslab<Dataspace_component,
		                                       get_page_size()> >;

		Ds_slab _ds_slab { platform()->core_mem_alloc() };

		enum { verbose = false };

	public:

		/**
		 * Allocate and attach on-the-fly backing store to stack area
		 */
		Local_addr attach(Dataspace_capability ds_cap, /* ignored capability */
		                  size_t size, off_t offset,
		                  bool use_local_addr, Local_addr local_addr,
		                  bool executable)
		{
			/* allocate physical memory */
			size = round_page(size);
			void *phys_base;
			Range_allocator *ra = platform_specific()->ram_alloc();
			if (ra->alloc_aligned(size, &phys_base,
			                      get_page_size_log2()).is_error()) {
				PERR("could not allocate backing store for new stack");
				return (addr_t)0;
			}

			if (verbose)
				PDBG("phys_base = %p, size = 0x%zx", phys_base, size);

			Dataspace_component *ds = new (&_ds_slab)
				Dataspace_component(size, 0, (addr_t)phys_base, CACHED, true, 0);
			if (!ds) {
				PERR("dataspace for core stack does not exist");
				return (addr_t)0;
			}

			addr_t core_local_addr = stack_area_virtual_base()
			                       + (addr_t)local_addr;

			if (verbose)
				PDBG("core_local_addr = %lx, phys_addr = %lx, size = 0x%zx",
				     core_local_addr, ds->phys_addr(), ds->size());

			if (!map_local(ds->phys_addr(), core_local_addr,
			               ds->size() >> get_page_size_log2())) {
				PERR("could not map phys %lx at local %lx",
				     ds->phys_addr(), core_local_addr);
				return (addr_t)0;
			}

			ds->assign_core_local_addr((void*)core_local_addr);

			return local_addr;
		}

		void detach(Local_addr local_addr)
		{
			using Genode::addr_t;

			if ((addr_t)local_addr >= stack_area_virtual_size())
				return;

			addr_t const detach = stack_area_virtual_base() + (addr_t)local_addr;
			addr_t const stack  = stack_virtual_size();
			addr_t const pages  = ((detach & ~(stack - 1)) + stack - detach)
			                      >> get_page_size_log2();

			unmap_local(detach, pages);
		}

		Pager_capability add_client(Thread_capability) {
			return Pager_capability(); }

		void remove_client(Pager_capability) { }

		void fault_handler(Signal_context_capability) { }

		State state() { return State(); }

		Dataspace_capability dataspace() { return Dataspace_capability(); }
};


class Stack_area_ram_session : public Ram_session
{
	public:

		Ram_dataspace_capability alloc(size_t size, Cache_attribute cached) {
			return reinterpret_cap_cast<Ram_dataspace>(Native_capability()); }

		void free(Ram_dataspace_capability ds) { }

		int ref_account(Ram_session_capability ram_session) { return 0; }

		int transfer_quota(Ram_session_capability ram_session, size_t amount) {
			return 0; }

		size_t quota() { return 0; }

		size_t used() { return 0; }
};
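
/*
 * Illustrative sketch of the page-count arithmetic used by 'detach' above.
 * The helper name 'pages_to_unmap' and the numbers in the comments are made
 * up for illustration and are not part of the stack-area interface. It
 * assumes, as the code above does, that each stack occupies a slot of
 * power-of-two size within the stack area.
 */
static inline addr_t pages_to_unmap(addr_t detach_addr, addr_t stack_size)
{
	/* base of the power-of-two-sized stack slot that contains 'detach_addr' */
	addr_t const slot_base = detach_addr & ~(stack_size - 1);

	/*
	 * Number of pages from 'detach_addr' up to the end of the enclosing slot.
	 * Example with made-up values: a 1 MiB slot (0x100000), 4 KiB pages, and
	 * a detach address 0x30000 bytes into the slot yields
	 * (0x100000 - 0x30000) >> 12 = 0xd0 pages.
	 */
	return (slot_base + stack_size - detach_addr) >> get_page_size_log2();
}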

/**
 * Return single instance of the stack-area RM and RAM session
 */
namespace Genode {

	Rm_session *env_stack_area_rm_session()
	{
		static Stack_area_rm_session inst;
		return &inst;
	}

	Ram_session *env_stack_area_ram_session()
	{
		static Stack_area_ram_session inst;
		return &inst;
	}
}
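
/*
 * Usage sketch (hypothetical, for illustration only): core-local code that
 * needs backing store within the stack area would obtain the RM session via
 * 'env_stack_area_rm_session' and attach at a stack-area-relative offset.
 * The function name 'illustrate_stack_area_usage' and the size and offset
 * values below are made-up examples, not part of the original interface.
 */
static inline void illustrate_stack_area_usage()
{
	Genode::Rm_session *rm = Genode::env_stack_area_rm_session();

	/*
	 * Allocate and map 16 KiB of backing store at offset 0 within the stack
	 * area. The dataspace capability argument is ignored by the core-specific
	 * implementation above, which allocates the memory itself.
	 */
	rm->attach(Genode::Dataspace_capability(), 16*1024, 0, true, 0, false);

	/* remove the mapping again */
	rm->detach(0);
}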