/*
 * \brief  Instance of the (Genode) capability space for non-core components
 * \author Norman Feske
 * \date   2015-05-11
 */

/*
 * Copyright (C) 2015-2017 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */
/* base includes */
#include <base/capability.h>
#include <base/log.h>
#include <util/bit_allocator.h>
/* base-internal includes */
#include <base/internal/capability_data.h>
#include <base/internal/capability_space_sel4.h>
/**
* Definition of capability meta data
*/
struct Genode::Native_capability::Data : Capability_data
{
	/* meta data of a capability that refers to an RPC object with the given key */
	Data(Rpc_obj_key key) : Capability_data(key) { }

	/* meta data of an invalid capability (no RPC object key) */
	Data() { }
};
using namespace Genode;
/**
 * Singleton instance of component-local capability space
 */
namespace {
struct Local_capability_space
:
2017-06-22 18:47:02 +02:00
Capability_space_sel4<8*1024, 1UL << NUM_CORE_MANAGED_SEL_LOG2,
Native_capability::Data>
2015-05-10 19:51:10 +02:00
{ };
static Local_capability_space &local_capability_space()
{
static Local_capability_space capability_space;
return capability_space;
}
}
/*************************************************
** Allocator for component-local cap selectors **
*************************************************/
namespace {
class Sel_alloc : Bit_allocator<1UL << CSPACE_SIZE_LOG2>
{
private:
Follow practices suggested by "Effective C++" The patch adjust the code of the base, base-<kernel>, and os repository. To adapt existing components to fix violations of the best practices suggested by "Effective C++" as reported by the -Weffc++ compiler argument. The changes follow the patterns outlined below: * A class with virtual functions can no longer publicly inherit base classed without a vtable. The inherited object may either be moved to a member variable, or inherited privately. The latter would be used for classes that inherit 'List::Element' or 'Avl_node'. In order to enable the 'List' and 'Avl_tree' to access the meta data, the 'List' must become a friend. * Instead of adding a virtual destructor to abstract base classes, we inherit the new 'Interface' class, which contains a virtual destructor. This way, single-line abstract base classes can stay as compact as they are now. The 'Interface' utility resides in base/include/util/interface.h. * With the new warnings enabled, all member variables must be explicitly initialized. Basic types may be initialized with '='. All other types are initialized with braces '{ ... }' or as class initializers. If basic types and non-basic types appear in a row, it is nice to only use the brace syntax (also for basic types) and align the braces. * If a class contains pointers as members, it must now also provide a copy constructor and assignment operator. In the most cases, one would make them private, effectively disallowing the objects to be copied. Unfortunately, this warning cannot be fixed be inheriting our existing 'Noncopyable' class (the compiler fails to detect that the inheriting class cannot be copied and still gives the error). For now, we have to manually add declarations for both the copy constructor and assignment operator as private class members. 
Those declarations should be prepended with a comment like this: /* * Noncopyable */ Thread(Thread const &); Thread &operator = (Thread const &); In the future, we should revisit these places and try to replace the pointers with references. In the presence of at least one reference member, the compiler would no longer implicitly generate a copy constructor. So we could remove the manual declaration. Issue #465
2017-12-21 15:42:15 +01:00
Lock _lock { };
public:
Sel_alloc() { _reserve(0, 1UL << NUM_CORE_MANAGED_SEL_LOG2); }
unsigned alloc()
{
Lock::Guard guard(_lock);
return Bit_allocator::alloc();
}
void free(unsigned sel)
{
Lock::Guard guard(_lock);
Bit_allocator::free(sel);
}
};
static Sel_alloc &sel_alloc()
{
static Sel_alloc inst;
return inst;
}
}
/******************************************************
 ** Implementation of the Capability_space interface **
 ******************************************************/
Native_capability Capability_space::create_ep_cap(Thread &ep_thread)
{
	/* the thread's endpoint selector is recorded in its native-thread info */
	Cap_sel const sel = Cap_sel(ep_thread.native_thread().ep_sel);

	/* register the selector in the local capability space (no RPC object key) */
	Native_capability::Data &data =
		local_capability_space().create_capability(sel, Rpc_obj_key());

	return Native_capability(&data);
}
void Capability_space::dec_ref(Native_capability::Data &data)
{
	/* forward reference-count decrement to the local capability space */
	auto &space = local_capability_space();
	space.dec_ref(data);
}
void Capability_space::inc_ref(Native_capability::Data &data)
{
	/* forward reference-count increment to the local capability space */
	auto &space = local_capability_space();
	space.inc_ref(data);
}
Rpc_obj_key Capability_space::rpc_obj_key(Native_capability::Data const &data)
{
	/* the RPC object key is maintained by the local capability space */
	auto &space = local_capability_space();
	return space.rpc_obj_key(data);
}
void Capability_space::print(Output &out, Native_capability::Data const &data)
{
	/* delegate textual output of the capability to the local capability space */
	local_capability_space().print(out, data);
}
Capability_space::Ipc_cap_data Capability_space::ipc_cap_data(Native_capability const &cap)
{
	/* obtain the IPC-relevant part of the capability's meta data */
	auto &space = local_capability_space();
	return space.ipc_cap_data(*cap.data());
}
Native_capability Capability_space::lookup(Rpc_obj_key rpc_obj_key)
{
	/* look up existing meta data by RPC object key */
	Native_capability::Data * const data =
		local_capability_space().lookup(rpc_obj_key);

	/* yield an invalid capability if the key is unknown */
	if (!data)
		return Native_capability();

	return Native_capability(data);
}
unsigned Capability_space::alloc_rcv_sel()
{
	/* pick a fresh component-local selector */
	unsigned const sel = sel_alloc().alloc();

	/* direct capabilities received via IPC into the new selector */
	seL4_SetCapReceivePath(INITIAL_SEL_CNODE, sel, CSPACE_SIZE_LOG2);

	return sel;
}
void Capability_space::reset_sel(unsigned sel)
{
sel4: update to version 2.1 This patch updates seL4 from the experimental branch of one year ago to the master branch of version 2.1. The transition has the following implications. In contrast to the experimental branch, the master branch has no way to manually define the allocation of kernel objects within untyped memory ranges. Instead, the kernel maintains a built-in allocation policy. This policy rules out the deallocation of once-used parts of untyped memory. The only way to reuse memory is to revoke the entire untyped memory range. Consequently, we cannot share a large untyped memory range for kernel objects of different protection domains. In order to reuse memory at a reasonably fine granularity, we need to split the initial untyped memory ranges into small chunks that can be individually revoked. Those chunks are called "untyped pages". An untyped page is a 4 KiB untyped memory region. The bootstrapping of core has to employ a two-stage allocation approach now. For creating the initial kernel objects for core, which remain static during the entire lifetime of the system, kernel objects are created directly out of the initial untyped memory regions as reported by the kernel. The so-called "initial untyped pool" keeps track of the consumption of those untyped memory ranges by mimicking the kernel's internal allocation policy. Kernel objects created this way can be of any size. For example the phys CNode, which is used to store page-frame capabilities is 16 MiB in size. Also, core's CSpace uses a relatively large CNode. After the initial setup phase, all remaining untyped memory is turned into untyped pages. From this point on, new created kernel objects cannot exceed 4 KiB in size because one kernel object cannot span multiple untyped memory regions. The capability selectors for untyped pages are organized similarly to those of page-frame capabilities. 
There is a new 2nd-level CNode (UNTYPED_CORE_CNODE) that is dimensioned according to the maximum amount of physical memory (1M entries, each entry representing 4 KiB). The CNode is organized such that an index into the CNode directly corresponds to the physical frame number of the underlying memory. This way, we can easily determine a untyped page selector for any physical addresses, i.e., for revoking the kernel objects allocated at a specific physical page. The downside is the need for another 16 MiB chunk of meta data. Also, we need to keep in mind that this approach won't scale to 64-bit systems. We will eventually need to replace the PHYS_CORE_CNODE and UNTYPED_CORE_CNODE by CNode hierarchies to model a sparsely populated CNode. The size constrain of kernel objects has the immediate implication that the VM CSpaces of protection domains must be organized via several levels of CNodes. I.e., as the top-level CNode of core has a size of 2^12, the remaining 20 PD-specific CSpace address bits are organized as a 2nd-level 2^4 padding CNode, a 3rd-level 2^8 CNode, and several 4th-level 2^8 leaf CNodes. The latter contain the actual selectors for the page tables and page-table entries of the respective PD. As another slight difference from the experimental branch, the master branch requires the explicit assignment of page directories to an ASID pool. Besides the adjustment to the new seL4 version, the patch introduces a dedicated type for capability selectors. Previously, we just used to represent them as unsigned integer values, which became increasingly confusing. The new type 'Cap_sel' is a PD-local capability selector. The type 'Cnode_index' is an index into a CNode (which is not generally not the entire CSpace of the PD). Fixes #1887
2016-02-03 14:50:44 +01:00
int ret = seL4_CNode_Delete(INITIAL_SEL_CNODE, sel, CSPACE_SIZE_LOG2);
if (ret != 0)
warning("seL4_CNode_Delete returned ", ret);
2015-05-11 08:43:43 +02:00
}
Native_capability Capability_space::import(Ipc_cap_data ipc_cap_data)
{
	/* register the received selector and key in the local capability space */
	Native_capability::Data &data =
		local_capability_space().create_capability(ipc_cap_data.sel,
		                                           ipc_cap_data.rpc_obj_key);

	return Native_capability(&data);
}