/*
 * \brief  Associate page-table and frame selectors with virtual addresses
 * \author Norman Feske
 * \date   2015-05-04
 */

/*
 * Copyright (C) 2015 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */
|
|
|
|
|
|
|
|
#ifndef _CORE__INCLUDE__PAGE_TABLE_REGISTRY_H_
|
|
|
|
#define _CORE__INCLUDE__PAGE_TABLE_REGISTRY_H_
|
|
|
|
|
|
|
|
/* Genode includes */
|
|
|
|
#include <util/list.h>
|
|
|
|
#include <base/exception.h>
|
2016-07-08 13:44:52 +02:00
|
|
|
#include <base/log.h>
|
2015-05-05 00:15:50 +02:00
|
|
|
|
|
|
|
/* core includes */
|
|
|
|
#include <util.h>
|
sel4: update to version 2.1
This patch updates seL4 from the experimental branch of one year ago to
the master branch of version 2.1. The transition has the following
implications.
In contrast to the experimental branch, the master branch has no way to
manually define the allocation of kernel objects within untyped memory
ranges. Instead, the kernel maintains a built-in allocation policy. This
policy rules out the deallocation of once-used parts of untyped memory.
The only way to reuse memory is to revoke the entire untyped memory
range. Consequently, we cannot share a large untyped memory range for
kernel objects of different protection domains. In order to reuse memory
at a reasonably fine granularity, we need to split the initial untyped
memory ranges into small chunks that can be individually revoked. Those
chunks are called "untyped pages". An untyped page is a 4 KiB untyped
memory region.
The bootstrapping of core has to employ a two-stage allocation approach
now. For creating the initial kernel objects for core, which remain
static during the entire lifetime of the system, kernel objects are
created directly out of the initial untyped memory regions as reported
by the kernel. The so-called "initial untyped pool" keeps track of the
consumption of those untyped memory ranges by mimicking the kernel's
internal allocation policy. Kernel objects created this way can be of
any size. For example the phys CNode, which is used to store page-frame
capabilities is 16 MiB in size. Also, core's CSpace uses a relatively
large CNode.
After the initial setup phase, all remaining untyped memory is turned
into untyped pages. From this point on, new created kernel objects
cannot exceed 4 KiB in size because one kernel object cannot span
multiple untyped memory regions. The capability selectors for untyped
pages are organized similarly to those of page-frame capabilities. There
is a new 2nd-level CNode (UNTYPED_CORE_CNODE) that is dimensioned
according to the maximum amount of physical memory (1M entries, each
entry representing 4 KiB). The CNode is organized such that an index
into the CNode directly corresponds to the physical frame number of the
underlying memory. This way, we can easily determine a untyped page
selector for any physical addresses, i.e., for revoking the kernel
objects allocated at a specific physical page. The downside is the need
for another 16 MiB chunk of meta data. Also, we need to keep in mind
that this approach won't scale to 64-bit systems. We will eventually
need to replace the PHYS_CORE_CNODE and UNTYPED_CORE_CNODE by CNode
hierarchies to model a sparsely populated CNode.
The size constrain of kernel objects has the immediate implication that
the VM CSpaces of protection domains must be organized via several
levels of CNodes. I.e., as the top-level CNode of core has a size of
2^12, the remaining 20 PD-specific CSpace address bits are organized as
a 2nd-level 2^4 padding CNode, a 3rd-level 2^8 CNode, and several
4th-level 2^8 leaf CNodes. The latter contain the actual selectors for
the page tables and page-table entries of the respective PD.
As another slight difference from the experimental branch, the master
branch requires the explicit assignment of page directories to an ASID
pool.
Besides the adjustment to the new seL4 version, the patch introduces a
dedicated type for capability selectors. Previously, we just used to
represent them as unsigned integer values, which became increasingly
confusing. The new type 'Cap_sel' is a PD-local capability selector. The
type 'Cnode_index' is an index into a CNode (which is not generally not
the entire CSpace of the PD).
Fixes #1887
2016-02-03 14:50:44 +01:00
|
|
|
#include <cap_sel_alloc.h>
|
2015-05-05 00:15:50 +02:00
|
|
|
|
|
|
|
namespace Genode { class Page_table_registry; }
|
|
|
|
|
|
|
|
|
|
|
|
class Genode::Page_table_registry
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
|
|
|
|
class Lookup_failed : Exception { };
|
2016-07-08 13:44:52 +02:00
|
|
|
class Mapping_cache_full : Exception { };
|
2015-05-05 00:15:50 +02:00
|
|
|
|
|
|
|
private:
|
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX use AVL tree (with virtual address as key) instead of list
|
|
|
|
*/
|
|
|
|
|
|
|
|
class Page_table : public List<Page_table>::Element
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
|
|
|
|
struct Entry : List<Entry>::Element
|
|
|
|
{
|
|
|
|
addr_t const addr;
|
|
|
|
unsigned const sel;
|
|
|
|
|
|
|
|
Entry(addr_t addr, unsigned sel) : addr(addr), sel(sel) { }
|
|
|
|
};
|
|
|
|
|
|
|
|
addr_t const addr;
|
|
|
|
|
2015-05-17 22:24:47 +02:00
|
|
|
static constexpr bool verbose = false;
|
|
|
|
|
2015-05-05 00:15:50 +02:00
|
|
|
private:
|
|
|
|
|
|
|
|
List<Entry> _entries;
|
|
|
|
|
|
|
|
static addr_t _page_frame_base(addr_t addr)
|
|
|
|
{
|
|
|
|
return addr & get_page_mask();
|
|
|
|
}
|
|
|
|
|
|
|
|
bool _entry_exists(addr_t addr) const
|
|
|
|
{
|
|
|
|
for (Entry const *e = _entries.first(); e; e = e->next()) {
|
|
|
|
if (_page_frame_base(e->addr) == _page_frame_base(addr))
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
public:
|
|
|
|
|
2015-05-07 15:47:15 +02:00
|
|
|
class Lookup_failed : Exception { };
|
|
|
|
|
2015-05-05 00:15:50 +02:00
|
|
|
Page_table(addr_t addr) : addr(addr) { }
|
|
|
|
|
2015-05-07 15:47:15 +02:00
|
|
|
Entry &lookup(addr_t addr)
|
|
|
|
{
|
|
|
|
for (Entry *e = _entries.first(); e; e = e->next()) {
|
|
|
|
if (_page_frame_base(e->addr) == _page_frame_base(addr))
|
|
|
|
return *e;
|
|
|
|
}
|
|
|
|
throw Lookup_failed();
|
|
|
|
}
|
|
|
|
|
2016-04-05 15:16:39 +02:00
|
|
|
void insert_entry(Allocator &entry_alloc, addr_t addr, unsigned sel)
|
2015-05-05 00:15:50 +02:00
|
|
|
{
|
|
|
|
if (_entry_exists(addr)) {
|
|
|
|
PWRN("trying to insert page frame for 0x%lx twice", addr);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-07-08 13:44:52 +02:00
|
|
|
try {
|
|
|
|
_entries.insert(new (entry_alloc) Entry(addr, sel));
|
|
|
|
} catch (Genode::Allocator::Out_of_memory) {
|
|
|
|
throw Mapping_cache_full();
|
|
|
|
}
|
2015-05-05 00:15:50 +02:00
|
|
|
}
|
2015-05-07 15:47:15 +02:00
|
|
|
|
2016-04-05 15:16:39 +02:00
|
|
|
void remove_entry(Allocator &entry_alloc, addr_t addr)
|
2015-05-07 15:47:15 +02:00
|
|
|
{
|
|
|
|
try {
|
|
|
|
Entry &entry = lookup(addr);
|
|
|
|
_entries.remove(&entry);
|
2016-04-05 15:16:39 +02:00
|
|
|
destroy(entry_alloc, &entry);
|
2015-05-07 15:47:15 +02:00
|
|
|
} catch (Lookup_failed) {
|
2015-05-17 22:24:47 +02:00
|
|
|
if (verbose)
|
|
|
|
PWRN("trying to remove non-existing page frame for 0x%lx", addr);
|
2015-05-07 15:47:15 +02:00
|
|
|
}
|
|
|
|
}
|
2016-07-08 13:44:52 +02:00
|
|
|
|
|
|
|
void flush_all(Allocator &entry_alloc)
|
|
|
|
{
|
|
|
|
for (; Entry *entry = _entries.first();) {
|
|
|
|
_entries.remove(entry);
|
|
|
|
destroy(entry_alloc, entry);
|
|
|
|
}
|
|
|
|
}
|
2015-05-05 00:15:50 +02:00
|
|
|
};
|
|
|
|
|
2016-04-05 15:16:39 +02:00
|
|
|
/**
|
|
|
|
* Allocator operating on a static memory pool
|
|
|
|
*
|
|
|
|
* \param ELEM element type
|
|
|
|
* \param MAX maximum number of elements
|
|
|
|
*
|
|
|
|
* The size of a single ELEM must be a multiple of sizeof(long).
|
|
|
|
*/
|
|
|
|
template <typename ELEM, size_t MAX>
|
|
|
|
class Static_allocator : public Allocator
|
sel4: update to version 2.1
This patch updates seL4 from the experimental branch of one year ago to
the master branch of version 2.1. The transition has the following
implications.
In contrast to the experimental branch, the master branch has no way to
manually define the allocation of kernel objects within untyped memory
ranges. Instead, the kernel maintains a built-in allocation policy. This
policy rules out the deallocation of once-used parts of untyped memory.
The only way to reuse memory is to revoke the entire untyped memory
range. Consequently, we cannot share a large untyped memory range for
kernel objects of different protection domains. In order to reuse memory
at a reasonably fine granularity, we need to split the initial untyped
memory ranges into small chunks that can be individually revoked. Those
chunks are called "untyped pages". An untyped page is a 4 KiB untyped
memory region.
The bootstrapping of core has to employ a two-stage allocation approach
now. For creating the initial kernel objects for core, which remain
static during the entire lifetime of the system, kernel objects are
created directly out of the initial untyped memory regions as reported
by the kernel. The so-called "initial untyped pool" keeps track of the
consumption of those untyped memory ranges by mimicking the kernel's
internal allocation policy. Kernel objects created this way can be of
any size. For example the phys CNode, which is used to store page-frame
capabilities is 16 MiB in size. Also, core's CSpace uses a relatively
large CNode.
After the initial setup phase, all remaining untyped memory is turned
into untyped pages. From this point on, new created kernel objects
cannot exceed 4 KiB in size because one kernel object cannot span
multiple untyped memory regions. The capability selectors for untyped
pages are organized similarly to those of page-frame capabilities. There
is a new 2nd-level CNode (UNTYPED_CORE_CNODE) that is dimensioned
according to the maximum amount of physical memory (1M entries, each
entry representing 4 KiB). The CNode is organized such that an index
into the CNode directly corresponds to the physical frame number of the
underlying memory. This way, we can easily determine a untyped page
selector for any physical addresses, i.e., for revoking the kernel
objects allocated at a specific physical page. The downside is the need
for another 16 MiB chunk of meta data. Also, we need to keep in mind
that this approach won't scale to 64-bit systems. We will eventually
need to replace the PHYS_CORE_CNODE and UNTYPED_CORE_CNODE by CNode
hierarchies to model a sparsely populated CNode.
The size constrain of kernel objects has the immediate implication that
the VM CSpaces of protection domains must be organized via several
levels of CNodes. I.e., as the top-level CNode of core has a size of
2^12, the remaining 20 PD-specific CSpace address bits are organized as
a 2nd-level 2^4 padding CNode, a 3rd-level 2^8 CNode, and several
4th-level 2^8 leaf CNodes. The latter contain the actual selectors for
the page tables and page-table entries of the respective PD.
As another slight difference from the experimental branch, the master
branch requires the explicit assignment of page directories to an ASID
pool.
Besides the adjustment to the new seL4 version, the patch introduces a
dedicated type for capability selectors. Previously, we just used to
represent them as unsigned integer values, which became increasingly
confusing. The new type 'Cap_sel' is a PD-local capability selector. The
type 'Cnode_index' is an index into a CNode (which is not generally not
the entire CSpace of the PD).
Fixes #1887
2016-02-03 14:50:44 +01:00
|
|
|
{
|
2016-04-05 15:16:39 +02:00
|
|
|
private:
|
sel4: update to version 2.1
This patch updates seL4 from the experimental branch of one year ago to
the master branch of version 2.1. The transition has the following
implications.
In contrast to the experimental branch, the master branch has no way to
manually define the allocation of kernel objects within untyped memory
ranges. Instead, the kernel maintains a built-in allocation policy. This
policy rules out the deallocation of once-used parts of untyped memory.
The only way to reuse memory is to revoke the entire untyped memory
range. Consequently, we cannot share a large untyped memory range for
kernel objects of different protection domains. In order to reuse memory
at a reasonably fine granularity, we need to split the initial untyped
memory ranges into small chunks that can be individually revoked. Those
chunks are called "untyped pages". An untyped page is a 4 KiB untyped
memory region.
The bootstrapping of core has to employ a two-stage allocation approach
now. For creating the initial kernel objects for core, which remain
static during the entire lifetime of the system, kernel objects are
created directly out of the initial untyped memory regions as reported
by the kernel. The so-called "initial untyped pool" keeps track of the
consumption of those untyped memory ranges by mimicking the kernel's
internal allocation policy. Kernel objects created this way can be of
any size. For example the phys CNode, which is used to store page-frame
capabilities is 16 MiB in size. Also, core's CSpace uses a relatively
large CNode.
After the initial setup phase, all remaining untyped memory is turned
into untyped pages. From this point on, new created kernel objects
cannot exceed 4 KiB in size because one kernel object cannot span
multiple untyped memory regions. The capability selectors for untyped
pages are organized similarly to those of page-frame capabilities. There
is a new 2nd-level CNode (UNTYPED_CORE_CNODE) that is dimensioned
according to the maximum amount of physical memory (1M entries, each
entry representing 4 KiB). The CNode is organized such that an index
into the CNode directly corresponds to the physical frame number of the
underlying memory. This way, we can easily determine a untyped page
selector for any physical addresses, i.e., for revoking the kernel
objects allocated at a specific physical page. The downside is the need
for another 16 MiB chunk of meta data. Also, we need to keep in mind
that this approach won't scale to 64-bit systems. We will eventually
need to replace the PHYS_CORE_CNODE and UNTYPED_CORE_CNODE by CNode
hierarchies to model a sparsely populated CNode.
The size constrain of kernel objects has the immediate implication that
the VM CSpaces of protection domains must be organized via several
levels of CNodes. I.e., as the top-level CNode of core has a size of
2^12, the remaining 20 PD-specific CSpace address bits are organized as
a 2nd-level 2^4 padding CNode, a 3rd-level 2^8 CNode, and several
4th-level 2^8 leaf CNodes. The latter contain the actual selectors for
the page tables and page-table entries of the respective PD.
As another slight difference from the experimental branch, the master
branch requires the explicit assignment of page directories to an ASID
pool.
Besides the adjustment to the new seL4 version, the patch introduces a
dedicated type for capability selectors. Previously, we just used to
represent them as unsigned integer values, which became increasingly
confusing. The new type 'Cap_sel' is a PD-local capability selector. The
type 'Cnode_index' is an index into a CNode (which is not generally not
the entire CSpace of the PD).
Fixes #1887
2016-02-03 14:50:44 +01:00
|
|
|
|
2016-04-05 15:16:39 +02:00
|
|
|
Bit_allocator<MAX> _used;
|
2015-05-05 00:15:50 +02:00
|
|
|
|
2016-04-05 15:16:39 +02:00
|
|
|
struct Elem_space
|
|
|
|
{
|
|
|
|
long space[sizeof(ELEM)/sizeof(long)];
|
|
|
|
};
|
2015-05-05 00:15:50 +02:00
|
|
|
|
2016-04-05 15:16:39 +02:00
|
|
|
Elem_space _elements[MAX];
|
|
|
|
|
|
|
|
public:
|
|
|
|
|
|
|
|
class Alloc_failed { };
|
|
|
|
|
|
|
|
bool alloc(size_t size, void **out_addr) override
|
|
|
|
{
|
|
|
|
*out_addr = nullptr;
|
|
|
|
|
|
|
|
if (size > sizeof(Elem_space)) {
|
|
|
|
PERR("unexpected allocation size of %zd", size);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
try {
|
|
|
|
*out_addr = &_elements[_used.alloc()]; }
|
|
|
|
catch (typename Bit_allocator<MAX>::Out_of_indices) {
|
|
|
|
return false; }
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t overhead(size_t) const override { return 0; }
|
|
|
|
|
|
|
|
void free(void *ptr, size_t) override
|
|
|
|
{
|
|
|
|
Elem_space *elem = reinterpret_cast<Elem_space *>(ptr);
|
|
|
|
unsigned const index = elem - &_elements[0];
|
|
|
|
_used.free(index);
|
|
|
|
}
|
2015-05-05 00:15:50 +02:00
|
|
|
|
2016-04-05 15:16:39 +02:00
|
|
|
bool need_size_for_free() const { return false; }
|
|
|
|
};
|
2015-05-05 00:15:50 +02:00
|
|
|
|
2016-04-05 15:16:39 +02:00
|
|
|
Static_allocator<Page_table, 128> _page_table_alloc;
|
2016-06-16 16:25:45 +02:00
|
|
|
Static_allocator<Page_table::Entry, 2048> _page_table_entry_alloc;
|
sel4: update to version 2.1
This patch updates seL4 from the experimental branch of one year ago to
the master branch of version 2.1. The transition has the following
implications.
In contrast to the experimental branch, the master branch has no way to
manually define the allocation of kernel objects within untyped memory
ranges. Instead, the kernel maintains a built-in allocation policy. This
policy rules out the deallocation of once-used parts of untyped memory.
The only way to reuse memory is to revoke the entire untyped memory
range. Consequently, we cannot share a large untyped memory range for
kernel objects of different protection domains. In order to reuse memory
at a reasonably fine granularity, we need to split the initial untyped
memory ranges into small chunks that can be individually revoked. Those
chunks are called "untyped pages". An untyped page is a 4 KiB untyped
memory region.
The bootstrapping of core has to employ a two-stage allocation approach
now. For creating the initial kernel objects for core, which remain
static during the entire lifetime of the system, kernel objects are
created directly out of the initial untyped memory regions as reported
by the kernel. The so-called "initial untyped pool" keeps track of the
consumption of those untyped memory ranges by mimicking the kernel's
internal allocation policy. Kernel objects created this way can be of
any size. For example the phys CNode, which is used to store page-frame
capabilities is 16 MiB in size. Also, core's CSpace uses a relatively
large CNode.
After the initial setup phase, all remaining untyped memory is turned
into untyped pages. From this point on, new created kernel objects
cannot exceed 4 KiB in size because one kernel object cannot span
multiple untyped memory regions. The capability selectors for untyped
pages are organized similarly to those of page-frame capabilities. There
is a new 2nd-level CNode (UNTYPED_CORE_CNODE) that is dimensioned
according to the maximum amount of physical memory (1M entries, each
entry representing 4 KiB). The CNode is organized such that an index
into the CNode directly corresponds to the physical frame number of the
underlying memory. This way, we can easily determine a untyped page
selector for any physical addresses, i.e., for revoking the kernel
objects allocated at a specific physical page. The downside is the need
for another 16 MiB chunk of meta data. Also, we need to keep in mind
that this approach won't scale to 64-bit systems. We will eventually
need to replace the PHYS_CORE_CNODE and UNTYPED_CORE_CNODE by CNode
hierarchies to model a sparsely populated CNode.
The size constrain of kernel objects has the immediate implication that
the VM CSpaces of protection domains must be organized via several
levels of CNodes. I.e., as the top-level CNode of core has a size of
2^12, the remaining 20 PD-specific CSpace address bits are organized as
a 2nd-level 2^4 padding CNode, a 3rd-level 2^8 CNode, and several
4th-level 2^8 leaf CNodes. The latter contain the actual selectors for
the page tables and page-table entries of the respective PD.
As another slight difference from the experimental branch, the master
branch requires the explicit assignment of page directories to an ASID
pool.
Besides the adjustment to the new seL4 version, the patch introduces a
dedicated type for capability selectors. Previously, we just used to
represent them as unsigned integer values, which became increasingly
confusing. The new type 'Cap_sel' is a PD-local capability selector. The
type 'Cnode_index' is an index into a CNode (which is not generally not
the entire CSpace of the PD).
Fixes #1887
2016-02-03 14:50:44 +01:00
|
|
|
|
2015-05-05 00:15:50 +02:00
|
|
|
List<Page_table> _page_tables;
|
|
|
|
|
|
|
|
static addr_t _page_table_base(addr_t addr)
|
|
|
|
{
|
|
|
|
return addr & ~(4*1024*1024 - 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool _page_table_exists(addr_t addr) const
|
|
|
|
{
|
|
|
|
for (Page_table const *pt = _page_tables.first(); pt; pt = pt->next()) {
|
|
|
|
if (_page_table_base(pt->addr) == _page_table_base(addr))
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
Page_table &_lookup(addr_t addr)
|
|
|
|
{
|
|
|
|
for (Page_table *pt = _page_tables.first(); pt; pt = pt->next()) {
|
|
|
|
if (_page_table_base(pt->addr) == _page_table_base(addr))
|
|
|
|
return *pt;
|
|
|
|
}
|
|
|
|
PDBG("page-table lookup failed");
|
|
|
|
throw Lookup_failed();
|
|
|
|
}
|
|
|
|
|
2015-05-17 22:24:47 +02:00
|
|
|
static constexpr bool verbose = false;
|
|
|
|
|
2015-05-05 00:15:50 +02:00
|
|
|
public:
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Constructor
|
|
|
|
*
|
|
|
|
* \param md_alloc backing store allocator for metadata
|
2016-04-05 15:16:39 +02:00
|
|
|
*
|
|
|
|
* XXX The md_alloc argument is currently unused as we dimension
|
|
|
|
* MAX_PAGE_TABLES and MAX_PAGE_TABLE_ENTRIES statically.
|
2015-05-05 00:15:50 +02:00
|
|
|
*/
|
2016-04-05 15:16:39 +02:00
|
|
|
Page_table_registry(Allocator &md_alloc) { }
|
2015-05-05 00:15:50 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Register page table
|
|
|
|
*
|
|
|
|
* \param addr virtual address
|
|
|
|
* \param sel page-table selector
|
|
|
|
*/
|
sel4: update to version 2.1
This patch updates seL4 from the experimental branch of one year ago to
the master branch of version 2.1. The transition has the following
implications.
In contrast to the experimental branch, the master branch has no way to
manually define the allocation of kernel objects within untyped memory
ranges. Instead, the kernel maintains a built-in allocation policy. This
policy rules out the deallocation of once-used parts of untyped memory.
The only way to reuse memory is to revoke the entire untyped memory
range. Consequently, we cannot share a large untyped memory range for
kernel objects of different protection domains. In order to reuse memory
at a reasonably fine granularity, we need to split the initial untyped
memory ranges into small chunks that can be individually revoked. Those
chunks are called "untyped pages". An untyped page is a 4 KiB untyped
memory region.
The bootstrapping of core has to employ a two-stage allocation approach
now. For creating the initial kernel objects for core, which remain
static during the entire lifetime of the system, kernel objects are
created directly out of the initial untyped memory regions as reported
by the kernel. The so-called "initial untyped pool" keeps track of the
consumption of those untyped memory ranges by mimicking the kernel's
internal allocation policy. Kernel objects created this way can be of
any size. For example the phys CNode, which is used to store page-frame
capabilities is 16 MiB in size. Also, core's CSpace uses a relatively
large CNode.
After the initial setup phase, all remaining untyped memory is turned
into untyped pages. From this point on, new created kernel objects
cannot exceed 4 KiB in size because one kernel object cannot span
multiple untyped memory regions. The capability selectors for untyped
pages are organized similarly to those of page-frame capabilities. There
is a new 2nd-level CNode (UNTYPED_CORE_CNODE) that is dimensioned
according to the maximum amount of physical memory (1M entries, each
entry representing 4 KiB). The CNode is organized such that an index
into the CNode directly corresponds to the physical frame number of the
underlying memory. This way, we can easily determine a untyped page
selector for any physical addresses, i.e., for revoking the kernel
objects allocated at a specific physical page. The downside is the need
for another 16 MiB chunk of meta data. Also, we need to keep in mind
that this approach won't scale to 64-bit systems. We will eventually
need to replace the PHYS_CORE_CNODE and UNTYPED_CORE_CNODE by CNode
hierarchies to model a sparsely populated CNode.
The size constrain of kernel objects has the immediate implication that
the VM CSpaces of protection domains must be organized via several
levels of CNodes. I.e., as the top-level CNode of core has a size of
2^12, the remaining 20 PD-specific CSpace address bits are organized as
a 2nd-level 2^4 padding CNode, a 3rd-level 2^8 CNode, and several
4th-level 2^8 leaf CNodes. The latter contain the actual selectors for
the page tables and page-table entries of the respective PD.
As another slight difference from the experimental branch, the master
branch requires the explicit assignment of page directories to an ASID
pool.
Besides the adjustment to the new seL4 version, the patch introduces a
dedicated type for capability selectors. Previously, we just used to
represent them as unsigned integer values, which became increasingly
confusing. The new type 'Cap_sel' is a PD-local capability selector. The
type 'Cnode_index' is an index into a CNode (which is not generally not
the entire CSpace of the PD).
Fixes #1887
2016-02-03 14:50:44 +01:00
|
|
|
void insert_page_table(addr_t addr, Cap_sel sel)
|
2015-05-05 00:15:50 +02:00
|
|
|
{
|
sel4: update to version 2.1
This patch updates seL4 from the experimental branch of one year ago to
the master branch of version 2.1. The transition has the following
implications.
In contrast to the experimental branch, the master branch has no way to
manually define the allocation of kernel objects within untyped memory
ranges. Instead, the kernel maintains a built-in allocation policy. This
policy rules out the deallocation of once-used parts of untyped memory.
The only way to reuse memory is to revoke the entire untyped memory
range. Consequently, we cannot share a large untyped memory range for
kernel objects of different protection domains. In order to reuse memory
at a reasonably fine granularity, we need to split the initial untyped
memory ranges into small chunks that can be individually revoked. Those
chunks are called "untyped pages". An untyped page is a 4 KiB untyped
memory region.
The bootstrapping of core has to employ a two-stage allocation approach
now. For creating the initial kernel objects for core, which remain
static during the entire lifetime of the system, kernel objects are
created directly out of the initial untyped memory regions as reported
by the kernel. The so-called "initial untyped pool" keeps track of the
consumption of those untyped memory ranges by mimicking the kernel's
internal allocation policy. Kernel objects created this way can be of
any size. For example the phys CNode, which is used to store page-frame
capabilities is 16 MiB in size. Also, core's CSpace uses a relatively
large CNode.
After the initial setup phase, all remaining untyped memory is turned
into untyped pages. From this point on, new created kernel objects
cannot exceed 4 KiB in size because one kernel object cannot span
multiple untyped memory regions. The capability selectors for untyped
pages are organized similarly to those of page-frame capabilities. There
is a new 2nd-level CNode (UNTYPED_CORE_CNODE) that is dimensioned
according to the maximum amount of physical memory (1M entries, each
entry representing 4 KiB). The CNode is organized such that an index
into the CNode directly corresponds to the physical frame number of the
underlying memory. This way, we can easily determine a untyped page
selector for any physical addresses, i.e., for revoking the kernel
objects allocated at a specific physical page. The downside is the need
for another 16 MiB chunk of meta data. Also, we need to keep in mind
that this approach won't scale to 64-bit systems. We will eventually
need to replace the PHYS_CORE_CNODE and UNTYPED_CORE_CNODE by CNode
hierarchies to model a sparsely populated CNode.
The size constrain of kernel objects has the immediate implication that
the VM CSpaces of protection domains must be organized via several
levels of CNodes. I.e., as the top-level CNode of core has a size of
2^12, the remaining 20 PD-specific CSpace address bits are organized as
a 2nd-level 2^4 padding CNode, a 3rd-level 2^8 CNode, and several
4th-level 2^8 leaf CNodes. The latter contain the actual selectors for
the page tables and page-table entries of the respective PD.
As another slight difference from the experimental branch, the master
branch requires the explicit assignment of page directories to an ASID
pool.
Besides the adjustment to the new seL4 version, the patch introduces a
dedicated type for capability selectors. Previously, we just used to
represent them as unsigned integer values, which became increasingly
confusing. The new type 'Cap_sel' is a PD-local capability selector. The
type 'Cnode_index' is an index into a CNode (which is not generally not
the entire CSpace of the PD).
Fixes #1887
2016-02-03 14:50:44 +01:00
|
|
|
/* XXX sel is unused */
|
|
|
|
|
2015-05-05 00:15:50 +02:00
|
|
|
if (_page_table_exists(addr)) {
|
|
|
|
PWRN("trying to insert page table for 0x%lx twice", addr);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-04-05 15:16:39 +02:00
|
|
|
_page_tables.insert(new (_page_table_alloc) Page_table(addr));
|
2015-05-05 00:15:50 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool has_page_table_at(addr_t addr) const
|
|
|
|
{
|
|
|
|
return _page_table_exists(addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Register page table entry
|
|
|
|
*
|
|
|
|
* \param addr virtual address
|
|
|
|
* \param sel page frame selector
|
|
|
|
*
|
|
|
|
* \throw Lookup_failed no page table for given address
|
|
|
|
*/
|
|
|
|
void insert_page_table_entry(addr_t addr, unsigned sel)
|
|
|
|
{
|
2016-04-05 15:16:39 +02:00
|
|
|
_lookup(addr).insert_entry(_page_table_entry_alloc, addr, sel);
|
2015-05-05 00:15:50 +02:00
|
|
|
}
|
2015-05-07 15:47:15 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* Discard the information about the given virtual address
|
|
|
|
*/
|
|
|
|
void forget_page_table_entry(addr_t addr)
|
|
|
|
{
|
|
|
|
try {
|
|
|
|
Page_table &page_table = _lookup(addr);
|
2016-04-05 15:16:39 +02:00
|
|
|
page_table.remove_entry(_page_table_entry_alloc, addr);
|
2015-05-07 15:47:15 +02:00
|
|
|
} catch (...) {
|
2015-05-17 22:24:47 +02:00
|
|
|
if (verbose)
|
|
|
|
PDBG("no PT entry found for virtual address 0x%lx", addr);
|
2015-05-07 15:47:15 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-08 13:44:52 +02:00
|
|
|
|
|
|
|
void flush_cache()
|
|
|
|
{
|
|
|
|
for (Page_table *pt = _page_tables.first(); pt; pt = pt->next())
|
|
|
|
pt->flush_all(_page_table_entry_alloc);
|
|
|
|
}
|
|
|
|
|
2015-05-07 15:47:15 +02:00
|
|
|
/**
|
|
|
|
* Apply functor 'fn' to selector of specified virtual address
|
|
|
|
*
|
|
|
|
* \param addr virtual address
|
|
|
|
*
|
|
|
|
* The functor is called with the selector of the page table entry
|
|
|
|
* (the copy of the phys frame selector) as argument.
|
|
|
|
*/
|
|
|
|
template <typename FN>
|
|
|
|
void apply(addr_t addr, FN const &fn)
|
|
|
|
{
|
|
|
|
try {
|
|
|
|
Page_table &page_table = _lookup(addr);
|
|
|
|
Page_table::Entry &entry = page_table.lookup(addr);
|
|
|
|
|
|
|
|
fn(entry.sel);
|
|
|
|
} catch (...) {
|
2015-05-17 22:24:47 +02:00
|
|
|
if (verbose)
|
|
|
|
PDBG("no PT entry found for virtual address 0x%lx", addr);
|
2015-05-07 15:47:15 +02:00
|
|
|
}
|
|
|
|
}
|
2015-05-05 00:15:50 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
#endif /* _CORE__INCLUDE__PAGE_TABLE_REGISTRY_H_ */
|