4e97a6511b
* Instead of always re-load page-tables when a thread context is switched only do this when another user PD's thread is the next target, core-threads are always executed within the last PD's page-table set * remove the concept of the mode transition * instead map the exception vector once in bootstrap code into kernel's memory segment * when a new page directory is constructed for a user PD, copy over the top-level kernel segment entries on RISCV and X86, on ARM we use a designated page directory register for the kernel segment * transfer the current CPU id from bootstrap to core/kernel in a register to ease first stack address calculation * align cpu context member of threads and vms, because of x86 constraints regarding the stack-pointer loading * introduce Align_at template for members with alignment constraints * let the x86 hardware do part of the context saving in ISS, by passing the thread context into the TSS before leaving to user-land * use one exception vector for all ARM platforms including Arm_v6 Fix #2091
71 lines
1.7 KiB
C++
71 lines
1.7 KiB
C++
/*
|
|
* \brief Memory map of core
|
|
* \author Stefan Kalkowski
|
|
* \date 2016-11-24
|
|
*/
|
|
|
|
/*
|
|
* Copyright (C) 2016-2017 Genode Labs GmbH
|
|
*
|
|
* This file is part of the Genode OS framework, which is distributed
|
|
* under the terms of the GNU Affero General Public License version 3.
|
|
*/
|
|
|
|
#ifndef _SRC__LIB__HW__MEMORY_MAP_H_
|
|
#define _SRC__LIB__HW__MEMORY_MAP_H_
|
|
|
|
#include <hw/mapping.h>
|
|
#include <hw/memory_region.h>
|
|
#include <hw/util.h>
|
|
|
|
namespace Hw {

	struct Mmio_space;

	/*
	 * Accessors for the fixed virtual-memory layout of core.
	 *
	 * Each function returns the region reserved for the given purpose;
	 * the concrete base addresses and sizes are defined elsewhere
	 * (architecture-specific implementation).
	 */
	namespace Mm {

		/* virtual address range available to user-level components */
		Memory_region const user();

		/* UTCB of core's main thread */
		Memory_region const core_utcb_main_thread();

		/* area holding the stacks of core's threads */
		Memory_region const core_stack_area();

		/* area for core's page-table allocations */
		Memory_region const core_page_tables();

		/* window where device MMIO regions get mapped (cf. Mmio_space) */
		Memory_region const core_mmio();

		/* core's heap */
		Memory_region const core_heap();

		/* exception-vector regions — presumably one per privilege
		 * level (system/hypervisor/supervisor); verify against the
		 * architecture-specific users of these accessors */
		Memory_region const system_exception_vector();
		Memory_region const hypervisor_exception_vector();
		Memory_region const supervisor_exception_vector();

		/* region holding the boot information handed over by bootstrap */
		Memory_region const boot_info();
	}
}
|
|
|
|
struct Hw::Mmio_space : Hw::Memory_region_array
{
	using Hw::Memory_region_array::Memory_region_array;

	/**
	 * Call 'f' with the kernel mapping of each physical MMIO region
	 *
	 * Virtual addresses are handed out consecutively from the base of
	 * core's MMIO window, with one unmapped guard page between
	 * successive regions.
	 */
	template <typename FUNC>
	void for_each_mapping(FUNC f) const
	{
		addr_t virt = Mm::core_mmio().base;
		for_each([&] (Memory_region const & region) {
			f(Mapping { region.base, virt, region.size, PAGE_FLAGS_KERN_IO });
			virt += region.size + get_page_size();
		});
	}

	/**
	 * Translate physical MMIO address to its core-local virtual address
	 *
	 * Returns 0 if 'phys_addr' does not lie within any known region.
	 */
	addr_t virt_addr(addr_t phys_addr) const
	{
		/*
		 * Sadly this method is used quite early during bootstrap
		 * where no exceptions can be used
		 */
		addr_t result = 0;
		for_each_mapping([&] (Mapping const & m) {
			if (phys_addr >= m.phys() && phys_addr < (m.phys()+m.size()))
				result = m.virt() + (phys_addr-m.phys());
		});
		return result;
	}
};
|
|
|
|
#endif /* _SRC__LIB__HW__MEMORY_MAP_H_ */
|