diff --git a/repos/base-hw/src/core/platform.h b/repos/base-hw/src/core/platform.h
index c08223b49..2e0c52dbe 100644
--- a/repos/base-hw/src/core/platform.h
+++ b/repos/base-hw/src/core/platform.h
@@ -33,7 +33,10 @@
 #include
 #include

-namespace Genode { class Platform; };
+namespace Genode {
+	class Address_space;
+	class Platform;
+};

 class Genode::Platform : public Genode::Platform_generic
 {
@@ -142,6 +145,7 @@ class Genode::Platform : public Genode::Platform_generic
 			while (1) { Kernel::stop_thread(); } };

 		bool supports_direct_unmap() const { return 1; }
+		Address_space * core_pd() { return nullptr; }

 		Affinity::Space affinity_space() const {
 			return Affinity::Space(_boot_info().cpus); }
diff --git a/repos/base-nova/src/core/include/platform.h b/repos/base-nova/src/core/include/platform.h
index 1fef37717..303ba924e 100644
--- a/repos/base-nova/src/core/include/platform.h
+++ b/repos/base-nova/src/core/include/platform.h
@@ -18,6 +18,7 @@

 /* core includes */
 #include
 #include
+#include

 namespace Genode {
@@ -83,8 +84,8 @@ namespace Genode {
 			size_t max_caps() const override { return _max_caps; }

 			void wait_for_exit() override;
-			bool supports_unmap() override { return true; }
 			bool supports_direct_unmap() const override { return true; }
+			Address_space * core_pd() { return nullptr; }

 			Affinity::Space affinity_space() const override { return _cpus; }

diff --git a/repos/base-sel4/src/core/include/platform.h b/repos/base-sel4/src/core/include/platform.h
index d4a4a85fa..1258f9c83 100644
--- a/repos/base-sel4/src/core/include/platform.h
+++ b/repos/base-sel4/src/core/include/platform.h
@@ -24,6 +24,7 @@
 namespace Genode {
 	class Platform;
 	template class Static_allocator;
+	class Address_space;
 }


@@ -256,8 +257,8 @@ class Genode::Platform : public Platform_generic
 		Affinity::Space affinity_space() const override {
 			return sel4_boot_info().numNodes; }

-		bool supports_unmap() override { return true; }
 		bool supports_direct_unmap() const override { return true; }
+		Address_space * core_pd() { return nullptr; }

 		/*******************
 		 ** seL4 specific **
diff --git a/repos/base/src/core/include/platform_generic.h b/repos/base/src/core/include/platform_generic.h
index 6421a308c..22a5f857b 100644
--- a/repos/base/src/core/include/platform_generic.h
+++ b/repos/base/src/core/include/platform_generic.h
@@ -80,11 +80,6 @@ namespace Genode {
 			 */
 			virtual void wait_for_exit() = 0;

-			/**
-			 * Return true if platform supports unmap
-			 */
-			virtual bool supports_unmap() { return true; }
-
 			/**
 			 * Return true if platform supports direct unmap (no mapping db)
 			 */
diff --git a/repos/base/src/core/include/region_map_component.h b/repos/base/src/core/include/region_map_component.h
index 2ded59c6c..8bd456e51 100644
--- a/repos/base/src/core/include/region_map_component.h
+++ b/repos/base/src/core/include/region_map_component.h
@@ -361,6 +361,19 @@ class Genode::Region_map_component : private Weak_object,
 			return _session_ep->apply(cap, lambda);
 		}

+		/*
+		 * Returns the core-local address behind region 'r'
+		 */
+		addr_t _core_local_addr(Rm_region & r);
+
+		/*
+		 * Unmaps a memory area from all address spaces referencing it.
+		 *
+		 * \param base  base address of region to unmap
+		 * \param size  size of region to unmap
+		 */
+		void _unmap_region(addr_t base, size_t size);
+
 	public:

 		/**
diff --git a/repos/base/src/core/region_map_component.cc b/repos/base/src/core/region_map_component.cc
index 3932176c8..6233b5b22 100644
--- a/repos/base/src/core/region_map_component.cc
+++ b/repos/base/src/core/region_map_component.cc
@@ -468,27 +468,54 @@ Region_map_component::attach(Dataspace_capability ds_cap, size_t size,
 }


-static void unmap_managed(Region_map_component *rm, Rm_region *region, int level)
+addr_t Region_map_component::_core_local_addr(Rm_region & region)
 {
-	for (Rm_region *managed = rm->dataspace_component()->regions()->first();
-	     managed;
-	     managed = managed->List<Rm_region>::Element::next()) {
+	/*
+	 * If this region references a managed dataspace, we have to
+	 * recursively request the core-local address.
+	 */
+	if (region.dataspace()->sub_rm().valid()) {
+		auto lambda = [&] (Region_map_component * rmc) -> addr_t
+		{
+			/*
+			 * It is possible that there is no dataspace attached
+			 * inside the managed dataspace, in that case return zero.
+			 */
+			Rm_region * r = rmc ? rmc->_map.metadata((void*)region.offset())
+			                    : nullptr;
+			return r ? rmc->_core_local_addr(*r) : 0;
+		};
+		return _session_ep->apply(region.dataspace()->sub_rm(), lambda);
+	}

-		if (managed->base() - managed->offset() >= region->base() - region->offset()
-		    && managed->base() - managed->offset() + managed->size()
-		       <= region->base() - region->offset() + region->size())
-			unmap_managed(managed->rm(), managed, level + 1);
+	/* return core-local address of dataspace + region offset */
+	return region.dataspace()->core_local_addr() + region.offset();
+}

-		if (!managed->rm()->address_space())
-			continue;

-		/* found a leaf node (here a leaf is an Region_map whose dataspace has no regions) */
+void Region_map_component::_unmap_region(addr_t base, size_t size)
+{
+	if (address_space()) address_space()->flush(base, size, { 0 });
-		Address_space::Core_local_addr core_local = { region->dataspace()->core_local_addr() + region->offset() };
-		managed->rm()->address_space()->flush(managed->base() + region->base()
-		                                      - managed->offset(),
-		                                      region->size(), core_local);
+
+	/*
+	 * Iterate over all regions that reference this region map
+	 * as managed dataspace.
+	 */
+	for (Rm_region * r = dataspace_component()->regions()->first();
+	     r; r = r->List<Rm_region>::Element::next()) {
+
+		/*
+		 * Check whether the region referencing the managed dataspace
+		 * and the region to unmap overlap.
+		 */
+		addr_t ds_base = max((addr_t)r->offset(), base);
+		addr_t ds_end  = min((addr_t)r->offset() + r->size(), base + size);
+		size_t ds_size = ds_base < ds_end ? ds_end - ds_base : 0;
+
+		/* if the size is not zero, there is an overlap */
+		if (ds_size)
+			r->rm()->_unmap_region(r->base() + ds_base - r->offset(),
+			                       ds_size);
 	}
 }
@@ -519,7 +546,6 @@ void Region_map_component::detach(Local_addr local_addr)
 		return;
 	}

-
 	/* inform dataspace about detachment */
 	dsc->detached_from(region_ptr);

@@ -530,54 +556,33 @@ void Region_map_component::detach(Local_addr local_addr)
 	Rm_region region = *region_ptr;

 	/*
-	 * Deallocate region on platforms that support unmap
-	 *
-	 * On platforms without support for unmap, the
-	 * same virtual address must not be reused. Hence, we never mark used
-	 * regions as free.
-	 *
 	 * We unregister the region from region map prior unmapping the pages to
 	 * make sure that page faults occurring immediately after the unmap
 	 * refer to an empty region not to the dataspace, which we just removed.
 	 */
-	if (platform()->supports_unmap())
-		_map.free(reinterpret_cast<void *>(region.base()));
+	_map.free(reinterpret_cast<void *>(region.base()));

-	/*
-	 * This function gets called from the destructor of 'Dataspace_component',
-	 * which iterates through all regions the dataspace is attached to. One
-	 * particular case is the destruction of an 'Region_map_component' and its
-	 * contained managed dataspace ('_ds') member. The type of this member is
-	 * derived from 'Dataspace_component' and provides the 'sub_region_map'
-	 * function, which can normally be used to distinguish managed dataspaces
-	 * from leaf dataspaces. However, at destruction time of the '_dsc' base
-	 * class, the vtable entry of 'sub_region_map' already points to the
-	 * base-class's function. Hence, we cannot check the return value of this
-	 * function to determine if the dataspace is a managed dataspace. Instead,
-	 * we introduced a dataspace member '_managed' with the non-virtual accessor
-	 * function 'managed'.
-	 */
+	if (!platform()->supports_direct_unmap()) {

-	if (_address_space) {
+		/*
+		 * Determine the core-local address of the region where necessary.
+		 * If it cannot be retrieved, unmapping is not possible on kernels
+		 * without direct unmap support, so the flush is skipped in that
+		 * case. Calling flush with a core-local address of zero on kernels
+		 * that unmap indirectly via core's address space could otherwise
+		 * lead to illegitimate unmaps of core memory (see issue #3082).
+		 */
+		Address_space::Core_local_addr core_local { _core_local_addr(region) };
+		if (core_local.value)
+			platform_specific()->core_pd()->flush(0, region.size(), core_local);
+	} else {

-		if (!platform()->supports_direct_unmap() && dsc->managed() &&
-		    dsc->core_local_addr() == 0) {
-
-			if (_diag.enabled)
-				warning("unmapping of managed dataspaces not yet supported");
-
-		} else {
-			Address_space::Core_local_addr core_local
-				= { dsc->core_local_addr() + region.offset() };
-			_address_space->flush(region.base(), region.size(), core_local);
-		}
+		/*
+		 * Unmap this memory region from all region maps referencing it.
+		 */
+		_unmap_region(region.base(), region.size());
 	}
-
-	/*
-	 * If region map is used as nested dataspace, unmap this dataspace from all
-	 * region maps.
-	 */
-	unmap_managed(this, &region, 1);
 }
@@ -672,10 +677,17 @@ Region_map_component::Region_map_component(Rpc_entrypoint &ep,

 Region_map_component::~Region_map_component()
 {
-	_ds_ep->dissolve(this);
-
 	lock_for_destruction();

+	/*
+	 * Normally, detaching the dataspace from all region maps is done in
+	 * the destructor of the dataspace. But we do it here explicitly so
+	 * that the regions referring to this dataspace can still retrieve it
+	 * via their capabilities before it gets dissolved in the next step.
+	 */
+	_ds.detach_from_rm_sessions();
+	_ds_ep->dissolve(this);
+
 	/* dissolve all clients from pager entrypoint */
 	Rm_client *cl;
 	do {
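
For reference, the recursion in the new '_core_local_addr' helper can be illustrated outside of core with a small stand-alone model. The sketch below is not the Genode API: 'Dataspace', 'Region', and the std::map standing in for a nested region map are hypothetical simplifications. It only shows the shape of the lookup: a region either refers to a leaf dataspace, whose core-local address plus the region offset is the result, or to a managed dataspace, in which case the lookup descends into the nested map at the region's offset and may come back empty (zero).

#include <cstdio>
#include <map>

using addr_t = unsigned long;

struct Dataspace;

/* hypothetical counterpart of an Rm_region: a window into a dataspace */
struct Region
{
	Dataspace const *ds;
	addr_t offset;   /* offset into the dataspace */
};

/* hypothetical dataspace: either a leaf or a managed (nested) one */
struct Dataspace
{
	addr_t core_local_addr;                   /* valid for leaf dataspaces */
	std::map<addr_t, Region> const *nested;   /* non-null for managed ones */
};

/* return the core-local address behind 'r', or 0 if the lookup hits a hole */
static addr_t core_local_addr(Region const &r)
{
	if (r.ds->nested) {
		auto const it = r.ds->nested->find(r.offset);
		return (it != r.ds->nested->end()) ? core_local_addr(it->second) : 0;
	}
	return r.ds->core_local_addr + r.offset;
}

int main()
{
	Dataspace const leaf { 0xa0000, nullptr };

	/* managed dataspace with the leaf attached at offset 0x1000 */
	std::map<addr_t, Region> const window { { 0x1000, Region { &leaf, 0x200 } } };
	Dataspace const managed { 0, &window };

	std::printf("%lx\n", core_local_addr(Region { &managed, 0x1000 }));  /* a0200 */
	std::printf("%lx\n", core_local_addr(Region { &managed, 0x2000 }));  /* 0, hole */
}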
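
The interval arithmetic in '_unmap_region' can be checked in isolation in the same spirit. 'Attachment' and 'clip_to_attachment' are hypothetical stand-ins rather than Genode types; the sketch only demonstrates how the dataspace-local range [base, base+size) is clipped against the window [offset, offset+size) that a region attaches, and how a non-empty overlap is translated into the outer region map's address range, mirroring the r->base() + ds_base - r->offset() computation above.

#include <cstdio>
#include <cstddef>
#include <algorithm>

using addr_t = unsigned long;

/* hypothetical stand-in for a region that attaches a managed dataspace */
struct Attachment
{
	addr_t      base;    /* virtual base in the outer region map */
	addr_t      offset;  /* offset into the managed dataspace    */
	std::size_t size;    /* size of the attached window          */
};

/*
 * Clip the dataspace-local range [base, base+size) against the attached
 * window [a.offset, a.offset+a.size) and translate the overlap into the
 * outer region map's address space. Returns false if there is no overlap.
 */
static bool clip_to_attachment(Attachment const &a, addr_t base, std::size_t size,
                               addr_t &out_base, std::size_t &out_size)
{
	addr_t const ds_base = std::max(a.offset, base);
	addr_t const ds_end  = std::min(a.offset + a.size, base + size);

	if (ds_base >= ds_end)
		return false;                          /* no overlap */

	out_base = a.base + (ds_base - a.offset);  /* translate to outer map */
	out_size = ds_end - ds_base;
	return true;
}

int main()
{
	/* window of 0x4000 bytes at dataspace offset 0x2000, attached at 0x10000 */
	Attachment const a { 0x10000, 0x2000, 0x4000 };

	addr_t out_base = 0; std::size_t out_size = 0;
	if (clip_to_attachment(a, 0x3000, 0x2000, out_base, out_size))
		std::printf("unmap [%lx,%lx)\n", out_base, out_base + out_size);
	/* prints: unmap [11000,13000) */
}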
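
Finally, the decision taken by the patched detach() can be summarized as a control-flow sketch. The stubs ('platform_supports_direct_unmap', 'resolve_core_local_addr', 'unmap_from_all_maps', 'core_pd_flush') are hypothetical placeholders, not the real interfaces: kernels with direct unmap flush the range from every region map referencing it, while the others go through core's PD and skip the flush entirely when no core-local address can be determined, because flushing a core-local address of zero could revoke core's own mappings (issue #3082).

#include <cstdio>
#include <cstddef>

using addr_t = unsigned long;

/* hypothetical stubs for the platform and address-space interfaces */
static bool platform_supports_direct_unmap = false;

static addr_t resolve_core_local_addr()
{
	return 0;  /* e.g. an unpopulated part of a managed dataspace */
}

static void unmap_from_all_maps(addr_t base, std::size_t size)
{
	std::printf("direct unmap of [%lx,%lx)\n", base, base + size);
}

static void core_pd_flush(std::size_t size, addr_t core_local)
{
	std::printf("flush %zu bytes via core's PD, core-local %lx\n", size, core_local);
}

/* simplified control flow of the patched detach() */
static void detach(addr_t base, std::size_t size)
{
	if (platform_supports_direct_unmap) {
		unmap_from_all_maps(base, size);
		return;
	}

	/*
	 * Without direct unmap support, flushing with a core-local address of
	 * zero could revoke core's own mappings, so the flush is skipped then.
	 */
	if (addr_t const core_local = resolve_core_local_addr())
		core_pd_flush(size, core_local);
}

int main()
{
	detach(0x10000, 0x2000);   /* indirect path, no core-local address, flush skipped */

	platform_supports_direct_unmap = true;
	detach(0x10000, 0x2000);   /* prints: direct unmap of [10000,12000) */
}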