hw: change update_pd to invalidate_tlb

In the past, the core-only privileged syscall `update_pd` was used
solely to invalidate the TLB after the removal of page-table entries.
Until now, the whole TLB, or at least all entries of the affected
protection domain, got invalidated. In preparation for optimizations
and the upcoming ARMv8 support, the syscall now has to deliver the
virtual memory region that needs to be invalidated. Moreover, the name
of the call shall state explicitly that it is used to invalidate the
TLB.
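
For illustration, a minimal sketch of the changed core-local syscall
wrapper and a typical call site, condensed from the hunks below (`Pd`,
`addr_t`, `size_t`, `Call_arg`, and the `call()` helper are the
existing Genode-internal names):

  /* core-only syscall wrapper, now taking the affected virtual region */
  inline void invalidate_tlb(Pd * const pd, addr_t const addr,
                             size_t const sz)
  {
      /* hand the PD plus the region to flush over to the kernel */
      call(call_id_invalidate_tlb(), (Call_arg)pd, (Call_arg)addr,
           (Call_arg)sz);
  }

  /* caller after removing translations, see Hw::Address_space::flush() */
  Kernel::invalidate_tlb(&_kernel_pd, virt, size);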

Ref #3405
Author:    Stefan Kalkowski  2019-06-12 22:33:02 +02:00
Committer: Christian Helmuth
Parent:    d9a0f76e7a
Commit:    87015df66c
11 changed files with 34 additions and 36 deletions

@@ -42,7 +42,7 @@ namespace Kernel
 constexpr Call_arg call_id_resume_thread() { return 104; }
 constexpr Call_arg call_id_thread_pager() { return 105; }
 constexpr Call_arg call_id_thread_quota() { return 106; }
-constexpr Call_arg call_id_update_pd() { return 107; }
+constexpr Call_arg call_id_invalidate_tlb() { return 107; }
 constexpr Call_arg call_id_new_pd() { return 108; }
 constexpr Call_arg call_id_delete_pd() { return 109; }
 constexpr Call_arg call_id_new_signal_receiver() { return 110; }
@@ -62,18 +62,13 @@ namespace Kernel
 constexpr Call_arg call_id_new_core_thread() { return 124; }
 /**
-* Update locally effective domain configuration to in-memory state
-*
-* \param pd pointer to pd kernel object
-*
-* Kernel and/or hardware may cache parts of a domain configuration. This
-* function ensures that the in-memory state of the targeted domain gets
-* CPU-locally effective. The calling thread must not be destroyed while
-* in this syscall.
+* Invalidate TLB entries for the `pd` in region `addr`, `sz`
 */
-inline void update_pd(Pd * const pd)
+inline void invalidate_tlb(Pd * const pd, addr_t const addr,
+size_t const sz)
 {
-call(call_id_update_pd(), (Call_arg)pd);
+call(call_id_invalidate_tlb(), (Call_arg)pd, (Call_arg)addr,
+(Call_arg)sz);
 }

@@ -92,7 +92,7 @@ class Kernel::Pd : public Kernel::Object
 * Check whether the given 'cpu' needs to do some maintainance
 * work, after this pd has had changes in its page-tables
 */
-bool update(Cpu & cpu);
+bool invalidate_tlb(Cpu & cpu, addr_t addr, size_t size);
 /***************

@@ -38,8 +38,10 @@ extern "C" void _core_start(void);
 using namespace Kernel;
-Thread::Pd_update::Pd_update(Thread & caller, Pd & pd, unsigned cnt)
-: caller(caller), pd(pd), cnt(cnt)
+Thread::Tlb_invalidation::Tlb_invalidation(Thread & caller, Pd & pd,
+addr_t addr, size_t size,
+unsigned cnt)
+: caller(caller), pd(pd), addr(addr), size(size), cnt(cnt)
 {
 cpu_pool().work_list().insert(&_le);
 caller._become_inactive(AWAITS_RESTART);
@@ -626,17 +628,19 @@ void Thread::_call_delete_cap()
 }
-void Kernel::Thread::_call_update_pd()
+void Kernel::Thread::_call_invalidate_tlb()
 {
 Pd * const pd = (Pd *) user_arg_1();
+addr_t addr = (addr_t) user_arg_2();
+size_t size = (size_t) user_arg_3();
 unsigned cnt = 0;
 cpu_pool().for_each_cpu([&] (Cpu & cpu) {
 /* if a cpu needs to update increase the counter */
-if (pd->update(cpu)) cnt++; });
+if (pd->invalidate_tlb(cpu, addr, size)) cnt++; });
 /* insert the work item in the list if there are outstanding cpus */
-if (cnt) _pd_update.construct(*this, *pd, cnt);
+if (cnt) _tlb_invalidation.construct(*this, *pd, addr, size, cnt);
 }
@@ -685,7 +689,7 @@ void Thread::_call()
 case call_id_resume_thread(): _call_resume_thread(); return;
 case call_id_cancel_thread_blocking(): _call_cancel_thread_blocking(); return;
 case call_id_thread_pager(): _call_pager(); return;
-case call_id_update_pd(): _call_update_pd(); return;
+case call_id_invalidate_tlb(): _call_invalidate_tlb(); return;
 case call_id_new_pd():
 _call_new<Pd>(*(Hw::Page_table *) user_arg_2(),
 *(Genode::Platform_pd *) user_arg_3());

@@ -63,17 +63,18 @@ class Kernel::Thread
 Thread &operator = (Thread const &);
 /**
-* An update of page-table entries that requires architecture-wise
-* maintainance operations, e.g., a TLB invalidation needs
-* cross-cpu synchronization
+* A TLB invalidation may need cross-cpu synchronization
 */
-struct Pd_update : Inter_processor_work
+struct Tlb_invalidation : Inter_processor_work
 {
 Thread & caller; /* the caller gets blocked until all finished */
 Pd & pd; /* the corresponding pd */
+addr_t addr;
+size_t size;
 unsigned cnt; /* count of cpus left */
-Pd_update(Thread & caller, Pd & pd, unsigned cnt);
+Tlb_invalidation(Thread & caller, Pd & pd, addr_t addr, size_t size,
+unsigned cnt);
 /************************************
 ** Inter_processor_work interface **
@@ -100,7 +101,7 @@ class Kernel::Thread
 void execute() override;
 };
-friend void Pd_update::execute();
+friend void Tlb_invalidation::execute();
 friend void Destroy::execute();
 protected:
@@ -128,8 +129,8 @@ class Kernel::Thread
 bool _cancel_next_await_signal = false;
 bool const _core = false;
-Genode::Constructible<Pd_update> _pd_update {};
-Genode::Constructible<Destroy> _destroy {};
+Genode::Constructible<Tlb_invalidation> _tlb_invalidation {};
+Genode::Constructible<Destroy> _destroy {};
 /**
 * Notice that another thread yielded the CPU to this thread
@@ -211,7 +212,7 @@ class Kernel::Thread
 void _call_await_request_msg();
 void _call_send_request_msg();
 void _call_send_reply_msg();
-void _call_update_pd();
+void _call_invalidate_tlb();
 void _call_update_data_region();
 void _call_update_instr_region();
 void _call_print_char();

@@ -69,9 +69,7 @@ void Hw::Address_space::flush(addr_t virt, size_t size, Core_local_addr)
 try {
 _tt.remove_translation(virt, size, _tt_alloc);
-/* update translation caches */
-Kernel::update_pd(&_kernel_pd);
+Kernel::invalidate_tlb(&_kernel_pd, virt, size);
 } catch(...) {
 error("tried to remove invalid region!");
 }

@@ -14,7 +14,7 @@
 #include <kernel/cpu.h>
 #include <kernel/pd.h>
-bool Kernel::Pd::update(Cpu & cpu)
+bool Kernel::Pd::invalidate_tlb(Cpu & cpu, addr_t, size_t)
 {
 /* invalidate the TLB on the local CPU only */
 if (cpu.id() == Cpu::executing_id()) {

@@ -80,7 +80,7 @@ void Kernel::Thread::_call_update_instr_region()
 * coprocessor registers (there might be ARM SoCs where this is not valid,
 * with several shareability domains, but until now we do not support them)
 */
-void Kernel::Thread::Pd_update::execute() { };
+void Kernel::Thread::Tlb_invalidation::execute() { };
 void Thread::proceed(Cpu & cpu)

@@ -13,7 +13,7 @@
 #include <kernel/pd.h>
-bool Kernel::Pd::update(Kernel::Cpu&)
+bool Kernel::Pd::invalidate_tlb(Kernel::Cpu&, addr_t, size_t)
 {
 Genode::Cpu::sfence();
 return false;

@@ -18,7 +18,7 @@
 using namespace Kernel;
-void Thread::Pd_update::execute() {}
+void Thread::Tlb_invalidation::execute() {}
 void Thread::exception(Cpu & cpu)

@@ -15,7 +15,7 @@
 #include <kernel/pd.h>
-bool Kernel::Pd::update(Cpu & cpu)
+bool Kernel::Pd::invalidate_tlb(Cpu & cpu, addr_t, size_t)
 {
 /* on the current CPU invalidate the TLB */
 if (cpu.id() == Cpu::executing_id()) {

@@ -18,7 +18,7 @@
 #include <kernel/thread.h>
 #include <kernel/pd.h>
-void Kernel::Thread::Pd_update::execute()
+void Kernel::Thread::Tlb_invalidation::execute()
 {
 /* invalidate cpu-local TLB */
 Cpu::invalidate_tlb();