hw: move global TLB flush to extra class

A subject that inherits from Processor_client does not necessarily need to do a
processor-global TLB flush (e.g., VMs). On the other hand, the Thread
class (as representation of the only source of TLB flushes) is already one of
the largest classes in base-hw because it provides all the syscall backends
and should therefore not accumulate other aspects without a functional reason.
Hence, I decided to move the aspect of synchronizing a TLB flush over all
processors to a dedicated class named Processor_domain_update.
Additionally a singleton of Processor_domain_update_list is used to enable
each processor to see all update-domain requests that are currently pending.

fix #1174
This commit is contained in:
Martin Stein 2014-05-02 18:14:51 +02:00 committed by Norman Feske
parent 42397cb512
commit f3ae42275a
4 changed files with 159 additions and 98 deletions

View File

@ -20,25 +20,51 @@
#include <pic.h>
#include <timer.h>
using namespace Kernel;
namespace Kernel
{
/**
* Lists all pending domain updates
*/
class Processor_domain_update_list;
Pic * pic();
Timer * timer();
}
using Tlb_list_item = Genode::List_element<Processor_client>;
using Tlb_list = Genode::List<Tlb_list_item>;
static Tlb_list *tlb_list()
class Kernel::Processor_domain_update_list
:
public Double_list<Processor_domain_update>
{
static Tlb_list tlb_list;
return &tlb_list;
public:
/**
* Perform all pending domain updates on the executing processor
*/
void for_each_perform_locally()
{
for_each([] (Processor_domain_update * const domain_update) {
domain_update->_perform_locally();
});
}
};
namespace Kernel
{
/**
 * Return singleton of the processor domain-update list
 *
 * The function-local static guarantees that the list is constructed on
 * first use, before any processor can enqueue a pending domain update.
 */
Processor_domain_update_list * processor_domain_update_list()
{
static Processor_domain_update_list s;
return &s;
}
}
/**********************
** Processor_client **
**********************/
void Kernel::Processor_client::_interrupt(unsigned const processor_id)
{
/* determine handling for specific interrupt */
@ -52,7 +78,8 @@ void Kernel::Processor_client::_interrupt(unsigned const processor_id)
/* check whether the interrupt is our inter-processor interrupt */
if (ic->is_ip_interrupt(irq_id, processor_id)) {
_processor->ip_interrupt();
processor_domain_update_list()->for_each_perform_locally();
_processor->ip_interrupt_handled();
/* after all it must be a user interrupt */
} else {
@ -70,44 +97,9 @@ void Kernel::Processor_client::_interrupt(unsigned const processor_id)
void Kernel::Processor_client::_schedule() { _processor->schedule(this); }
/**
 * Enqueue a TLB-flush work item for a protection domain on all processors
 *
 * \param pd_id  kernel name of the protection domain whose TLB entries
 *               shall be flushed
 *
 * The client is removed from the scheduler and stays blocked until
 * 'flush_tlb_by_id' has observed the flush on every processor.
 */
void Kernel::Processor_client::tlb_to_flush(unsigned pd_id)
{
/* initialize pd and reference counters, and remove client from scheduler */
_flush_tlb_pd_id = pd_id;
for (unsigned i = 0; i < PROCESSORS; i++)
/* each processor acknowledges by setting its slot to true */
_flush_tlb_ref_cnt[i] = false;
_unschedule();
/* find the last working item in the TLB work queue */
Tlb_list_item * last = tlb_list()->first();
while (last && last->next()) last = last->next();
/* insert new work item at the end of the work list */
tlb_list()->insert(&_flush_tlb_li, last);
/* enforce kernel entry of other processors */
for (unsigned i = 0; i < PROCESSORS; i++)
pic()->trigger_ip_interrupt(i);
/* process the work queue on the executing processor right away */
processor_pool()->processor(Processor::executing_id())->flush_tlb();
}
/**
 * Flush the TLB entries of the requested domain on the executing processor
 *
 * Invoked on every processor that processes the TLB work queue. The last
 * processor to acknowledge removes the work item and re-schedules the
 * blocked client.
 */
void Kernel::Processor_client::flush_tlb_by_id()
{
/* flush TLB on current processor and adjust ref counter */
Processor::flush_tlb_by_pid(_flush_tlb_pd_id);
_flush_tlb_ref_cnt[Processor::executing_id()] = true;
/* check whether all processors are done */
for (unsigned i = 0; i < PROCESSORS; i++)
if (!_flush_tlb_ref_cnt[i]) return;
/* remove work item from the list and re-schedule thread */
tlb_list()->remove(&_flush_tlb_li);
_schedule();
}
/***************
** Processor **
***************/
void Kernel::Processor::schedule(Processor_client * const client)
{
@ -125,10 +117,8 @@ void Kernel::Processor::schedule(Processor_client * const client)
* Additionally we omit the interrupt if the insertion doesn't
* rescind the current scheduling choice of the processor.
*/
if (_scheduler.insert_and_check(client) && !_ip_interrupt_pending) {
pic()->trigger_ip_interrupt(_id);
_ip_interrupt_pending = true;
}
if (_scheduler.insert_and_check(client)) { trigger_ip_interrupt(); }
} else {
/* add client locally */
@ -137,6 +127,15 @@ void Kernel::Processor::schedule(Processor_client * const client)
}
/**
 * Raise the inter-processor interrupt of this processor
 *
 * The pending flag ensures that at most one IPI per processor is in
 * flight; it is cleared again via 'ip_interrupt_handled' once the
 * target processor has handled the interrupt.
 */
void Kernel::Processor::trigger_ip_interrupt()
{
if (!_ip_interrupt_pending) {
pic()->trigger_ip_interrupt(_id);
_ip_interrupt_pending = true;
}
}
void Kernel::Processor_client::_unschedule()
{
assert(_processor->id() == Processor::executing_id());
@ -151,12 +150,43 @@ void Kernel::Processor_client::_yield()
}
void Kernel::Processor::flush_tlb()
/*****************************
** Processor_domain_update **
*****************************/
void Kernel::Processor_domain_update::_perform_locally()
{
/* iterate through the list of TLB work items, and proceed them */
for (Tlb_list_item * cli = tlb_list()->first(); cli;) {
Tlb_list_item * current = cli;
cli = current->next();
current->object()->flush_tlb_by_id();
}
/* perform domain update locally and get pending bit */
unsigned const processor_id = Processor::executing_id();
if (!_pending[processor_id]) { return; }
_domain_update();
_pending[processor_id] = false;
/* check whether there are still processors pending */
unsigned i = 0;
for (; i < PROCESSORS && !_pending[i]; i++) { }
if (i < PROCESSORS) { return; }
/* as no processors pending anymore, end the domain update */
processor_domain_update_list()->remove(this);
_processor_domain_update_unblocks();
}
/**
 * Perform the domain update on all processors
 *
 * \param domain_id  kernel name of the targeted domain
 *
 * \return  whether the update blocks and reports back on completion
 */
bool Kernel::Processor_domain_update::_perform(unsigned const domain_id)
{
/* perform locally and leave it at that if in uniprocessor mode */
_domain_id = domain_id;
_domain_update();
if (PROCESSORS == 1) { return false; }
/* inform other processors and block until they are done */
processor_domain_update_list()->insert_tail(this);
unsigned const processor_id = Processor::executing_id();
for (unsigned i = 0; i < PROCESSORS; i++) {
if (i == processor_id) { continue; }
/* set the pending bit before raising the IPI so the remote
 * processor finds the request when it enters the kernel */
_pending[i] = true;
processor_pool()->processor(i)->trigger_ip_interrupt();
}
return true;
}

View File

@ -20,8 +20,6 @@
#include <processor_driver.h>
#include <kernel/scheduler.h>
#include <util/list.h>
namespace Kernel
{
using Genode::Processor_driver;
@ -32,6 +30,11 @@ namespace Kernel
*/
class Processor_client;
/**
* Ability to do a domain update on all processors
*/
class Processor_domain_update;
/**
* Multiplexes a single processor to multiple processor clients
*/
@ -43,6 +46,55 @@ namespace Kernel
class Processor;
}
/**
 * Ability to synchronize a domain update over all processors
 *
 * A pending bit per processor tracks which processors still have to
 * apply the update locally.
 */
class Kernel::Processor_domain_update
:
public Double_list_item<Processor_domain_update>
{
friend class Processor_domain_update_list;
private:
bool _pending[PROCESSORS];  /* per-processor: update not yet applied */
unsigned _domain_id;        /* kernel name of the targeted domain */
/**
 * Domain-update back-end
 */
void _domain_update()
{
Processor_driver::flush_tlb_by_pid(_domain_id);
}
/**
 * Perform the domain update on the executing processor
 */
void _perform_locally();
protected:
/**
 * Constructor
 *
 * Starts with no update pending on any processor.
 */
Processor_domain_update()
{
for (unsigned i = 0; i < PROCESSORS; i++) { _pending[i] = false; }
}
/**
 * Perform the domain update on all processors
 *
 * \param domain_id  kernel name of targeted domain
 *
 * \return  whether the update blocks and reports back on completion
 */
bool _perform(unsigned const domain_id);
/**
 * Notice that the update isn't pending on any processor anymore
 */
virtual void _processor_domain_update_unblocks() = 0;
};
class Kernel::Processor_client : public Processor_scheduler::Item
{
protected:
@ -50,12 +102,6 @@ class Kernel::Processor_client : public Processor_scheduler::Item
Processor * _processor;
Processor_lazy_state _lazy_state;
using List_item = Genode::List_element<Processor_client>;
List_item _flush_tlb_li; /* TLB maintainance work list item */
unsigned _flush_tlb_pd_id; /* id of pd that TLB entries are flushed */
bool _flush_tlb_ref_cnt[PROCESSORS]; /* reference counters */
/**
* Handle an interrupt exception that occured during execution
*
@ -94,18 +140,6 @@ class Kernel::Processor_client : public Processor_scheduler::Item
*/
virtual void proceed(unsigned const processor_id) = 0;
/**
* Enqueues TLB maintenance work into queue of the processors
*
* \param pd_id protection domain kernel object's id
*/
void tlb_to_flush(unsigned pd_id);
/**
* Flush TLB entries requested by this client on the current processor
*/
void flush_tlb_by_id();
/**
* Constructor
*
@ -115,8 +149,7 @@ class Kernel::Processor_client : public Processor_scheduler::Item
Processor_client(Processor * const processor, Priority const priority)
:
Processor_scheduler::Item(priority),
_processor(processor),
_flush_tlb_li(this)
_processor(processor)
{ }
/**
@ -190,25 +223,15 @@ class Kernel::Processor : public Processor_driver
return true;
}
/**
* Perform outstanding TLB maintenance work
*/
void flush_tlb();
/**
* Notice that the inter-processor interrupt isn't pending anymore
*/
void ip_interrupt()
{
/*
* This interrupt solely denotes that another processor has
* modified the scheduling plan of this processor and thus
* a higher-priority user context than the current one might be
* available.
*/
_ip_interrupt_pending = false;
flush_tlb();
}
void ip_interrupt_handled() { _ip_interrupt_pending = false; }
/**
* Raise the inter-processor interrupt of the processor
*/
void trigger_ip_interrupt();
/**
* Add a processor client to the scheduling plan of the processor

View File

@ -542,7 +542,7 @@ void Thread::_call_access_thread_regs()
void Thread::_call_update_pd()
{
tlb_to_flush(user_arg_1());
if (Processor_domain_update::_perform(user_arg_1())) { _pause(); }
}

View File

@ -71,6 +71,7 @@ class Kernel::Thread
public Processor_driver::User_context,
public Object<Thread, MAX_THREADS, Thread_ids, thread_ids, thread_pool>,
public Processor_client,
public Processor_domain_update,
public Ipc_node,
public Signal_context_killer,
public Signal_handler,
@ -286,6 +287,13 @@ class Kernel::Thread
void _await_request_succeeded();
void _await_request_failed();
/*****************************
** Processor_domain_update **
*****************************/
void _processor_domain_update_unblocks() { _resume(); }
public:
/**