/*
 * \brief   A multiplexable common instruction processor
 * \author  Martin Stein
 * \author  Stefan Kalkowski
 * \date    2014-01-14
 */

/*
 * Copyright (C) 2014 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */

/* core includes */
#include <kernel/processor.h>
#include <kernel/processor_pool.h>
#include <kernel/thread.h>
#include <kernel/irq.h>
#include <pic.h>
#include <timer.h>


namespace Kernel
{
	/**
	 * Lists all pending domain updates
	 */
	class Processor_domain_update_list;

	Pic * pic();
	Timer * timer();
}


class Kernel::Processor_domain_update_list
:
	public Double_list<Processor_domain_update>
{
	public:

		/**
		 * Perform all pending domain updates on the executing processor
		 */
		void for_each_perform_locally()
		{
			for_each([] (Processor_domain_update * const domain_update) {
				domain_update->_perform_locally();
			});
		}
};


namespace Kernel
{
	/**
	 * Return the singleton of the processor domain-update list
	 */
	Processor_domain_update_list * processor_domain_update_list()
	{
		static Processor_domain_update_list s;
		return &s;
	}
}


/**********************
 ** Processor_client **
 **********************/
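
/**
 * Handle an interrupt that occurred on this processor
 *
 * \param processor_id  ID of the executing processor
 *
 * Dispatch order, as implemented below: a taken request is checked
 * against the scheduling timer first, then against the inter-processor
 * interrupt, and any other IRQ is forwarded to the user-level
 * interrupt handling.
 */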
void Kernel::Processor_client::_interrupt(unsigned const processor_id)
{
	/* determine handling for the specific interrupt */
	unsigned irq_id;
	Pic * const ic = pic();
	if (ic->take_request(irq_id)) {

		/* check whether the interrupt is a processor-scheduling timeout */
		if (!_processor->check_timer_interrupt(irq_id)) {

			/* check whether the interrupt is our inter-processor interrupt */
			if (ic->is_ip_interrupt(irq_id, processor_id)) {

				processor_domain_update_list()->for_each_perform_locally();
				_processor->ip_interrupt_handled();

			/* otherwise, it must be a user interrupt */
			} else {

				/* try to inform the user interrupt-handler */
				Irq::occurred(irq_id);
			}
		}
	}
	/* end the interrupt request at the controller */
	ic->finish_request();
}
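

/**
 * Schedule this client on the processor it is assigned to
 */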
void Kernel::Processor_client::_schedule() { _processor->schedule(this); }


/***************
 ** Processor **
 ***************/
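
/**
 * Add 'client' to the schedule of this processor
 *
 * May be called from a remote processor, in which case the target
 * processor is notified via an inter-processor interrupt if necessary.
 */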
void Kernel::Processor::schedule(Processor_client * const client)
{
	if (_id != executing_id()) {

		/*
		 * Add the client remotely and let the target processor notice it
		 * if necessary.
		 *
		 * The interrupt controller might provide redundant submission of
		 * inter-processor interrupts. Thus, it is possible that, once the
		 * targeted processor is able to grab the kernel lock, multiple
		 * remote updates have occurred and, consequently, the processor
		 * traps multiple times for the sole purpose of recognizing the
		 * result of the accumulated changes. Hence, we omit further
		 * interrupts if there is one pending already. Additionally, we
		 * omit the interrupt if the insertion doesn't rescind the current
		 * scheduling choice of the processor.
		 */
		if (_scheduler.insert_and_check(client)) { trigger_ip_interrupt(); }

	} else {

		/* add the client locally */
		_scheduler.insert(client);
	}
}
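

/**
 * Notify this processor about a remote update it has to apply
 *
 * The '_ip_interrupt_pending' flag coalesces redundant inter-processor
 * interrupts: while one is pending, further updates don't trigger
 * another IPI, as the handler evaluates all accumulated updates at once.
 */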
void Kernel::Processor::trigger_ip_interrupt()
{
	if (!_ip_interrupt_pending) {
		pic()->trigger_ip_interrupt(_id);
		_ip_interrupt_pending = true;
	}
}
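

/**
 * Remove this client from the schedule of its processor
 *
 * Must be called on the processor of the client, as asserted below.
 */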
void Kernel::Processor_client::_unschedule()
{
	assert(_processor->id() == Processor::executing_id());
	_processor->scheduler()->remove(this);
}
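

/**
 * Let the client yield its current processor occupation
 *
 * Must be called on the processor of the client, as asserted below.
 */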
void Kernel::Processor_client::_yield()
{
	assert(_processor->id() == Processor::executing_id());
	_processor->scheduler()->yield_occupation();
}


/*****************************
 ** Processor_domain_update **
 *****************************/
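
/**
 * Apply a pending domain update on the executing processor
 *
 * The last processor that applies the update removes it from the
 * global list and unblocks the initiator of the update.
 */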
void Kernel::Processor_domain_update::_perform_locally()
{
	/* perform the domain update locally and clear the pending bit */
	unsigned const processor_id = Processor::executing_id();
	if (!_pending[processor_id]) { return; }
	_domain_update();
	_pending[processor_id] = false;

	/* check whether there are still processors pending */
	unsigned i = 0;
	for (; i < PROCESSORS && !_pending[i]; i++) { }
	if (i < PROCESSORS) { return; }

	/* as no processor is pending anymore, end the domain update */
	processor_domain_update_list()->remove(this);
	_processor_domain_update_unblocks();
}
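

/**
 * Perform the update for domain 'domain_id' on all processors
 *
 * \return  whether remote processors still have to apply the update,
 *          in which case the initiator is expected to block until
 *          '_processor_domain_update_unblocks' gets called
 */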
bool Kernel::Processor_domain_update::_perform(unsigned const domain_id)
{
	/* perform locally and leave it at that if in uniprocessor mode */
	_domain_id = domain_id;
	_domain_update();
	if (PROCESSORS == 1) { return false; }

	/* inform the other processors and block until they are done */
	processor_domain_update_list()->insert_tail(this);
	unsigned const processor_id = Processor::executing_id();
	for (unsigned i = 0; i < PROCESSORS; i++) {
		if (i == processor_id) { continue; }
		_pending[i] = true;
		processor_pool()->processor(i)->trigger_ip_interrupt();
	}
	return true;
}