/*
 * \brief   A multiplexable common instruction processor
 * \author  Martin Stein
 * \author  Stefan Kalkowski
 * \date    2014-01-14
 */

/*
 * Copyright (C) 2014 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */
#ifndef _KERNEL__PROCESSOR_H_
#define _KERNEL__PROCESSOR_H_

/* core includes */
#include <processor_driver.h>
#include <kernel/scheduler.h>

/* Genode includes */
#include <util/list.h>
namespace Kernel
|
|
|
|
{
|
2014-04-09 12:14:38 +02:00
|
|
|
using Genode::Processor_driver;
|
|
|
|
using Genode::Processor_lazy_state;
|
|
|
|
|
2013-12-17 18:10:02 +01:00
|
|
|
/**
|
2014-03-11 01:21:56 +01:00
|
|
|
* A single user of a multiplexable processor
|
2013-12-17 18:10:02 +01:00
|
|
|
*/
|
2014-03-11 01:21:56 +01:00
|
|
|
class Processor_client;
|
2013-12-17 18:10:02 +01:00
|
|
|
|
|
|
|
/**
|
2014-03-11 01:21:56 +01:00
|
|
|
* Multiplexes a single processor to multiple processor clients
|
2013-12-17 18:10:02 +01:00
|
|
|
*/
|
2014-03-11 01:21:56 +01:00
|
|
|
typedef Scheduler<Processor_client> Processor_scheduler;
|
2013-12-17 18:10:02 +01:00
|
|
|
|
|
|
|
/**
|
2014-03-11 01:21:56 +01:00
|
|
|
* A multiplexable common instruction processor
|
2013-12-17 18:10:02 +01:00
|
|
|
*/
|
2014-03-11 01:21:56 +01:00
|
|
|
class Processor;
|
2013-12-17 18:10:02 +01:00
|
|
|
}
|
|
|
|
|
2014-03-11 01:21:56 +01:00
|
|
|
class Kernel::Processor_client : public Processor_scheduler::Item
|
2013-12-17 18:10:02 +01:00
|
|
|
{
|
2014-03-11 01:21:56 +01:00
|
|
|
protected:
|
|
|
|
|
2014-04-09 12:14:38 +02:00
|
|
|
Processor * _processor;
|
|
|
|
Processor_lazy_state _lazy_state;
|
|
|
|
|
hw: restrict processor broadcast to TLB flushing
Removes the generic processor broadcast function call. By now, that call
was used for cross processor TLB maintance operations only. When core/kernel
gets its memory mapped on demand, and unmapped again, the previous cross
processor flush routine doesn't work anymore, because of a hen-egg problem.
The previous cross processor broadcast is realized using a thread constructed
by core running on top of each processor core. When constructing threads in
core, a dataspace for its thread context is constructed. Each constructed
RAM dataspace gets attached, zeroed out, and detached again. The detach
routine requires a TLB flush operation executed on each processor core.
Instead of executing a thread on each processor core, now a thread waiting
for a global TLB flush is removed from the scheduler queue, and gets attached
to a TLB flush queue of each processor. The processor local queue gets checked
whenever the kernel is entered. The last processor, which executed the TLB
flush, re-attaches the blocked thread to its scheduler queue again.
To ease uo the above described mechanism, a platform thread is now directly
associated with a platform pd object, instead of just associate it with the
kernel pd's id.
Ref #723
2014-04-28 20:36:00 +02:00
|
|
|
using List_item = Genode::List_element<Processor_client>;
|
|
|
|
|
|
|
|
List_item _flush_tlb_li; /* TLB maintainance work list item */
|
|
|
|
unsigned _flush_tlb_pd_id; /* id of pd that TLB entries are flushed */
|
2014-04-28 21:31:57 +02:00
|
|
|
bool _flush_tlb_ref_cnt[PROCESSORS]; /* reference counters */
|
hw: restrict processor broadcast to TLB flushing
Removes the generic processor broadcast function call. By now, that call
was used for cross processor TLB maintance operations only. When core/kernel
gets its memory mapped on demand, and unmapped again, the previous cross
processor flush routine doesn't work anymore, because of a hen-egg problem.
The previous cross processor broadcast is realized using a thread constructed
by core running on top of each processor core. When constructing threads in
core, a dataspace for its thread context is constructed. Each constructed
RAM dataspace gets attached, zeroed out, and detached again. The detach
routine requires a TLB flush operation executed on each processor core.
Instead of executing a thread on each processor core, now a thread waiting
for a global TLB flush is removed from the scheduler queue, and gets attached
to a TLB flush queue of each processor. The processor local queue gets checked
whenever the kernel is entered. The last processor, which executed the TLB
flush, re-attaches the blocked thread to its scheduler queue again.
To ease uo the above described mechanism, a platform thread is now directly
associated with a platform pd object, instead of just associate it with the
kernel pd's id.
Ref #723
2014-04-28 20:36:00 +02:00
|
|
|
|
2014-03-11 01:21:56 +01:00
|
|
|
/**
|
|
|
|
* Handle an interrupt exception that occured during execution
|
|
|
|
*
|
|
|
|
* \param processor_id kernel name of targeted processor
|
|
|
|
*/
|
|
|
|
void _interrupt(unsigned const processor_id);
|
2013-12-17 18:10:02 +01:00
|
|
|
|
|
|
|
/**
|
2014-03-11 01:21:56 +01:00
|
|
|
* Insert context into the processor scheduling
|
2013-12-17 18:10:02 +01:00
|
|
|
*/
|
2014-03-11 01:21:56 +01:00
|
|
|
void _schedule();
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Remove context from the processor scheduling
|
|
|
|
*/
|
|
|
|
void _unschedule();
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Yield currently scheduled processor share of the context
|
|
|
|
*/
|
|
|
|
void _yield();
|
|
|
|
|
2013-12-17 18:10:02 +01:00
|
|
|
public:
|
|
|
|
|
2014-03-11 01:21:56 +01:00
|
|
|
/**
|
|
|
|
* Handle an exception that occured during execution
|
|
|
|
*
|
|
|
|
* \param processor_id kernel name of targeted processor
|
|
|
|
*/
|
|
|
|
virtual void exception(unsigned const processor_id) = 0;
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Continue execution
|
|
|
|
*
|
|
|
|
* \param processor_id kernel name of targeted processor
|
|
|
|
*/
|
|
|
|
virtual void proceed(unsigned const processor_id) = 0;
|
|
|
|
|
hw: restrict processor broadcast to TLB flushing
Removes the generic processor broadcast function call. By now, that call
was used for cross processor TLB maintance operations only. When core/kernel
gets its memory mapped on demand, and unmapped again, the previous cross
processor flush routine doesn't work anymore, because of a hen-egg problem.
The previous cross processor broadcast is realized using a thread constructed
by core running on top of each processor core. When constructing threads in
core, a dataspace for its thread context is constructed. Each constructed
RAM dataspace gets attached, zeroed out, and detached again. The detach
routine requires a TLB flush operation executed on each processor core.
Instead of executing a thread on each processor core, now a thread waiting
for a global TLB flush is removed from the scheduler queue, and gets attached
to a TLB flush queue of each processor. The processor local queue gets checked
whenever the kernel is entered. The last processor, which executed the TLB
flush, re-attaches the blocked thread to its scheduler queue again.
To ease uo the above described mechanism, a platform thread is now directly
associated with a platform pd object, instead of just associate it with the
kernel pd's id.
Ref #723
2014-04-28 20:36:00 +02:00
|
|
|
/**
|
2014-04-28 21:31:57 +02:00
|
|
|
* Enqueues TLB maintainance work into queue of the processors
|
hw: restrict processor broadcast to TLB flushing
Removes the generic processor broadcast function call. By now, that call
was used for cross processor TLB maintance operations only. When core/kernel
gets its memory mapped on demand, and unmapped again, the previous cross
processor flush routine doesn't work anymore, because of a hen-egg problem.
The previous cross processor broadcast is realized using a thread constructed
by core running on top of each processor core. When constructing threads in
core, a dataspace for its thread context is constructed. Each constructed
RAM dataspace gets attached, zeroed out, and detached again. The detach
routine requires a TLB flush operation executed on each processor core.
Instead of executing a thread on each processor core, now a thread waiting
for a global TLB flush is removed from the scheduler queue, and gets attached
to a TLB flush queue of each processor. The processor local queue gets checked
whenever the kernel is entered. The last processor, which executed the TLB
flush, re-attaches the blocked thread to its scheduler queue again.
To ease uo the above described mechanism, a platform thread is now directly
associated with a platform pd object, instead of just associate it with the
kernel pd's id.
Ref #723
2014-04-28 20:36:00 +02:00
|
|
|
*
|
|
|
|
* \param pd_id protection domain kernel object's id
|
|
|
|
*/
|
|
|
|
void tlb_to_flush(unsigned pd_id);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Flush TLB entries requested by this client on the current processor
|
|
|
|
*/
|
|
|
|
void flush_tlb_by_id();
|
|
|
|
|
2013-12-17 18:10:02 +01:00
|
|
|
/**
|
|
|
|
* Constructor
|
|
|
|
*
|
|
|
|
* \param processor kernel object of targeted processor
|
2014-03-11 01:21:56 +01:00
|
|
|
* \param priority scheduling priority
|
2013-12-17 18:10:02 +01:00
|
|
|
*/
|
2014-03-11 01:21:56 +01:00
|
|
|
Processor_client(Processor * const processor, Priority const priority)
|
2013-12-17 18:10:02 +01:00
|
|
|
:
|
2014-03-11 01:21:56 +01:00
|
|
|
Processor_scheduler::Item(priority),
|
2014-04-09 12:14:38 +02:00
|
|
|
_processor(processor),
|
hw: restrict processor broadcast to TLB flushing
Removes the generic processor broadcast function call. By now, that call
was used for cross processor TLB maintance operations only. When core/kernel
gets its memory mapped on demand, and unmapped again, the previous cross
processor flush routine doesn't work anymore, because of a hen-egg problem.
The previous cross processor broadcast is realized using a thread constructed
by core running on top of each processor core. When constructing threads in
core, a dataspace for its thread context is constructed. Each constructed
RAM dataspace gets attached, zeroed out, and detached again. The detach
routine requires a TLB flush operation executed on each processor core.
Instead of executing a thread on each processor core, now a thread waiting
for a global TLB flush is removed from the scheduler queue, and gets attached
to a TLB flush queue of each processor. The processor local queue gets checked
whenever the kernel is entered. The last processor, which executed the TLB
flush, re-attaches the blocked thread to its scheduler queue again.
To ease uo the above described mechanism, a platform thread is now directly
associated with a platform pd object, instead of just associate it with the
kernel pd's id.
Ref #723
2014-04-28 20:36:00 +02:00
|
|
|
_flush_tlb_li(this)
|
2014-03-11 01:21:56 +01:00
|
|
|
{ }
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Destructor
|
|
|
|
*/
|
|
|
|
~Processor_client()
|
2013-12-17 18:10:02 +01:00
|
|
|
{
|
2014-03-11 01:21:56 +01:00
|
|
|
if (!_scheduled()) { return; }
|
|
|
|
_unschedule();
|
2013-12-17 18:10:02 +01:00
|
|
|
}
|
2014-04-09 12:14:38 +02:00
|
|
|
|
|
|
|
|
|
|
|
/***************
|
|
|
|
** Accessors **
|
|
|
|
***************/
|
|
|
|
|
|
|
|
Processor_lazy_state * lazy_state() { return &_lazy_state; }
|
2013-12-17 18:10:02 +01:00
|
|
|
};
|
|
|
|
|
2014-03-03 00:12:53 +01:00
|
|
|
class Kernel::Processor : public Processor_driver
{
	private:

		unsigned const      _id;                    /* kernel name of this processor */
		Processor_scheduler _scheduler;             /* scheduling plan of this processor */
		bool                _ip_interrupt_pending;  /* inter-processor interrupt in flight */

	public:

		/**
		 * Constructor
		 *
		 * \param id           kernel name of the processor object
		 * \param idle_client  client that gets scheduled on idle
		 */
		Processor(unsigned const id, Processor_client * const idle_client)
		:
			_id(id), _scheduler(idle_client), _ip_interrupt_pending(false)
		{ }

		/**
		 * Perform outstanding TLB maintenance work
		 */
		void flush_tlb();

		/**
		 * Notice that the inter-processor interrupt isn't pending anymore
		 */
		void ip_interrupt()
		{
			/*
			 * This interrupt solely denotes that another processor has
			 * modified the scheduling plan of this processor and thus
			 * a more prior user context than the current one might be
			 * available.
			 */
			_ip_interrupt_pending = false;

			/* the IPI may also signal queued TLB maintenance work */
			flush_tlb();
		}

		/**
		 * Add a processor client to the scheduling plan of the processor
		 *
		 * \param client  targeted client
		 */
		void schedule(Processor_client * const client);


		/***************
		 ** Accessors **
		 ***************/

		unsigned id() const { return _id; }

		Processor_scheduler * scheduler() { return &_scheduler; }
};
#endif /* _KERNEL__PROCESSOR_H_ */