hw: add and test new quota-based CPU scheduler

The new scheduler serves the orthogonal requirements of both
high-throughput-oriented scheduling contexts (called fills in the
scheduler) and low-latency-oriented scheduling contexts (called claims
in the scheduler). It therefore knows two scheduling modes. Every claim
owns a CPU-time quota, expressed as a percentage of a super period
(currently 1 second), and a priority that is absolute as long as the
claim has quota left for the current super period. At the end of a
super period, the quota of all claims is refreshed. During a super
period, the claim mode is dominant as long as any active claim has
quota left. Whenever this is not the case, the scheduler switches to
scheduling of fills. Fills are scheduled in a simple round-robin with
identical time slices. Order and time slices of the fill scheduling
are not affected by the super period. Thread creation now takes two
additional arguments, priority and quota. If the quota is 0, the new
thread participates in CPU scheduling with a fill only. Otherwise, it
participates with both a claim and a fill. This concept dovetails
nicely with Genode's quota-based resource management, as any process
can grant subsets of its own CPU time and priorities to its children
without knowing the global means of CPU time and priority.
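
To make the two modes concrete, here is a minimal standalone C++ model of
the decision described above. It is an illustration only: the names
'Share' and 'choose' are made up for this sketch and are not part of the
commit (the actual implementation is the 'Cpu_scheduler' added below).

#include <cstdio>
#include <deque>
#include <vector>

struct Share { int prio; unsigned quota; unsigned claim; bool ready; };

/* claim mode while any ready claim has quota left, otherwise fill mode */
Share *choose(std::vector<Share*> const &all, std::deque<Share*> &fills)
{
    Share *best = nullptr;
    for (Share *s : all)
        if (s->ready && s->claim && (!best || s->prio > best->prio))
            best = s;
    if (best) return best;

    /* simple round-robin over the ready fills, one slice each */
    for (unsigned i = 0; i < fills.size(); i++) {
        Share *s = fills.front();
        fills.pop_front();
        fills.push_back(s);
        if (s->ready) return s;
    }
    return nullptr; /* nothing ready, idle */
}

int main()
{
    Share a { 2, 250, 250, true }; /* claim of 250/1000 at priority 2    */
    Share b { 0,   0,   0, true }; /* quota 0, participates as fill only */
    std::vector<Share*> all   { &a, &b };
    std::deque<Share*>  fills { &a, &b };

    for (int step = 0; step < 4; step++) {
        Share *s = choose(all, fills);
        std::printf("head: priority %d, claim left %u\n", s->prio, s->claim);
        s->claim = s->claim > 100 ? s->claim - 100 : 0; /* consume a slice */
    }
    /* at the end of a super period, each claim is reset to its quota:
       for (Share *s : all) s->claim = s->quota; */
    return 0;
}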

The commit also adds a run script that performs an automated unit test of
the scheduler implementation.

fix #1225
Authored by Martin Stein on 2014-10-09 14:24:27 +02:00, committed by Christian Helmuth
parent a00eb9a66a
commit 1b1fd1e1f9
15 changed files with 820 additions and 493 deletions


@ -0,0 +1,24 @@
#
# \brief Test CPU-scheduler implementation of core
# \author Martin Stein
# \date 2014-09-30
#
# build program images
build "test/cpu_scheduler"
# create directory where the boot files are written to
create_boot_directory
# create single boot image from the compiled program images
build_boot_image "test-cpu_scheduler" test
# configure qemu to use 64 MB RAM and avoid GUI mode
append qemu_args " -m 64 -nographic"
# execute the test in qemu if the targeted platform is supported
run_genode_until "done.*\n" 10
# check the output
grep_output {\[test\]}
compare_output_to { [test] done }
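
As a usage note beyond the script itself (an assumption about the usual
workflow, not part of the commit): from a prepared base-hw build
directory, such a run script is typically started with
'make run/cpu_scheduler', which builds the test core, creates the boot
image, and executes it in Qemu as configured above.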


@ -18,14 +18,21 @@ namespace Kernel
{
enum {
DEFAULT_STACK_SIZE = 16 * 1024,
USER_LAP_TIME_MS = 100,
MAX_PDS = 256,
MAX_THREADS = 256,
MAX_SIGNAL_RECEIVERS = 2048,
MAX_SIGNAL_CONTEXTS = 4096,
MAX_VMS = 4,
MAX_PRIORITY = 128,
};
/* amount of priority bands amongst quota owners in CPU scheduling */
constexpr unsigned cpu_priorities = 4;
/* super period in CPU scheduling and the overall allocatable CPU time */
constexpr unsigned cpu_quota_ms = 1000;
/* time slice for the round-robin mode and the idle in CPU scheduling */
constexpr unsigned cpu_fill_ms = 100;
}
#endif /* _KERNEL__CONFIGURATION_H_ */
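
To put the constants above in concrete terms (values chosen for
illustration only): with cpu_quota_ms = 1000 and cpu_fill_ms = 100, a
claim constructed with a quota of 250 owns 250 of the 1000 time units of
every super period, i.e. 25 % of the CPU time at its priority, while
fills are served round-robin in slices of 100 time units (milliseconds
here, converted to timer tics by the CPU driver).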


@ -0,0 +1,307 @@
/*
* \brief Schedules CPU shares for the execution time of a CPU
* \author Martin Stein
* \date 2014-10-09
*/
/*
* Copyright (C) 2014 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _KERNEL__CPU_SCHEDULER_H_
#define _KERNEL__CPU_SCHEDULER_H_
/* core includes */
#include <util.h>
#include <assert.h>
#include <kernel/configuration.h>
#include <kernel/double_list.h>
namespace Kernel
{
/**
* Priority of an unconsumed CPU claim versus other unconsumed CPU claims
*/
class Cpu_priority;
/**
* Scheduling context that has quota and priority (low-latency)
*/
class Cpu_claim : public Double_list_item { };
/**
* Scheduling context that has no quota or priority (best effort)
*/
class Cpu_fill : public Double_list_item { };
/**
* Scheduling context that is both claim and fill
*/
class Cpu_share;
/**
* Schedules CPU shares for the execution time of a CPU
*/
class Cpu_scheduler;
}
class Kernel::Cpu_priority
{
private:
unsigned _value;
public:
static constexpr signed min = 0;
static constexpr signed max = cpu_priorities - 1;
/**
* Construct priority with value 'v'
*/
Cpu_priority(signed const v) : _value(Genode::min(v, max)) { }
/*
* Standard operators
*/
Cpu_priority & operator =(signed const v)
{
_value = Genode::min(v, max);
return *this;
}
operator signed() const { return _value; }
};
class Kernel::Cpu_share : public Cpu_claim, public Cpu_fill
{
friend class Cpu_scheduler;
private:
signed const _prio;
unsigned const _quota;
unsigned _claim;
unsigned _fill;
bool _ready;
public:
/**
* Constructor
*
* \param p claimed priority
* \param q claimed quota
*/
Cpu_share(signed const p, unsigned const q)
: _prio(p), _quota(q), _claim(q), _ready(0) { }
};
class Kernel::Cpu_scheduler
{
private:
typedef Cpu_share Share;
typedef Cpu_fill Fill;
typedef Cpu_claim Claim;
typedef Double_list_typed<Claim> Claim_list;
typedef Double_list_typed<Fill> Fill_list;
typedef Cpu_priority Prio;
Claim_list _rcl[Prio::max + 1]; /* ready claims */
Claim_list _ucl[Prio::max + 1]; /* unready claims */
Fill_list _fills; /* ready fills */
Share * const _idle;
Share * _head;
unsigned _head_quota;
bool _head_claims;
unsigned const _quota;
unsigned _residual;
unsigned const _fill;
template <typename F> void _for_prios(F f) {
for (signed p = Prio::max; p > Prio::min - 1; p--) { f(p); } }
template <typename T>
static Share * _share(T * const t) { return static_cast<Share *>(t); }
static void _reset(Claim * const c) {
_share(c)->_claim = _share(c)->_quota; }
void _reset_claims(unsigned const p)
{
_rcl[p].for_each([&] (Claim * const c) { _reset(c); });
_ucl[p].for_each([&] (Claim * const c) { _reset(c); });
}
void _consumed(unsigned const q)
{
if (_residual -= q) { return; }
_residual = _quota;
_for_prios([&] (unsigned const p) { _reset_claims(p); });
}
void _set_head(Share * const s, unsigned const q, bool const c)
{
_head_quota = Genode::min(q, _residual);
_head_claims = c;
_head = s;
}
void _next_fill()
{
_head->_fill = _fill;
_fills.head_to_tail();
}
void _head_claimed(unsigned const q)
{
if (_head->_claim) { _head->_claim -= q; }
if (_head->_claim || !_head->_ready) { return; }
_rcl[_head->_prio].to_tail(_head);
}
void _head_filled(unsigned const q)
{
if (_fills.head() != _head) { return; }
if (q < _head->_fill) { _head->_fill -= q; }
else { _next_fill(); }
}
bool _claim_for_head()
{
for (signed p = Prio::max; p > Prio::min - 1; p--) {
Share * const s = _share(_rcl[p].head());
if (!s) { continue; }
if (!s->_claim) { continue; }
_set_head(s, s->_claim, 1);
return 1;
}
return 0;
}
bool _fill_for_head()
{
Share * const s = _share(_fills.head());
if (!s) { return 0; }
_set_head(s, s->_fill, 0);
return 1;
}
public:
/**
* Constructor
*
* \param i idle share that is scheduled with a static quota when no other
* share is schedulable; it cannot be removed and its own values are ignored
* \param q total amount of time quota that can be claimed by shares
* \param f time-slice length of the fill round-robin
*/
Cpu_scheduler(Share * const i, unsigned const q, unsigned const f)
: _idle(i), _quota(q), _residual(q), _fill(f) { _set_head(i, f, 0); }
/**
* Update head according to the consumption of quota 'q'
*/
void update(unsigned q)
{
q = Genode::min(Genode::min(q, _head_quota), _residual);
if (_head_claims) { _head_claimed(q); }
else { _head_filled(q); }
_consumed(q);
if (_claim_for_head()) { return; }
if (_fill_for_head()) { return; }
_set_head(_idle, _fill, 0);
}
/**
* Set 's1' ready and return whether this outdates the current head
*/
bool ready_check(Share * const s1)
{
ready(s1);
Share * s2 = _head;
if (!s1->_claim) { return s2 == _idle; }
if (!_head_claims) { return 1; }
if (s1->_prio != s2->_prio) { return s1->_prio > s2->_prio; }
for (; s2 && s2 != s1; s2 = _share(Claim_list::next(s2))) ;
return !s2;
}
/**
* Set share 's' ready
*/
void ready(Share * const s)
{
assert(!s->_ready && s != _idle);
s->_ready = 1;
s->_fill = _fill;
_fills.insert_tail(s);
if (!s->_quota) { return; }
_ucl[s->_prio].remove(s);
if (s->_claim) { _rcl[s->_prio].insert_head(s); }
else { _rcl[s->_prio].insert_tail(s); }
}
/**
* Set share 's' unready
*/
void unready(Share * const s)
{
assert(s->_ready && s != _idle);
s->_ready = 0;
_fills.remove(s);
if (!s->_quota) { return; }
_rcl[s->_prio].remove(s);
_ucl[s->_prio].insert_tail(s);
}
/**
* As far as possible, prevent the current head from being re-chosen for up to one round
*/
void yield()
{
assert(_head != _idle);
if (_head->_claim) { _head->_claim = 0; }
if (_head != _fills.head()) { return; }
_share(_fills.head())->_fill = _fill;
_fills.head_to_tail();
}
/**
* Remove share 's' from scheduler
*/
void remove(Share * const s)
{
assert(s != _idle && s != _head);
if (s->_ready) { _fills.remove(s); }
if (!s->_quota) { return; }
if (s->_ready) { _rcl[s->_prio].remove(s); }
else { _ucl[s->_prio].remove(s); }
}
/**
* Insert share 's' into scheduler
*/
void insert(Share * const s)
{
assert(!s->_ready);
if (!s->_quota) { return; }
s->_claim = s->_quota;
_ucl[s->_prio].insert_head(s);
}
/*
* Accessors
*/
Share * head() const { return _head; }
unsigned head_quota() const { return _head_quota; }
};
#endif /* _KERNEL__CPU_SCHEDULER_H_ */
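
The following is a minimal usage sketch of the interface above, roughly
mirroring what the CPU driver and the unit test below do. The function
name 'example' and the chosen values are illustrative only; like the
unit test, such code can only be compiled as part of core.

#include <kernel/cpu_scheduler.h>

void example()
{
    using namespace Kernel;

    static Cpu_share     idle(0, 0);                  /* fallback share    */
    static Cpu_scheduler scheduler(&idle, 1000, 100); /* quota, fill slice */

    static Cpu_share worker(2, 250); /* priority 2, 250 of 1000 quota units */
    scheduler.insert(&worker);       /* make the share known                */
    scheduler.ready(&worker);        /* make it eligible for scheduling     */

    scheduler.update(50);            /* head consumed 50 time units         */

    Cpu_share * const head  = scheduler.head();       /* share to run next  */
    unsigned    const quota = scheduler.head_quota();  /* program timer      */
    (void)head; (void)quota;
}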


@ -40,13 +40,6 @@ class Kernel::Double_list_item
Double_list_item * _next;
Double_list_item * _prev;
Double_list * _list;
protected:
Double_list_item() : _list(0) { }
bool _listed() const { return _list; }
};
class Kernel::Double_list
@ -100,7 +93,6 @@ class Kernel::Double_list
i->_prev = _tail;
i->_next = 0;
_tail = i;
i->_list = this;
}
/**
@ -113,7 +105,6 @@ class Kernel::Double_list
i->_next = _head;
i->_prev = 0;
_head = i;
i->_list = this;
}
/**
@ -125,7 +116,6 @@ class Kernel::Double_list
else { i->_next->_prev = i->_prev; }
if (i == _head) { _head = i->_next; }
else { i->_prev->_next = i->_next; }
i->_list = 0;
}
/**


@ -18,7 +18,7 @@
/* core includes */
#include <timer.h>
#include <cpu.h>
#include <kernel/scheduler.h>
#include <kernel/cpu_scheduler.h>
/* base includes */
#include <unmanaged_singleton.h>
@ -26,9 +26,9 @@
namespace Kernel
{
/**
* A single user of a multiplexable processor
* Context of a job (thread, VM, idle) that shall be executed by a CPU
*/
class Processor_client;
class Cpu_job;
/**
* Ability to do a domain update on all processors
@ -40,11 +40,6 @@ namespace Kernel
*/
class Cpu_idle;
/**
* Multiplexes a single processor to multiple processor clients
*/
typedef Scheduler<Processor_client> Processor_scheduler;
/**
* A multiplexable common instruction processor
*/
@ -78,7 +73,7 @@ class Kernel::Processor_domain_update : public Double_list_item
/**
* Perform the domain update on the executing processors
*/
void _perform_locally();
void _do();
protected:
@ -97,7 +92,7 @@ class Kernel::Processor_domain_update : public Double_list_item
*
* \return wether the update blocks and reports back on completion
*/
bool _perform(unsigned const domain_id);
bool _do_global(unsigned const domain_id);
/**
* Notice that the update isn't pending on any processor anymore
@ -105,15 +100,13 @@ class Kernel::Processor_domain_update : public Double_list_item
virtual void _processor_domain_update_unblocks() = 0;
};
class Kernel::Processor_client : public Processor_scheduler::Item
class Kernel::Cpu_job : public Cpu_share
{
protected:
Processor * _processor;
Processor * _cpu;
Cpu_lazy_state _lazy_state;
unsigned _tics_consumed;
/**
* Handle an interrupt exception that occured during execution
*
@ -153,54 +146,29 @@ class Kernel::Processor_client : public Processor_scheduler::Item
virtual void proceed(unsigned const processor_id) = 0;
/**
* Constructor
*
* \param processor kernel object of targeted processor
* \param priority scheduling priority
* Construct a job with scheduling priority 'prio'
*/
Processor_client(Processor * const processor, Priority const priority)
:
Processor_scheduler::Item(priority),
_processor(processor),
_tics_consumed(0)
{ }
Cpu_job(Cpu_priority const p) : Cpu_share(p, 0), _cpu(0) { }
/**
* Destructor
*/
~Processor_client()
{
if (!_scheduled()) { return; }
_unschedule();
}
~Cpu_job();
/**
* Update how many tics the client consumed from its current time slice
*
* \param tics_left tics that aren't consumed yet at the slice
* \param tics_per_slice tics that the slice provides
* Link job to CPU 'cpu'
*/
void update_tics_consumed(unsigned const tics_left,
unsigned const tics_per_slice)
{
unsigned const old_tics_left = tics_per_slice - _tics_consumed;
_tics_consumed += old_tics_left - tics_left;
}
/**
* Reset how many tics the client consumed from its current time slice
*/
void reset_tics_consumed() { _tics_consumed = 0; }
void affinity(Processor * const cpu);
/***************
** Accessors **
***************/
void cpu(Processor * const cpu) { _cpu = cpu; }
Cpu_lazy_state * lazy_state() { return &_lazy_state; }
unsigned tics_consumed() { return _tics_consumed; }
};
class Kernel::Cpu_idle : public Cpu::User_context, public Processor_client
class Kernel::Cpu_idle : public Cpu::User_context, public Cpu_job
{
private:
@ -242,58 +210,33 @@ class Kernel::Processor : public Cpu
{
private:
unsigned const _id;
Cpu_idle _idle;
Processor_scheduler _scheduler;
bool _ip_interrupt_pending;
Timer * const _timer;
typedef Cpu_job Job;
void _start_timer(unsigned const tics) {
_timer->start_one_shot(tics, _id); }
unsigned const _id;
Cpu_idle _idle;
Timer * const _timer;
Cpu_scheduler _scheduler;
bool _ip_interrupt_pending;
unsigned _tics_per_slice() {
return _timer->ms_to_tics(USER_LAP_TIME_MS); }
void _update_timer(unsigned const tics_consumed,
unsigned const tics_per_slice)
{
assert(tics_consumed <= tics_per_slice);
if (tics_consumed >= tics_per_slice) { _start_timer(1); }
else { _start_timer(tics_per_slice - tics_consumed); }
}
unsigned _quota() const { return _timer->ms_to_tics(cpu_quota_ms); }
unsigned _fill() const { return _timer->ms_to_tics(cpu_fill_ms); }
Job * _head() const { return static_cast<Job *>(_scheduler.head()); }
public:
/**
* Constructor
*
* \param id kernel name of the processor
* \param timer scheduling timer
* Construct object for CPU 'id' with scheduling timer 'timer'
*/
Processor(unsigned const id, Timer * const timer)
:
_id(id), _idle(this), _scheduler(&_idle),
_ip_interrupt_pending(false), _timer(timer)
{ }
_id(id), _idle(this), _timer(timer),
_scheduler(&_idle, _quota(), _fill()),
_ip_interrupt_pending(false) { }
/**
* Initializate on the processor that this object corresponds to
* Check if IRQ 'i' was due to a scheduling timeout
*/
void init_processor_local() { _update_timer(0, _tics_per_slice()); }
/**
* Check for a scheduling timeout and handle it in case
*
* \param interrupt_id kernel name of interrupt that caused this call
*
* \return wether it was a timeout and therefore has been handled
*/
bool check_timer_interrupt(unsigned const interrupt_id)
{
if (_timer->interrupt_id(_id) != interrupt_id) { return false; }
_scheduler.yield_occupation();
return true;
}
bool timer_irq(unsigned const i) { return _timer->interrupt_id(_id) == i; }
/**
* Notice that the inter-processor interrupt isn't pending anymore
@ -306,24 +249,46 @@ class Kernel::Processor : public Cpu
void trigger_ip_interrupt();
/**
* Add a processor client to the scheduling plan of the processor
*
* \param client targeted client
* Schedule 'job' at this CPU
*/
void schedule(Processor_client * const client);
void schedule(Job * const job);
/**
* Handle exception of the processor and proceed its user execution
*/
void exception();
void exception()
{
/* update old job */
Job * const old_job = _head();
old_job->exception(_id);
/* update scheduler */
unsigned const old_time = _scheduler.head_quota();
unsigned const new_time = _timer->value(_id);
unsigned quota = old_time > new_time ? old_time - new_time : 1;
_scheduler.update(quota);
/* get new job */
Job * const new_job = _head();
quota = _scheduler.head_quota();
assert(quota);
_timer->start_one_shot(quota, _id);
/* switch between lazy state of old and new job */
Cpu_lazy_state * const old_state = old_job->lazy_state();
Cpu_lazy_state * const new_state = new_job->lazy_state();
prepare_proceeding(old_state, new_state);
/* resume new job */
new_job->proceed(_id);
}
/***************
** Accessors **
***************/
unsigned id() const { return _id; }
Processor_scheduler * scheduler() { return &_scheduler; }
Cpu_scheduler * scheduler() { return &_scheduler; }
};
class Kernel::Processor_pool


@ -1,216 +0,0 @@
/*
* \brief Round-robin scheduler
* \author Martin Stein
* \date 2012-11-30
*/
/*
* Copyright (C) 2012-2013 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
#ifndef _KERNEL__SCHEDULER_H_
#define _KERNEL__SCHEDULER_H_
/* core includes */
#include <kernel/configuration.h>
#include <kernel/double_list.h>
#include <assert.h>
namespace Kernel
{
/**
* Range save priority value
*/
class Priority;
/**
* Inheritable ability for objects of type T to be item in a scheduler
*/
template <typename T>
class Scheduler_item;
/**
* Round robin scheduler for objects of type T
*/
template <typename T>
class Scheduler;
}
class Kernel::Priority
{
private:
unsigned _value;
public:
enum {
MIN = 0,
MAX = MAX_PRIORITY,
};
/**
* Constructor
*/
Priority(unsigned const priority)
:
_value(Genode::min(priority, MAX))
{ }
/**
* Assignment operator
*/
Priority & operator =(unsigned const priority)
{
_value = Genode::min(priority, MAX);
return *this;
}
operator unsigned() const { return _value; }
};
/**
* Ability to be item in a scheduler through inheritance
*/
template <typename T>
class Kernel::Scheduler_item : public Double_list_item
{
private:
Priority const _priority;
protected:
/**
* Return wether this item is managed by a scheduler currently
*/
bool _scheduled() const { return Double_list_item::_listed(); }
public:
/**
* Constructor
*
* \param p scheduling priority
*/
Scheduler_item(Priority const p) : _priority(p) { }
/***************
** Accessors **
***************/
Priority priority() const { return _priority; }
};
template <typename T>
class Kernel::Scheduler
{
private:
T * const _idle;
T * _occupant;
Double_list_typed<T> _items[Priority::MAX + 1];
bool _yield;
bool _does_update(T * const occupant)
{
if (_yield) {
_yield = false;
return true;
}
if (_occupant != occupant) { return true; }
return false;
}
public:
typedef Scheduler_item<T> Item;
/**
* Constructor
*/
Scheduler(T * const idle) : _idle(idle), _occupant(0) { }
/**
* Adjust occupant reference to the current scheduling plan
*
* \param updated true on return if the occupant has changed/yielded
* \param refreshed true on return if the occupant got a new timeslice
*
* \return updated occupant
*/
T * update_occupant(bool & updated, bool & refreshed)
{
for (int i = Priority::MAX; i >= 0 ; i--) {
T * const new_occupant = _items[i].head();
if (!new_occupant) { continue; }
updated = _does_update(new_occupant);
T * const old_occupant = _occupant;
if (!old_occupant) { refreshed = true; }
else {
unsigned const new_prio = new_occupant->priority();
unsigned const old_prio = old_occupant->priority();
refreshed = new_prio <= old_prio;
}
_occupant = new_occupant;
return new_occupant;
}
updated = _does_update(_idle);
refreshed = true;
_occupant = 0;
return _idle;
}
/**
* Adjust scheduling plan to the fact that the current occupant yileds
*/
void yield_occupation()
{
_yield = true;
if (!_occupant) { return; }
_items[_occupant->priority()].head_to_tail();
}
/**
* Include 'i' in scheduling
*/
void insert(T * const i)
{
assert(i != _idle);
_items[i->priority()].insert_tail(i);
}
/**
* Include item in scheduling and check wether an update is needed
*
* \param item targeted item
*
* \return wether the current occupant is out-dated after insertion
*/
bool insert_and_check(T * const item)
{
insert(item);
if (!_occupant) { return true; }
return item->priority() > _occupant->priority();
}
/**
* Exclude 'i' from scheduling
*/
void remove(T * const i) { _items[i->priority()].remove(i); }
/***************
** Accessors **
***************/
T * occupant() { return _occupant ? _occupant : _idle; }
T * idle() const { return _idle; }
};
#endif /* _KERNEL__SCHEDULER_H_ */


@ -44,7 +44,7 @@ class Kernel::Thread
:
public Cpu::User_context,
public Object<Thread, MAX_THREADS, Thread_ids, thread_ids, thread_pool>,
public Processor_client,
public Cpu_job,
public Processor_domain_update,
public Ipc_node,
public Signal_context_killer,
@ -290,9 +290,9 @@ class Kernel::Thread
Native_utcb * const utcb, bool const start);
/**********************
** Processor_client **
**********************/
/*************
** Cpu_job **
*************/
void exception(unsigned const processor_id);
void proceed(unsigned const processor_id);


@ -37,7 +37,7 @@ namespace Kernel
}
class Kernel::Vm : public Object<Vm, MAX_VMS, Vm_ids, vm_ids, vm_pool>,
public Processor_client
public Cpu_job
{
private:
@ -57,28 +57,25 @@ class Kernel::Vm : public Object<Vm, MAX_VMS, Vm_ids, vm_ids, vm_pool>,
* \param state initial CPU state
* \param context signal for VM exceptions other than interrupts
*/
Vm(void * const state,
Signal_context * const context)
Vm(void * const state, Signal_context * const context)
:
Processor_client(processor_pool()->primary_processor(),
Priority::MIN),
_state((Vm_state * const)state),
Cpu_job(Cpu_priority::min), _state((Vm_state * const)state),
_context(context)
{ }
{ Cpu_job::affinity(processor_pool()->primary_processor()); }
/****************
** Vm_session **
****************/
void run() { Processor_client::_schedule(); }
void run() { Cpu_job::_schedule(); }
void pause() { Processor_client::_unschedule(); }
void pause() { Cpu_job::_unschedule(); }
/**********************
** Processor_client **
**********************/
/*************
** Cpu_job **
*************/
void exception(unsigned const processor_id)
{
@ -90,7 +87,7 @@ class Kernel::Vm : public Object<Vm, MAX_VMS, Vm_ids, vm_ids, vm_pool>,
case Genode::Cpu_state::DATA_ABORT:
_state->dfar = Processor::Dfar::read();
default:
Processor_client::_unschedule();
Cpu_job::_unschedule();
_context->submit(1);
}
}


@ -46,20 +46,15 @@ class Genode::Timer : public Mmio
inline void start_one_shot(uint32_t const tics, unsigned)
{
write<Cs::M1>(1);
read<Cs>();
write<Clo>(0);
write<Cmp>(read<Clo>() + tics);
write<Cs::M1>(1);
}
static uint32_t ms_to_tics(unsigned const ms) {
return (Board::SYSTEM_TIMER_CLOCK / 1000) * ms; }
void clear_interrupt(unsigned)
{
write<Cs::M1>(1);
read<Cs>();
}
unsigned value(unsigned)
{
Cmp::access_t const cmp = read<Cmp>();


@ -283,12 +283,9 @@ extern "C" void init_kernel_multiprocessor()
*/
perf_counter()->enable();
/* locally initialize processor */
/* locally initialize interrupt controller */
unsigned const processor_id = Processor::executing_id();
Processor * const processor = processor_pool()->processor(processor_id);
processor->init_processor_local();
/* locally initialize interrupt controller */
pic()->init_processor_local();
pic()->unmask(Timer::interrupt_id(processor_id), processor_id);
@ -309,7 +306,7 @@ extern "C" void init_kernel_multiprocessor()
/* start thread with stack pointer at the top of stack */
static Native_utcb utcb;
static Thread t(Priority::MAX, "core");
static Thread t(Cpu_priority::max, "core");
_main_thread_id = t.id();
_main_thread_utcb = &utcb;
_main_thread_utcb->start_info()->init(t.id(), Genode::Native_capability());


@ -37,20 +37,16 @@ namespace Kernel
}
class Kernel::Processor_domain_update_list
:
public Double_list_typed<Processor_domain_update>
: public Double_list_typed<Processor_domain_update>
{
typedef Processor_domain_update Update;
public:
/**
* Perform all pending domain updates on the executing processor
*/
void for_each_perform_locally()
{
for_each([] (Processor_domain_update * const domain_update) {
domain_update->_perform_locally();
});
}
void do_each() { for_each([] (Update * const u) { u->_do(); }); }
};
namespace Kernel
@ -63,11 +59,30 @@ namespace Kernel
}
/**********************
** Processor_client **
**********************/
/*************
** Cpu_job **
*************/
void Processor_client::_interrupt(unsigned const processor_id)
Cpu_job::~Cpu_job() { if (_cpu) { _cpu->scheduler()->remove(this); } }
void Cpu_job::_schedule() { _cpu->schedule(this); }
void Cpu_job::_unschedule()
{
assert(_cpu->id() == Processor::executing_id());
_cpu->scheduler()->unready(this);
}
void Cpu_job::_yield()
{
assert(_cpu->id() == Processor::executing_id());
_cpu->scheduler()->yield();
}
void Cpu_job::_interrupt(unsigned const processor_id)
{
/* determine handling for specific interrupt */
unsigned irq_id;
@ -75,13 +90,13 @@ void Processor_client::_interrupt(unsigned const processor_id)
if (ic->take_request(irq_id)) {
/* check wether the interrupt is a processor-scheduling timeout */
if (!_processor->check_timer_interrupt(irq_id)) {
if (!_cpu->timer_irq(irq_id)) {
/* check wether the interrupt is our inter-processor interrupt */
if (ic->is_ip_interrupt(irq_id, processor_id)) {
processor_domain_update_list()->for_each_perform_locally();
_processor->ip_interrupt_handled();
processor_domain_update_list()->do_each();
_cpu->ip_interrupt_handled();
/* after all it must be a user interrupt */
} else {
@ -96,15 +111,20 @@ void Processor_client::_interrupt(unsigned const processor_id)
}
void Processor_client::_schedule() { _processor->schedule(this); }
/********************
** Processor_idle **
********************/
Cpu_idle::Cpu_idle(Processor * const cpu) : Processor_client(cpu, 0)
void Cpu_job::affinity(Processor * const cpu)
{
_cpu = cpu;
_cpu->scheduler()->insert(this);
}
/**************
** Cpu_idle **
**************/
Cpu_idle::Cpu_idle(Processor * const cpu) : Cpu_job(Cpu_priority::min)
{
Cpu_job::cpu(cpu);
cpu_exception = RESET;
ip = (addr_t)&_main;
sp = (addr_t)&_stack[stack_size];
@ -118,29 +138,10 @@ void Cpu_idle::proceed(unsigned const cpu) { mtc()->continue_user(this, cpu); }
** Processor **
***************/
void Processor::schedule(Processor_client * const client)
void Processor::schedule(Job * const job)
{
if (_id != executing_id()) {
/*
* Remote add client and let target processor notice it if necessary
*
* The interrupt controller might provide redundant submission of
* inter-processor interrupts. Thus its possible that once the targeted
* processor is able to grab the kernel lock, multiple remote updates
* occured and consequently the processor traps multiple times for the
* sole purpose of recognizing the result of the accumulative changes.
* Hence, we omit further interrupts if there is one pending already.
* Additionailly we omit the interrupt if the insertion doesn't
* rescind the current scheduling choice of the processor.
*/
if (_scheduler.insert_and_check(client)) { trigger_ip_interrupt(); }
} else {
/* add client locally */
_scheduler.insert(client);
}
if (_id == executing_id()) { _scheduler.ready(job); }
else if (_scheduler.ready_check(job)) { trigger_ip_interrupt(); }
}
@ -153,25 +154,11 @@ void Processor::trigger_ip_interrupt()
}
void Processor_client::_unschedule()
{
assert(_processor->id() == Processor::executing_id());
_processor->scheduler()->remove(this);
}
void Processor_client::_yield()
{
assert(_processor->id() == Processor::executing_id());
_processor->scheduler()->yield_occupation();
}
/*****************************
** Processor_domain_update **
*****************************/
void Processor_domain_update::_perform_locally()
void Processor_domain_update::_do()
{
/* perform domain update locally and get pending bit */
unsigned const processor_id = Processor::executing_id();
@ -190,7 +177,7 @@ void Processor_domain_update::_perform_locally()
}
bool Processor_domain_update::_perform(unsigned const domain_id)
bool Processor_domain_update::_do_global(unsigned const domain_id)
{
/* perform locally and leave it at that if in uniprocessor mode */
_domain_id = domain_id;
@ -207,64 +194,3 @@ bool Processor_domain_update::_perform(unsigned const domain_id)
}
return true;
}
void Kernel::Processor::exception()
{
/*
* Request the current occupant without any update. While the
* processor was outside the kernel, another processor may have changed the
* scheduling of the local activities in a way that an update would return
* an occupant other than that whose exception caused the kernel entry.
*/
Processor_client * const old_client = _scheduler.occupant();
Cpu_lazy_state * const old_state = old_client->lazy_state();
old_client->exception(_id);
/*
* The processor local as well as remote exception-handling may have
* changed the scheduling of the local activities. Hence we must update the
* occupant.
*/
bool updated, refreshed;
Processor_client * const new_client =
_scheduler.update_occupant(updated, refreshed);
/**
* There are three scheduling situations we have to deal with:
*
* The client has not changed and didn't yield:
*
* The client must not update its time-slice state as the timer
* can continue as is and hence keeps providing the information.
*
* The client has changed or did yield and the previous client
* received a fresh timeslice:
*
* The previous client can reset his time-slice state.
* The timer must be re-programmed according to the time-slice
* state of the new client.
*
* The client has changed and the previous client did not receive
* a fresh timeslice:
*
* The previous client must update its time-slice state. The timer
* must be re-programmed according to the time-slice state of the
* new client.
*/
if (updated) {
unsigned const tics_per_slice = _tics_per_slice();
if (refreshed) { old_client->reset_tics_consumed(); }
else {
unsigned const tics_left = _timer->value(_id);
old_client->update_tics_consumed(tics_left, tics_per_slice);
}
_update_timer(new_client->tics_consumed(), tics_per_slice);
}
/**
* Apply the CPU state of the new client and continue his execution
*/
Cpu_lazy_state * const new_state = new_client->lazy_state();
prepare_proceeding(old_state, new_state);
new_client->proceed(_id);
}


@ -133,39 +133,31 @@ void Thread::_pause()
void Thread::_schedule()
{
if (_state == SCHEDULED) { return; }
Processor_client::_schedule();
Cpu_job::_schedule();
_state = SCHEDULED;
}
void Thread::_unschedule(State const s)
{
if (_state == SCHEDULED) { Processor_client::_unschedule(); }
if (_state == SCHEDULED) { Cpu_job::_unschedule(); }
_state = s;
}
Thread::Thread(unsigned const priority, char const * const label)
:
Processor_client(0, priority),
Thread_base(this),
_state(AWAITS_START),
_pd(0),
_utcb_phys(0),
_signal_receiver(0),
_label(label)
{
cpu_exception = RESET;
}
Cpu_job(priority), Thread_base(this), _state(AWAITS_START), _pd(0),
_utcb_phys(0), _signal_receiver(0), _label(label)
{ cpu_exception = RESET; }
void Thread::init(Processor * const processor, Pd * const pd,
Native_utcb * const utcb_phys, bool const start)
Native_utcb * const utcb_phys, bool const start)
{
assert(_state == AWAITS_START)
/* store thread parameters */
Processor_client::_processor = processor;
Cpu_job::affinity(processor);
_utcb_phys = utcb_phys;
/* join protection domain */
@ -209,7 +201,7 @@ void Thread::exception(unsigned const processor_id)
_interrupt(processor_id);
return;
case UNDEFINED_INSTRUCTION:
if (_processor->retry_undefined_instr(&_lazy_state)) { return; }
if (_cpu->retry_undefined_instr(&_lazy_state)) { return; }
PWRN("undefined instruction");
_stop();
return;
@ -401,7 +393,7 @@ void Thread::_call_yield_thread()
{
Thread * const t = Thread::pool()->object(user_arg_1());
if (t) { t->_receive_yielded_cpu(); }
Processor_client::_yield();
Cpu_job::_yield();
}
@ -540,7 +532,7 @@ void Thread::_call_access_thread_regs()
void Thread::_call_update_pd()
{
if (Processor_domain_update::_perform(user_arg_1())) { _pause(); }
if (Processor_domain_update::_do_global(user_arg_1())) { _pause(); }
}


@ -101,7 +101,8 @@ Platform_thread::Platform_thread(const char * const label,
_utcb_core_addr->core_start_info()->init(Cpu::primary_id());
/* create kernel object */
_id = Kernel::new_thread(_kernel_thread, Kernel::Priority::MAX, _label);
constexpr unsigned prio = Kernel::Cpu_priority::max;
_id = Kernel::new_thread(_kernel_thread, prio, _label);
if (!_id) {
PERR("failed to create kernel object");
throw Cpu_session::Thread_creation_failed();
@ -136,8 +137,8 @@ Platform_thread::Platform_thread(const char * const label,
_utcb_core_addr = (Native_utcb *)core_env()->rm_session()->attach(_utcb);
/* create kernel object */
enum { MAX_PRIO = Kernel::Priority::MAX };
auto const phys_prio = Cpu_session::scale_priority(MAX_PRIO, virt_prio);
constexpr unsigned max_prio = Kernel::Cpu_priority::max;
auto const phys_prio = Cpu_session::scale_priority(max_prio, virt_prio);
_id = Kernel::new_thread(_kernel_thread, phys_prio, _label);
if (!_id) {
PERR("failed to create kernel object");


@ -0,0 +1,328 @@
/*
* \brief Test CPU-scheduler implementation of the kernel
* \author Martin Stein
* \date 2014-09-30
*/
/*
* Copyright (C) 2014 Genode Labs GmbH
*
* This file is part of the Genode OS framework, which is distributed
* under the terms of the GNU General Public License version 2.
*/
/* core includes */
#include <kernel/cpu_scheduler.h>
/*
* Utilities
*/
using Genode::size_t;
using Genode::addr_t;
using Kernel::Cpu_scheduler;
using Kernel::Cpu_share;
namespace Kernel { void test(); }
void * operator new(size_t s, void * p) { return p; }
struct Data
{
Cpu_share idle;
Cpu_scheduler scheduler;
char shares[9][sizeof(Cpu_share)];
Data() : idle(0, 0), scheduler(&idle, 1000, 100) { }
};
Data * data()
{
static Data d;
return &d;
}
void done()
{
Genode::printf("[test] done\n");
while (1) ;
}
unsigned share_id(void * const pointer)
{
addr_t const address = (addr_t)pointer;
addr_t const base = (addr_t)data()->shares;
if (address < base || address >= base + sizeof(data()->shares)) {
return 0; }
return (address - base) / sizeof(Cpu_share) + 1;
}
Cpu_share * share(unsigned const id)
{
if (!id) { return &data()->idle; }
return reinterpret_cast<Cpu_share *>(&data()->shares[id - 1]);
}
void create(unsigned const id)
{
Cpu_share * const s = share(id);
void * const p = (void *)s;
switch (id) {
case 1: new (p) Cpu_share(2, 230); break;
case 2: new (p) Cpu_share(0, 170); break;
case 3: new (p) Cpu_share(3, 110); break;
case 4: new (p) Cpu_share(1, 90); break;
case 5: new (p) Cpu_share(3, 120); break;
case 6: new (p) Cpu_share(0, 0); break;
case 7: new (p) Cpu_share(2, 180); break;
case 8: new (p) Cpu_share(2, 100); break;
case 9: new (p) Cpu_share(0, 0); break;
default: return;
}
data()->scheduler.insert(s);
}
void destroy(unsigned const id)
{
Cpu_share * const s = share(id);
data()->scheduler.remove(s);
s->~Cpu_share();
}
void update_check(unsigned const l, unsigned const c,
unsigned const s, unsigned const q)
{
data()->scheduler.update(c);
Cpu_share * const hs = data()->scheduler.head();
unsigned const hq = data()->scheduler.head_quota();
if (hs != share(s)) {
unsigned const hi = share_id(hs);
Genode::printf("[test] wrong share %u in line %u\n", hi, l);
done();
}
if (hq != q) {
Genode::printf("[test] wrong quota %u in line %u\n", hq, l);
done();
}
}
void ready_check(unsigned const l, unsigned const s, bool const x)
{
bool const y = data()->scheduler.ready_check(share(s));
if (y != x) {
Genode::printf("[test] wrong check result %u in line %u\n", y, l);
done();
}
}
/*
* Shortcuts for all basic operations that the test consists of
*/
#define C(s) create(s);
#define D(s) destroy(s);
#define A(s) data()->scheduler.ready(share(s));
#define I(s) data()->scheduler.unready(share(s));
#define Y data()->scheduler.yield();
#define U(c, s, q) update_check(__LINE__, c, s, q);
#define O(s) ready_check(__LINE__, s, true);
#define N(s) ready_check(__LINE__, s, false);
/**
* Main routine
*/
void Kernel::test()
{
/*
* Step-by-step testing
*
* Every line in this test is structured according to the scheme
* '<ops> U(c,s,q) <doc>' where the symbols are defined as follows:
*
* ops Operations that affect the schedule but not the head of the
* scheduler (which is a buffer to remember the last scheduling
* choice). These operations are:
*
* C(s) construct the context with ID 's' and insert it
* D(s) remove the context with ID 's' and destruct it
* A(s) set the context with ID 's' active
* I(s) set the context with ID 's' inactive
* O(s) do 'A(s)' and check that this will outdate the head
* N(s) do 'A(s)' and check that this won't outdate the head
* Y annotate that the current head wants to yield
*
* U(c,s,q) First update the head and time of the scheduler according to
* the new schedule and the fact that the head consumed a
* quantum of 'c'. Then check if the new head is the context
* with ID 's' that has a quota of 'q'.
*
* doc Documents the expected schedule for the point after the head
* update in the corresponding line. First it shows the time left for
* the current round, followed by a comma. Then it lists all claims
* via their ID, followed by a ' (active) or a ° (inactive) and the
* corresponding quota. So 5°120 is the inactive context 5 that
* has a quota of 120 left for the current round. The claims are
* sorted in descending order of their priorities, and the priority
* bands are separated by dashes. After the lowest priority band
* there is an additional dash followed by the current state of the
* round-robin scheduling, which has no quota or priority. Only
* active contexts are listed here, and only the head is listed
* together with its remaining quota. So 4'50 1 7 means that the
* active context 4 is the round-robin head with quota 50 remaining
* and that it is followed by the active contexts 1 and 7, both with
* a fresh time slice.
*
* The order of operations is the same as in the operative kernel so each
* line can be seen as one "kernel pass". If any check in a line fails,
* the test prematurely stops and prints out where and why it has stopped.
*/
/* first round - idle */
U( 10, 0, 100) /* 0, - */
U( 90, 0, 100) /* 10, - */
U(120, 0, 100) /* 100, - */
U(130, 0, 100) /* 200, - */
U(140, 0, 100) /* 300, - */
U(150, 0, 100) /* 400, - */
U(160, 0, 100) /* 500, - */
U(170, 0, 100) /* 600, - */
U(180, 0, 100) /* 700, - */
U(190, 0, 100) /* 800, - */
U(200, 0, 100) /* 900, - */
/* second round - one claim, one filler */
C(1) U(111, 0, 100) /* 0, 1°230 - */
A(1) U(123, 1, 230) /* 100, 1'230 - 1'100 */
I(1) U(200, 0, 100) /* 200, 1°30 - */
A(1) U( 10, 1, 30) /* 400, 1'30 - 1'100 */
U(100, 1, 100) /* 410, 1'0 - 1'100 */
U(200, 1, 100) /* 440, 1'0 - 1'100 */
I(1) U(200, 0, 100) /* 540, 1°0 - */
U(200, 0, 100) /* 640, 1°0 - */
A(1) U( 10, 1, 100) /* 740, 1'0 - 1'100 */
U( 50, 1, 50) /* 750, 1'0 - 1'50 */
U( 20, 1, 30) /* 800, 1'0 - 1'30 */
U(100, 1, 100) /* 820, 1'0 - 1'100 */
U(200, 1, 50) /* 850, 1'0 - 1'100 */
U(200, 1, 230) /* 950, 1'230 - 1'100 */
/* third round - one claim per priority */
C(2) A(2) U( 50, 1, 180) /* 0, 1'180 - 2'170 - 1'100 2 */
I(1) U( 70, 2, 170) /* 50, 1°110 - 2'170 - 2'100 */
A(1) I(2) U(110, 1, 110) /* 120, 1'110 - 2°60 - 1'100 */
U( 90, 1, 20) /* 230, 1'20 - 2°60 - 1'100 */
A(2) I(1) U( 10, 2, 60) /* 320, 1°10 - 2'60 - 2'100 */
C(3) U( 40, 2, 20) /* 330, 3°110 - 1°10 - 2'10 - 2'100 */
A(3) U( 10, 3, 110) /* 370, 3'110 - 1°10 - 2'10 - 2'100 3 */
U(150, 2, 10) /* 380, 3'0 - 1°10 - 2'10 - 2'100 3 */
U( 10, 2, 100) /* 490, 3'0 - 1°10 - 2'0 - 2'100 3 */
U( 60, 2, 40) /* 500, 3'0 - 1°10 - 2'0 - 2'40 3 */
C(4) U( 60, 3, 100) /* 560, 3'0 - 1°10 - 4°90 - 2'0 - 3'100 2 */
C(6) A(6) U(120, 2, 100) /* 600, 3'0 - 1°10 - 4°90 - 2'0 - 2'100 6 3*/
A(4) U( 80, 4, 90) /* 700, 3'0 - 1°10 - 4'90 - 2'0 - 2'20 6 3 4 */
I(4) A(1) U( 50, 1, 10) /* 780, 3'0 - 1'10 - 4°40 - 2'0 - 2'20 6 3 1 */
U( 50, 2, 20) /* 830, 3'0 - 1'0 - 4°40 - 2'0 - 2'20 6 3 1 */
U( 50, 6, 100) /* 840, 3'0 - 1'0 - 4°40 - 2'0 - 6'100 3 1 2 */
U(100, 3, 40) /* 860, 3'0 - 1'0 - 4°40 - 2'0 - 3'100 1 2 6 */
U( 60, 3, 110) /* 960, 3'110 - 1'230 - 4°40 - 2'170 - 3'60 1 2 6 */
/* fourth round - multiple claims per priority */
C(5) U( 60, 3, 50) /* 0, 3'50 5°120 - 1'230 - 4°90 - 2'170 - 3'60 1 2 6 */
A(4) I(3) U( 40, 1, 230) /* 60, 3°10 5°120 - 1'230 - 4'90 - 2'170 - 1'100 2 6 4 */
C(7) A(7) U(200, 7, 180) /* 100, 3°10 5°120 - 7'180 1'30 - 4'90 - 2'170 - 1'100 2 6 4 7 */
C(8) A(5) U(100, 5, 120) /* 300, 5'120 3°10 - 7'80 1'30 8°100 - 4'90 - 2'170 - 1'100 2 6 4 7 5 */
A(3) U(100, 3, 10) /* 400, 3'10 5'20 - 7'80 1'30 8°100 - 4'90 - 2'170 - 1'100 2 6 4 7 5 3 */
U( 30, 5, 20) /* 500, 5'20 3'0 - 7'80 1'30 8°100 - 4'90 - 2'170 - 1'100 2 6 4 7 5 3 */
C(9) A(9) U( 10, 5, 10) /* 510, 5'10 3'0 - 7'80 1'30 8°100 - 4'90 - 2'170 - 1'100 2 6 4 7 5 3 9 */
U( 50, 7, 80) /* 520, 5'0 3'0 - 7'80 1'30 8°100 - 4'90 - 2'170 - 1'100 2 6 4 7 5 3 9 */
A(8) I(7) U( 10, 8, 100) /* 530, 5'0 3'0 - 8'100 1'30 7°70 - 4'90 - 2'170 - 1'100 2 6 4 5 3 9 8 */
I(8) U( 80, 1, 30) /* 540, 5'0 3'0 - 1'30 7°70 8°20 - 4'90 - 2'170 - 1'100 2 6 4 5 3 9 */
U(200, 4, 90) /* 620, 5'0 3'0 - 1'0 7°70 8°20 - 4'90 - 2'170 - 1'100 2 6 4 5 3 9 */
U(100, 2, 170) /* 650, 5'0 3'0 - 1'0 7°70 8°20 - 4'0 - 2'170 - 1'100 2 6 4 5 3 9 */
A(8) A(7) U( 10, 7, 70) /* 740, 5'0 3'0 - 7'70 8'20 1'0 - 4'0 - 2'160 - 1'100 2 6 4 5 3 9 8 7 */
I(7) I(3) U( 10, 8, 20) /* 750, 5'0 3°0 - 8'20 1'0 7°60 - 4'0 - 2'160 - 1'100 2 6 4 5 9 8 */
I(8) U( 10, 2, 160) /* 760, 5'0 3°0 - 1'0 7°60 8°10 - 4'0 - 2'160 - 1'100 2 6 4 5 9 */
I(2) U( 40, 1, 100) /* 770, 5'0 3°0 - 1'0 7°60 8°10 - 4'0 - 2°120 - 1'100 6 4 5 9 */
A(3) U( 30, 1, 70) /* 810, 5'0 3'0 - 1'0 7°60 8°10 - 4'0 - 2°120 - 1'100 6 4 5 9 3 */
U( 80, 6, 90) /* 840, 5'0 3'0 - 1'0 7°60 8°10 - 4'0 - 2°120 - 6'100 4 5 9 3 1 */
A(7) A(8) U( 10, 8, 10) /* 910, 5'0 3'0 - 8'10 7'60 1'0 - 4'0 - 2°120 - 6'90 4 5 9 3 1 7 8 */
U( 30, 7, 60) /* 920, 5'0 3'0 - 7'60 1'0 8'0 - 4°0 - 2°120 - 6'90 4 5 9 3 1 7 8 */
A(2) I(7) U( 10, 2, 60) /* 930, 5'0 3'0 - 1'0 8'0 7°50 - 4'0 - 2'120 - 6'90 4 5 9 3 1 8 2 */
I(3) I(5) U( 40, 2, 20) /* 940, 5°0 3°0 - 1'0 8'0 7°50 - 4'0 - 2'80 - 6'90 4 9 1 8 2 */
I(9) I(4) U( 10, 2, 10) /* 980, 5°0 3°0 - 1'0 8'0 7°50 - 4°0 - 2'70 - 6'90 1 8 2 */
U( 40, 1, 230) /* 990, 5°120 3°110 - 1'230 8'100 7°180 - 4°90 - 2'170 - 6'90 1 8 2 */
/* fifth round - yield, ready & check*/
I(6) U( 30, 1, 200) /* 0, 5°120 3°110 - 1'200 8'100 7°180 - 4°90 - 2'170 - 1'100 8 2 */
Y U( 20, 8, 100) /* 30, 5°120 3°110 - 8'100 1'0 7°180 - 4°90 - 2'170 - 8'100 2 1 */
U(200, 2, 170) /* 50, 5°120 3°110 - 1'0 8'0 7°180 - 4°90 - 2'170 - 8'100 2 1 */
Y U( 70, 8, 100) /* 150, 5°120 3°110 - 1'0 8'0 7°180 - 4°90 - 2'0 - 8'100 2 1 */
I(8) U( 40, 2, 100) /* 220, 5°120 3°110 - 1'0 7°180 8°0 - 4°90 - 2'0 - 2'100 1 */
I(1) U( 50, 2, 50) /* 260, 5°120 3°110 - 1°0 7°180 8°0 - 4°90 - 2'0 - 2'50 */
U( 10, 2, 40) /* 310, 5°120 3°110 - 1°0 7°180 8°0 - 4°90 - 2'0 - 2'40 */
N(1) U(200, 1, 100) /* 320, 5°120 3°110 - 1°0 7°180 8°0 - 4°90 - 2'0 - 1'100 2 */
U( 10, 1, 90) /* 360, 5°120 3°110 - 1'0 7°180 8°0 - 4°90 - 2'0 - 1'90 2 */
I(1) U( 10, 2, 100) /* 370, 5°120 3°110 - 1°0 7°180 8°0 - 4°90 - 2'0 - 2'100 */
O(5) U( 10, 5, 120) /* 380, 5'120 3°110 - 1°0 7°180 8°0 - 4°90 - 2'0 - 2'90 5 */
Y U( 90, 2, 90) /* 390, 5'0 3°110 - 1°0 7°180 8°0 - 4°90 - 2'0 - 2'90 5 */
Y U( 10, 5, 100) /* 480, 5'0 3°110 - 1°0 7°180 8°0 - 4°90 - 2'0 - 5'100 2 */
O(7) U( 10, 7, 180) /* 490, 5'0 3°110 - 7'180 1°0 8°0 - 4°90 - 2'0 - 5'90 2 7 */
Y U( 10, 5, 90) /* 500, 5'0 3°110 - 7'0 1°0 8°0 - 4°90 - 2'0 - 5'90 2 7 */
Y U( 10, 2, 100) /* 510, 5'0 3°110 - 7'0 1°0 8°0 - 4°90 - 2'0 - 2'100 7 5 */
Y U( 10, 7, 100) /* 520, 5'0 3°110 - 7'0 1°0 8°0 - 4°90 - 2'0 - 7'100 5 2 */
I(5) U( 10, 7, 90) /* 530, 5°0 3°110 - 7'0 1°0 8°0 - 4°90 - 2'0 - 7'90 2 */
I(7) N(5) U( 10, 2, 100) /* 540, 5'0 3°110 - 7°0 1°0 8°0 - 4°90 - 2'0 - 2'100 5 */
N(7) U(200, 5, 100) /* 550, 5'0 3°110 - 7'0 1°0 8°0 - 4°90 - 2'0 - 5'100 7 2 */
I(5) I(7) U( 10, 2, 100) /* 650, 5°0 3°110 - 7°0 1°0 8°0 - 4°90 - 2'0 - 2'100 */
I(2) U( 10, 0, 100) /* 660, 5°0 3°110 - 7°0 1°0 8°0 - 4°90 - 2°0 - */
U( 10, 0, 100) /* 670, 5°0 3°110 - 7°0 1°0 8°0 - 4°90 - 2°0 - */
U(100, 0, 100) /* 680, 5°0 3°110 - 7°0 1°0 8°0 - 4°90 - 2°0 - */
O(9) U( 10, 9, 100) /* 780, 5°0 3°110 - 7°0 1°0 8°0 - 4°90 - 2°0 - 9'100 */
N(6) U( 20, 9, 80) /* 790, 5°0 3°110 - 7°0 1°0 8°0 - 4°90 - 2°0 - 9'80 6 */
N(8) U( 10, 9, 70) /* 810, 5°0 3°110 - 8'0 7°0 1°0 - 4°90 - 2°0 - 9'70 6 8 */
Y U( 10, 6, 100) /* 820, 5°0 3°110 - 8'0 7°0 1°0 - 4°90 - 2°0 - 6'100 8 9 */
Y U( 10, 8, 100) /* 830, 5°0 3°110 - 8'0 7°0 1°0 - 4°90 - 2°0 - 8'100 9 6 */
N(7) Y U( 20, 9, 100) /* 840, 5°0 3°110 - 8'0 7'0 1°0 - 4°90 - 2°0 - 9'100 6 7 8 */
I(8) I(9) U( 10, 6, 100) /* 860, 5°0 3°110 - 7'0 8°0 1°0 - 4°90 - 2°0 - 6'100 7 */
I(6) I(7) U( 10, 0, 100) /* 870, 5°0 3°110 - 7°0 8°0 1°0 - 4°90 - 2°0 - */
O(4) U( 20, 4, 90) /* 880, 5°0 3°110 - 7°0 8°0 1°0 - 4'90 - 2°0 - 4'100 */
O(3) N(1) U( 10, 3, 90) /* 900, 3'110 5°0 - 1'0 7°0 8°0 - 4'80 - 2°0 - 4'100 3 1 */
N(5) I(4) U( 10, 3, 80) /* 910, 3'100 5'0 - 1'0 7°0 8°0 - 4°80 - 2°0 - 3 1 5 */
I(3) U( 10, 1, 70) /* 920, 5'0 3°90 - 1'0 7°0 8°0 - 4°80 - 2°0 - 1'100 5 */
O(3) U( 10, 3, 60) /* 930, 3'90 5'0 - 1'0 7°0 8°0 - 4°80 - 2°0 - 1'90 5 3 */
N(4) U( 10, 3, 50) /* 940, 3'80 5'0 - 1'0 7°0 8°0 - 4'80 - 2°0 - 1'90 5 3 4 */
I(4) U( 10, 3, 40) /* 950, 3'70 5'0 - 1'0 7°0 8°0 - 4°80 - 2°0 - 1'90 5 3 */
I(3) N(4) U( 10, 4, 30) /* 960, 5'0 3°60 - 1'0 7°0 8°0 - 4'80 - 2°0 - 1'90 5 4 */
I(4) U( 10, 1, 20) /* 970, 5'0 3°60 - 1'0 7°0 8°0 - 4°70 - 2°0 - 1'90 5 */
O(3) O(4) U( 10, 3, 10) /* 980, 3'60 5'0 - 1'0 7°0 8°0 - 4'70 - 2°0 - 1'80 5 3 4 */
Y U( 10, 5, 120) /* 990, 5'120 3'110 - 1'230 7°180 8°100 - 4'90 - 2°170 - 1'80 5 3 4 */
/* sixth round - destroy and re-create */
D(3) U( 30, 5, 90) /* 0, 5'90 - 1'230 7°180 8°100 - 4'90 - 2°170 - 1'80 5 4 */
I(5) U( 30, 1, 230) /* 30, 5°60 - 1'230 7°180 8°100 - 4'90 - 2°170 - 1'80 4 */
D(4) D(7) U( 20, 1, 210) /* 60, 5°60 - 1'210 8°100 - 2°170 - 1'80 4 */
I(1) N(9) U( 40, 9, 100) /* 80, 5°60 - 1°170 8°100 - 2°170 - 9'100 */
A(5) O(8) U( 70, 5, 60) /* 120, 5'60 - 1°170 8'100 - 2°170 - 9'30 5 8 */
D(8) I(5) U( 10, 9, 30) /* 190, 5°60 - 1°170 - 2°170 - 9'30 */
N(6) C(4) U( 10, 9, 20) /* 200, 5°60 - 1°170 - 4°90 - 2°170 - 9'20 6 */
D(5) O(4) U( 10, 4, 90) /* 210, 1°170 - 4'90 - 2°170 - 9'10 6 4 */
U(100, 9, 10) /* 220, 1°170 - 4'0 - 2°170 - 9'10 6 4 */
U( 10, 6, 100) /* 310, 1°170 - 4'0 - 2°170 - 6'100 4 9 */
D(4) U(200, 9, 100) /* 320, 1°170 - 2°170 - 9'100 6 */
C(5) A(5) U( 10, 5, 120) /* 420, 5'120 - 1°210 - 2°170 - 9'90 6 5 */
C(4) Y U( 10, 9, 90) /* 430, 5'0 - 1°170 - 4°90 - 2°170 - 9'90 6 5 */
O(4) Y U( 50, 4, 90) /* 440, 5'0 - 1°170 - 4'90 - 2°170 - 6'100 5 4 9 */
D(6) Y U( 10, 5, 100) /* 490, 5'0 - 1°170 - 4'0 - 2°170 - 5'100 4 9 */
D(9) U(200, 4, 100) /* 500, 5'0 - 1°170 - 4'0 - 2°170 - 4'100 5 */
C(7) C(8) U(200, 5, 100) /* 600, 5'0 - 1°170 7°180 8°100 - 4'0 - 2°170 - 5'100 4 */
O(1) O(7) U( 10, 7, 180) /* 700, 5'0 - 7'180 1'170 8°100 - 4'0 - 2°170 - 5'90 4 1 7 */
O(8) U( 40, 8, 100) /* 710, 5'0 - 8'100 7'140 1'170 - 4'0 - 2°170 - 5'90 4 1 7 8 */
D(7) U(200, 1, 150) /* 750, 5'0 - 1'170 8'0 - 4'0 - 2°170 - 5'90 4 1 8 */
Y U( 60, 5, 90) /* 850, 5'0 - 8'0 1'0 - 4'0 - 2°170 - 5'90 4 1 8 */
U(100, 5, 120) /* 910, 5'120 - 8'100 1'230 - 4'90 - 2°170 - 5'90 4 1 8 */
done();
}


@ -0,0 +1,14 @@
#
# \brief Build config for a core that tests its CPU-scheduler implementation
# \author Martin Stein
# \date 2011-12-16
#
# set target name that this configuration applies to
TARGET = test-cpu_scheduler
# library that provides the whole configuration
LIBS += core
# add C++ sources
SRC_CC += kernel/test.cc