/*
 * \brief   Provide a processor object for every available processor
 * \author  Martin Stein
 * \author  Stefan Kalkowski
 * \date    2014-01-14
 */

/*
 * Copyright (C) 2014 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU General Public License version 2.
 */

#ifndef _KERNEL__PROCESSOR_POOL_H_
#define _KERNEL__PROCESSOR_POOL_H_

/* base includes */
#include <unmanaged_singleton.h>

/* core includes */
#include <kernel/kernel.h>
#include <kernel/thread.h>

namespace Kernel
{
	/**
	 * Thread that consumes processor time if no other thread is available
	 */
	class Idle_thread;

	/**
	 * Provides a processor object for every available processor
	 */
	class Processor_pool;

	/**
	 * Return Processor_pool singleton
	 */
	Processor_pool * processor_pool();
}

class Kernel::Idle_thread : public Thread
{
	private:

		enum {
			STACK_SIZE   = sizeof(addr_t) * 32,
			STACK_ALIGNM = Processor_driver::DATA_ACCESS_ALIGNM,
		};

		/* dedicated stack, aligned for the processor's data-access needs */
		char _stack[STACK_SIZE] __attribute__((aligned(STACK_ALIGNM)));

		/**
		 * Main function of all idle threads
		 *
		 * Halts the processor until the next interrupt arrives, forever.
		 */
		static void _main()
		{
			while (1) { Processor_driver::wait_for_interrupt(); }
		}

	public:

		/**
		 * Constructor
		 *
		 * \param processor  kernel object of targeted processor
		 */
		Idle_thread(Processor * const processor)
		:
			Thread(Priority::MAX, "idle")
		{
			ip = (addr_t)&_main;

			/* stack grows downwards, so start past the array's last byte */
			sp = (addr_t)&_stack[STACK_SIZE];

			init(processor, core_pd(), 0, 0);
		}
};

class Kernel::Processor_pool
|
|
|
|
{
|
|
|
|
private:
|
|
|
|
|
|
|
|
char _processors[PROCESSORS][sizeof(Processor)];
|
|
|
|
char _idle_threads[PROCESSORS][sizeof(Idle_thread)];
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Return idle thread of a specific processor
|
|
|
|
*
|
|
|
|
* \param processor_id kernel name of the targeted processor
|
|
|
|
*/
|
|
|
|
Idle_thread * _idle_thread(unsigned const processor_id) const
|
|
|
|
{
|
|
|
|
char * const p = const_cast<char *>(_idle_threads[processor_id]);
|
|
|
|
return reinterpret_cast<Idle_thread *>(p);
|
|
|
|
}
|
|
|
|
|
|
|
|
public:
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Constructor
|
|
|
|
*/
|
|
|
|
Processor_pool()
|
|
|
|
{
|
|
|
|
for (unsigned i = 0; i < PROCESSORS; i++) {
|
|
|
|
new (_idle_threads[i]) Idle_thread(processor(i));
|
|
|
|
new (_processors[i]) Processor(i, _idle_thread(i));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Return the object of a specific processor
|
|
|
|
*
|
|
|
|
* \param id kernel name of the targeted processor
|
|
|
|
*/
|
|
|
|
Processor * processor(unsigned const id) const
|
|
|
|
{
|
|
|
|
assert(id < PROCESSORS);
|
|
|
|
char * const p = const_cast<char *>(_processors[id]);
|
|
|
|
return reinterpret_cast<Processor *>(p);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Return the object of the primary processor
|
|
|
|
*/
|
|
|
|
Processor * primary_processor() const
|
|
|
|
{
|
|
|
|
return processor(Processor::primary_id());
|
|
|
|
}
|
|
|
|
};
|
#endif /* _KERNEL__PROCESSOR_POOL_H_ */